hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf125ceee8cc59e4e42586e815f5f884ec87b43 | 633 | py | Python | walletbackend/manage.py | Fajar2020/MiniEWallet | bd843ab0a957f999dd557dd1f4e27a5d9d08b828 | [
"MIT"
] | null | null | null | walletbackend/manage.py | Fajar2020/MiniEWallet | bd843ab0a957f999dd557dd1f4e27a5d9d08b828 | [
"MIT"
] | 3 | 2021-10-06T19:43:07.000Z | 2022-02-27T07:55:40.000Z | walletbackend/manage.py | Fajar2020/MiniEWallet | bd843ab0a957f999dd557dd1f4e27a5d9d08b828 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point for administrative tasks."""
    # Fall back to the project's settings module unless the caller
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'walletbackend.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original
        # import failure chained as the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.772727 | 77 | 0.685624 |
acf126625c533e30a2b77986cea575cf28161563 | 399 | py | Python | examples/simple/programs/12_classes.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 31 | 2020-05-02T13:34:26.000Z | 2021-06-06T17:25:52.000Z | examples/simple/programs/12_classes.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 108 | 2019-11-18T19:41:52.000Z | 2022-03-18T13:58:17.000Z | examples/simple/programs/12_classes.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 4 | 2020-05-19T08:57:44.000Z | 2020-09-21T08:53:46.000Z | class BankAccount(object):
def __init__(self, initial_balance=0):
self.balance = initial_balance
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
self.balance -= amount
def overdrawn(self):
return self.balance < 0
# Demo: open an account with 15, withdraw 50, then print the resulting
# (negative) balance and whether the account is overdrawn.
my_account = BankAccount(15)
my_account.withdraw(50)
print(my_account.balance, my_account.overdrawn())
| 22.166667 | 49 | 0.681704 |
acf1272e9d02b4af3de9f50bb633572493624f3b | 100,547 | py | Python | Cython/Compiler/Code.py | 0dminnimda/cython | 7b0e2856977185496f7d195b66b1d3bf69fb0c0f | [
"Apache-2.0"
] | 1 | 2021-06-02T06:18:45.000Z | 2021-06-02T06:18:45.000Z | Cython/Compiler/Code.py | stmckeni/cython | 5bf5aa63d6b6742144071a2af896067c21b3752a | [
"Apache-2.0"
] | 2 | 2021-07-13T17:05:02.000Z | 2021-08-02T03:47:28.000Z | Cython/Compiler/Code.py | 0dminnimda/cython | 7b0e2856977185496f7d195b66b1d3bf69fb0c0f | [
"Apache-2.0"
] | 1 | 2021-05-21T08:22:03.000Z | 2021-05-21T08:22:03.000Z | # cython: language_level=3str
# cython: auto_pickle=False
#
# Code output module
#
from __future__ import absolute_import
import cython
cython.declare(os=object, re=object, operator=object, textwrap=object,
Template=object, Naming=object, Options=object, StringEncoding=object,
Utils=object, SourceDescriptor=object, StringIOTree=object,
DebugFlags=object, basestring=object, defaultdict=object,
closing=object, partial=object)
import hashlib
import operator
import os
import re
import shutil
import textwrap
from string import Template
from functools import partial
from contextlib import closing
from collections import defaultdict
from . import Naming
from . import Options
from . import DebugFlags
from . import StringEncoding
from . import Version
from .. import Utils
from .Scanning import SourceDescriptor
from ..StringIOTree import StringIOTree
try:
from __builtin__ import basestring
except ImportError:
from builtins import str as basestring
non_portable_builtins_map = {
    # builtins that have different names in different Python versions
    'bytes' : ('PY_MAJOR_VERSION < 3', 'str'),
    'unicode' : ('PY_MAJOR_VERSION >= 3', 'str'),
    'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'),
    'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'),
    'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'),
}

ctypedef_builtins_map = {
    # types of builtins in "ctypedef class" statements which we don't
    # import either because the names conflict with C types or because
    # the type simply is not exposed.
    'py_int' : '&PyInt_Type',
    'py_long' : '&PyLong_Type',
    'py_float' : '&PyFloat_Type',
    'wrapper_descriptor' : '&PyWrapperDescr_Type',
}

basicsize_builtins_map = {
    # builtins whose type has a different tp_basicsize than sizeof(...)
    'PyTypeObject': 'PyHeapTypeObject',
}

uncachable_builtins = [
    # Global/builtin names that cannot be cached because they may or may not
    # be available at import time, for various reasons:
    ## - Py3.7+
    'breakpoint',  # might deserve an implementation in Cython
    ## - Py3.4+
    '__loader__',
    '__spec__',
    ## - Py3+
    'BlockingIOError',
    'BrokenPipeError',
    'ChildProcessError',
    'ConnectionAbortedError',
    'ConnectionError',
    'ConnectionRefusedError',
    'ConnectionResetError',
    'FileExistsError',
    'FileNotFoundError',
    'InterruptedError',
    'IsADirectoryError',
    'ModuleNotFoundError',
    'NotADirectoryError',
    'PermissionError',
    'ProcessLookupError',
    'RecursionError',
    'ResourceWarning',
    #'StopAsyncIteration',  # backported
    'TimeoutError',
    '__build_class__',
    'ascii',  # might deserve an implementation in Cython
    #'exec',  # implemented in Cython
    ## - platform specific
    'WindowsError',
    ## - others
    '_',  # e.g. used by gettext
]

# Special methods that get C-level slots rather than ordinary Python wrappers.
special_py_methods = cython.declare(frozenset, frozenset((
    '__cinit__', '__dealloc__', '__richcmp__', '__next__',
    '__await__', '__aiter__', '__anext__',
    '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__',
    '__getcharbuffer__', '__getbuffer__', '__releasebuffer__',
)))

# Maps C declaration modifiers to their portable C macro spelling
# (returns None for unknown modifiers, since this is a dict .get).
modifier_output_mapper = {
    'inline': 'CYTHON_INLINE'
}.get
class IncludeCode(object):
    """
    A C include file and/or a chunk of verbatim C code to be emitted
    into the generated sources.
    """
    # Attributes:
    #
    # pieces {order: unicode}: C code snippets keyed by emission order.
    #     Key 0 is reserved for the '#include' line itself.  Verbatim
    #     snippets are keyed by the "order" of the IncludeCode instance
    #     that first contributed them, which de-duplicates identical
    #     verbatim code reached through multiple cimports.
    # location int: one of INITIAL/EARLY/LATE below — where in the C
    #     sources this include belongs.
    # order int: global insertion order (monotonically increasing).

    # Location constants.  When the same include occurs with different
    # locations, the earliest (smallest) one takes precedence.
    INITIAL = 0
    EARLY = 1
    LATE = 2

    counter = 1  # class-level counter used to assign "order"

    def __init__(self, include=None, verbatim=None, late=True, initial=False):
        self.order = self.counter
        type(self).counter += 1
        self.pieces = {}

        if include:
            if include.startswith('<') and include.endswith('>'):
                # System header: never emitted late.
                self.pieces[0] = u'#include {0}'.format(include)
                late = False
            else:
                self.pieces[0] = u'#include "{0}"'.format(include)

        if verbatim:
            self.pieces[self.order] = verbatim

        if initial:
            self.location = self.INITIAL
        else:
            self.location = self.LATE if late else self.EARLY

    def dict_update(self, d, key):
        """
        Insert `self` into dict `d` under `key`.  On a key collision,
        merge this instance into the entry already stored there.
        """
        if key not in d:
            d[key] = self
        else:
            existing = d[key]
            existing.location = min(self.location, existing.location)
            existing.pieces.update(self.pieces)

    def sortkey(self):
        # Includes are ordered by insertion order.
        return self.order

    def mainpiece(self):
        """
        Return the '#include' line itself, or None if this instance
        only carries verbatim code.
        """
        return self.pieces.get(0)

    def write(self, code):
        # Emit all pieces in ascending key (i.e. insertion) order.
        for _, piece in sorted(self.pieces.items()):
            code.putln(piece)
def get_utility_dir():
    """Return the absolute path of Cython's "Utility" directory.

    Computed in a function rather than stored in module-level globals:
    http://trac.cython.org/cython_trac/ticket/475
    """
    compiler_dir = os.path.dirname(os.path.abspath(__file__))
    cython_dir = os.path.dirname(compiler_dir)
    return os.path.join(cython_dir, "Utility")
read_utilities_hook = None
"""
Override the hook for reading a utilities file that contains code fragments used
by the codegen.

The hook functions takes the path of the utilities file, and returns a list
of strings, one per line.

The default behavior is to open a file relative to get_utility_dir().
"""


def read_utilities_from_utility_dir(path):
    """
    Read all lines of the file at the provided path from a path relative
    to get_utility_dir().
    """
    filename = os.path.join(get_utility_dir(), path)
    # closing() guards the file handle even on old file-like objects
    # that are not context managers themselves.
    with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
        return f.readlines()


# by default, read utilities from the utility directory.
read_utilities_hook = read_utilities_from_utility_dir
class UtilityCodeBase(object):
    """
    Support for loading utility code from a file.

    Code sections in the file can be specified as follows:

        ##### MyUtility.proto #####

        [proto declarations]

        ##### MyUtility.init #####

        [code run at module initialization]

        ##### MyUtility #####
        #@requires: MyOtherUtility
        #@substitute: naming

        [definitions]

        ##### MyUtility #####
        #@subsitute: tempita

        [requires tempita substitution
         - context can't be specified here though so only
           tempita utility that requires no external context
           will benefit from this tag
         - only necessary when @required from non-tempita code]

    for prototypes and implementation respectively.  For non-python or
    -cython files backslashes should be used instead.  5 to 30 comment
    characters may be used on either side.

    If the @cname decorator is not used and this is a CythonUtilityCode,
    one should pass in the 'name' keyword argument to be used for name
    mangling of such entries.
    """

    is_cython_utility = False
    # Shared per-class cache: file path -> parsed utilities dict.
    _utility_cache = {}

    @classmethod
    def _add_utility(cls, utility, type, lines, begin_lineno, tags=None):
        # Store one parsed section ('proto'/'impl'/other) into the
        # [proto, impl, tags-dict] list that represents a utility.
        if utility is None:
            return
        code = '\n'.join(lines)
        if tags and 'substitute' in tags and 'naming' in tags['substitute']:
            # Substitute Naming.* values via string.Template ($name syntax).
            try:
                code = Template(code).substitute(vars(Naming))
            except (KeyError, ValueError) as e:
                raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % (
                    type, begin_lineno, e))

        # remember correct line numbers at least until after templating
        code = '\n' * begin_lineno + code

        if type == 'proto':
            utility[0] = code
        elif type == 'impl':
            utility[1] = code
        else:
            # Any other section type ('init', 'cleanup', ...) is kept in
            # the tags dict keyed by the section type.
            all_tags = utility[2]
            all_tags[type] = code

        if tags:
            all_tags = utility[2]
            for name, values in tags.items():
                all_tags.setdefault(name, set()).update(values)

    @classmethod
    def load_utilities_from_file(cls, path):
        """Parse a utility code file into {name: [proto, impl, tags]}."""
        utilities = cls._utility_cache.get(path)
        if utilities:
            return utilities

        # Choose comment syntax and stripping rules from the extension.
        _, ext = os.path.splitext(path)
        if ext in ('.pyx', '.py', '.pxd', '.pxi'):
            comment = '#'
            # Strip '#' comments, but keep '# cython:' directives.
            strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
            rstrip = StringEncoding._unicode.rstrip
        else:
            comment = '/'
            strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '')
            # Keep a trailing backslash (line continuation) while stripping
            # the whitespace before it.
            rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1')
        # Matches either a section header line or a '#@tag: value' line.
        match_special = re.compile(
            (r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|'
             r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') %
            {'C': comment}).match
        # Splits "Name.proto"/"Name.impl"/"Name.init"/"Name.cleanup".
        match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match

        all_lines = read_utilities_hook(path)

        utilities = defaultdict(lambda: [None, None, {}])
        lines = []
        tags = defaultdict(set)
        utility = type = None
        begin_lineno = 0

        for lineno, line in enumerate(all_lines):
            m = match_special(line)
            if m:
                if m.group('name'):
                    # New section header: flush the previous section first.
                    cls._add_utility(utility, type, lines, begin_lineno, tags)

                    begin_lineno = lineno + 1
                    del lines[:]
                    tags.clear()

                    name = m.group('name')
                    mtype = match_type(name)
                    if mtype:
                        name, type = mtype.groups()
                    else:
                        type = 'impl'
                    utility = utilities[name]
                else:
                    tags[m.group('tag')].add(m.group('value'))
                    lines.append('')  # keep line number correct
            else:
                lines.append(rstrip(strip_comments(line)))

        if utility is None:
            raise ValueError("Empty utility code file")

        # Don't forget to add the last utility code
        cls._add_utility(utility, type, lines, begin_lineno, tags)

        utilities = dict(utilities)  # un-defaultdict-ify
        cls._utility_cache[path] = utilities
        return utilities

    @classmethod
    def load(cls, util_code_name, from_file, **kwargs):
        """
        Load utility code from a file specified by from_file (relative to
        Cython/Utility) and name util_code_name.
        """
        if '::' in util_code_name:
            # "file.c::Name" syntax overrides from_file.
            from_file, util_code_name = util_code_name.rsplit('::', 1)
        assert from_file
        utilities = cls.load_utilities_from_file(from_file)
        proto, impl, tags = utilities[util_code_name]

        if tags:
            if "substitute" in tags and "tempita" in tags["substitute"]:
                # Delegate to the Tempita-aware subclass when requested.
                if not issubclass(cls, TempitaUtilityCode):
                    return TempitaUtilityCode.load(util_code_name, from_file, **kwargs)

            orig_kwargs = kwargs.copy()
            for name, values in tags.items():
                if name in kwargs:
                    continue
                # only pass lists when we have to: most argument expect one value or None
                if name == 'requires':
                    if orig_kwargs:
                        values = [cls.load(dep, from_file, **orig_kwargs)
                                  for dep in sorted(values)]
                    else:
                        # dependencies are rarely unique, so use load_cached() when we can
                        values = [cls.load_cached(dep, from_file)
                                  for dep in sorted(values)]
                elif name == 'substitute':
                    # don't want to pass "naming" or "tempita" to the constructor
                    # since these will have been handled
                    values = values - {'naming', 'tempita'}
                    if not values:
                        continue
                elif not values:
                    values = None
                elif len(values) == 1:
                    values = list(values)[0]
                kwargs[name] = values

        if proto is not None:
            kwargs['proto'] = proto
        if impl is not None:
            kwargs['impl'] = impl

        if 'name' not in kwargs:
            kwargs['name'] = util_code_name

        if 'file' not in kwargs and from_file:
            kwargs['file'] = from_file
        return cls(**kwargs)

    @classmethod
    def load_cached(cls, utility_code_name, from_file, __cache={}):
        """
        Calls .load(), but using a per-type cache based on utility name and file name.
        """
        # NOTE: '__cache' is an intentional shared mutable default acting
        # as a process-wide cache.
        key = (utility_code_name, from_file, cls)
        try:
            return __cache[key]
        except KeyError:
            pass
        code = __cache[key] = cls.load(utility_code_name, from_file)
        return code

    @classmethod
    def load_as_string(cls, util_code_name, from_file, **kwargs):
        """
        Load a utility code as a string. Returns (proto, implementation)
        """
        util = cls.load(util_code_name, from_file, **kwargs)
        proto, impl = util.proto, util.impl
        return util.format_code(proto), util.format_code(impl)

    def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub):
        """
        Format a code section for output.
        """
        # Collapses runs of blank lines and guarantees a trailing blank line.
        if code_string:
            code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n'
        return code_string

    def __str__(self):
        return "<%s(%s)>" % (type(self).__name__, self.name)

    def get_tree(self, **kwargs):
        # Overridden by CythonUtilityCode; plain C utilities have no tree.
        pass

    def __deepcopy__(self, memodict=None):
        # No need to deep-copy utility code since it's essentially immutable.
        return self
class UtilityCode(UtilityCodeBase):
    """
    Stores utility code to add during code generation.

    See GlobalState.put_utility_code.

    hashes/equals by instance

    proto           C prototypes
    impl            implementation code
    init            code to call on module initialization
    requires        utility code dependencies
    proto_block     the place in the resulting file where the prototype should
                    end up
    name            name of the utility code (or None)
    file            filename of the utility code file this utility was loaded
                    from (or None)
    """

    def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
                 proto_block='utility_code_proto', name=None, file=None):
        # proto_block: Which code block to dump prototype in. See GlobalState.
        self.proto = proto
        self.impl = impl
        self.init = init
        self.cleanup = cleanup
        self.requires = requires
        self._cache = {}            # specialization cache, see specialize()
        self.specialize_list = []
        self.proto_block = proto_block
        self.name = name
        self.file = file

    def __hash__(self):
        return hash((self.proto, self.impl))

    def __eq__(self, other):
        if self is other:
            return True
        # Only compare instances related by subclassing; unrelated types
        # are never equal.
        self_type, other_type = type(self), type(other)
        if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)):
            return False

        self_proto = getattr(self, 'proto', None)
        other_proto = getattr(other, 'proto', None)
        return (self_proto, self.impl) == (other_proto, other.impl)

    def none_or_sub(self, s, context):
        """
        Format a string in this utility code with context. If None, do nothing.
        """
        if s is None:
            return None
        return s % context

    def specialize(self, pyrex_type=None, **data):
        """Return a copy of this utility code with `data` %-substituted in,
        cached per substitution values."""
        # Dicts aren't hashable...
        if pyrex_type is not None:
            data['type'] = pyrex_type.empty_declaration_code()
            data['type_name'] = pyrex_type.specialization_name()
        key = tuple(sorted(data.items()))
        try:
            return self._cache[key]
        except KeyError:
            if self.requires is None:
                requires = None
            else:
                # Specialize dependencies with the same substitutions.
                requires = [r.specialize(data) for r in self.requires]

            s = self._cache[key] = UtilityCode(
                self.none_or_sub(self.proto, data),
                self.none_or_sub(self.impl, data),
                self.none_or_sub(self.init, data),
                self.none_or_sub(self.cleanup, data),
                requires,
                self.proto_block)

            self.specialize_list.append(s)
            return s

    def inject_string_constants(self, impl, output):
        """Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
        """
        if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
            return False, impl

        replacements = {}
        def externalise(matchobj):
            # Each (kind, name) pair maps to one shared string constant.
            key = matchobj.groups()
            try:
                cname = replacements[key]
            except KeyError:
                str_type, name = key
                cname = replacements[key] = output.get_py_string_const(
                    StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
            return cname

        impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
        assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
        return True, impl

    def inject_unbound_methods(self, impl, output):
        """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname.
        """
        if 'CALL_UNBOUND_METHOD(' not in impl:
            return False, impl

        def externalise(matchobj):
            type_cname, method_name, obj_cname, args = matchobj.groups()
            # args matched as ", arg1, arg2" -> strip leading comma, split.
            args = [arg.strip() for arg in args[1:].split(',')] if args else []
            assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
            return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)

        impl = re.sub(
            r'CALL_UNBOUND_METHOD\('
            r'([a-zA-Z_]+),'      # type cname
            r'\s*"([^"]+)",'     # method name
            r'\s*([^),]+)'       # object cname
            r'((?:,\s*[^),]+)*)' # args*
            r'\)', externalise, impl)
        assert 'CALL_UNBOUND_METHOD(' not in impl
        return True, impl

    def wrap_c_strings(self, impl):
        """Replace CSTRING('''xyz''') by a C compatible string
        """
        if 'CSTRING(' not in impl:
            return impl

        def split_string(matchobj):
            # Escape double quotes (octal \042) and emit one C string
            # literal per line; a trailing single backslash is treated as
            # a line continuation (no '\n' appended).
            content = matchobj.group(1).replace('"', '\042')
            return ''.join(
                '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1]
                for line in content.splitlines())

        impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl)
        assert 'CSTRING(' not in impl
        return impl

    def put_code(self, output):
        """Emit this utility (and its dependencies) into `output`."""
        if self.requires:
            for dependency in self.requires:
                output.use_utility_code(dependency)
        if self.proto:
            writer = output[self.proto_block]
            writer.putln("/* %s.proto */" % self.name)
            writer.put_or_include(
                self.format_code(self.proto), '%s_proto' % self.name)
        if self.impl:
            impl = self.format_code(self.wrap_c_strings(self.impl))
            is_specialised1, impl = self.inject_string_constants(impl, output)
            is_specialised2, impl = self.inject_unbound_methods(impl, output)
            writer = output['utility_code_def']
            writer.putln("/* %s */" % self.name)
            if not (is_specialised1 or is_specialised2):
                # no module specific adaptations => can be reused
                writer.put_or_include(impl, '%s_impl' % self.name)
            else:
                writer.put(impl)

        if self.init:
            writer = output['init_globals']
            writer.putln("/* %s.init */" % self.name)
            # 'init' may be a code string or a callable writing the code.
            if isinstance(self.init, basestring):
                writer.put(self.format_code(self.init))
            else:
                self.init(writer, output.module_pos)
            writer.putln(writer.error_goto_if_PyErr(output.module_pos))
            writer.putln()
        if self.cleanup and Options.generate_cleanup_code:
            writer = output['cleanup_globals']
            writer.putln("/* %s.cleanup */" % self.name)
            if isinstance(self.cleanup, basestring):
                writer.put_or_include(
                    self.format_code(self.cleanup),
                    '%s_cleanup' % self.name)
            else:
                self.cleanup(writer, output.module_pos)
def sub_tempita(s, context, file=None, name=None):
    """Run tempita on string `s` with the given `context`.

    Returns None for an empty/None input.  As a side effect, stores the
    template's origin under ``context['__name']`` for error reporting.
    """
    if not s:
        return None

    # Identify the template for Tempita's error messages.
    origin = "%s:%s" % (file, name) if file else name
    if origin:
        context['__name'] = origin

    from ..Tempita import sub  # deferred import: only needed when substituting
    return sub(s, **context)
class TempitaUtilityCode(UtilityCode):
    """UtilityCode whose proto/impl/init sections are Tempita templates,
    substituted with a context dict at construction time."""

    def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs):
        if context is None:
            context = {}
        # Substitute all template sections before handing them to the base class.
        proto = sub_tempita(proto, context, file, name)
        impl = sub_tempita(impl, context, file, name)
        init = sub_tempita(init, context, file, name)
        super(TempitaUtilityCode, self).__init__(
            proto, impl, init=init, name=name, file=file, **kwargs)

    @classmethod
    def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}):
        # NOTE: '__cache' is an intentional shared mutable default acting
        # as a process-wide cache, additionally keyed by the context.
        context_key = tuple(sorted(context.items())) if context else None
        assert hash(context_key) is not None  # raise TypeError if not hashable
        key = (cls, from_file, utility_code_name, context_key)
        try:
            return __cache[key]
        except KeyError:
            pass
        code = __cache[key] = cls.load(utility_code_name, from_file, context=context)
        return code

    def none_or_sub(self, s, context):
        """
        Format a string in this utility code with context. If None, do nothing.
        """
        if s is None:
            return None
        return sub_tempita(s, context, self.file, self.name)
class LazyUtilityCode(UtilityCodeBase):
    """
    Utility code that calls a callback with the root code writer when
    available. Useful when you only have 'env' but not 'code'.
    """
    __name__ = '<lazy>'
    requires = None

    def __init__(self, callback):
        # All work is deferred until the root code writer exists.
        self.callback = callback

    def put_code(self, globalstate):
        # Resolve the callback against the root writer and register the
        # utility code it produces.
        produced = self.callback(globalstate.rootwriter)
        globalstate.use_utility_code(produced)
class FunctionState(object):
    """Per-function code generation state: label management and the
    allocation/reuse of C temporaries."""
    # return_label       string          function return point label
    # error_label        string          error catch point label
    # continue_label     string          loop continue point label
    # break_label        string          loop break point label
    # return_from_error_cleanup_label string
    # label_counter      integer         counter for naming labels
    # in_try_finally     boolean         inside try of try...finally
    # exc_vars           (string * 3)    exception variables for reraise, or None
    # can_trace          boolean         line tracing is supported in the current context
    # scope              Scope           the scope object of the current function

    # Not used for now, perhaps later
    def __init__(self, owner, names_taken=set(), scope=None):
        # NOTE: 'names_taken' has a shared mutable default; within this
        # class it is only membership-tested, never mutated.
        self.names_taken = names_taken
        self.owner = owner
        self.scope = scope

        self.error_label = None
        self.label_counter = 0
        self.labels_used = set()
        self.return_label = self.new_label()
        self.new_error_label()
        self.continue_label = None
        self.break_label = None
        self.yield_labels = []

        self.in_try_finally = 0
        self.exc_vars = None
        self.current_except = None
        self.can_trace = False
        self.gil_owned = True

        self.temps_allocated = []  # of (name, type, manage_ref, static)
        self.temps_free = {}  # (type, manage_ref) -> list of free vars with same type/managed status
        self.temps_used_type = {}  # name -> (type, manage_ref)
        self.zombie_temps = set()  # temps that must not be reused after release
        self.temp_counter = 0
        self.closure_temps = None

        # This is used to collect temporaries, useful to find out which temps
        # need to be privatized in parallel sections
        self.collect_temps_stack = []

        # This is used for the error indicator, which needs to be local to the
        # function. It used to be global, which relies on the GIL being held.
        # However, exceptions may need to be propagated through 'nogil'
        # sections, in which case we introduce a race condition.
        self.should_declare_error_indicator = False
        self.uses_error_indicator = False

    # safety checks

    def validate_exit(self):
        # validate that all allocated temps have been freed
        if self.temps_allocated:
            leftovers = self.temps_in_use()
            if leftovers:
                msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
                    '%s [%s]' % (name, ctype)
                    for name, ctype, is_pytemp in sorted(leftovers)]),
                )
                #print(msg)
                raise RuntimeError(msg)

    # labels

    def new_label(self, name=None):
        # Labels are numbered globally per function; an optional suffix
        # makes the generated C easier to read.
        n = self.label_counter
        self.label_counter = n + 1
        label = "%s%d" % (Naming.label_prefix, n)
        if name is not None:
            label += '_' + name
        return label

    def new_yield_label(self, expr_type='yield'):
        # Returns (1-based yield number, label); the number indexes the
        # generator resume switch.
        label = self.new_label('resume_from_%s' % expr_type)
        num_and_label = (len(self.yield_labels) + 1, label)
        self.yield_labels.append(num_and_label)
        return num_and_label

    def new_error_label(self):
        # Installs a fresh error label and returns the previous one so the
        # caller can restore it afterwards.
        old_err_lbl = self.error_label
        self.error_label = self.new_label('error')
        return old_err_lbl

    def get_loop_labels(self):
        return (
            self.continue_label,
            self.break_label)

    def set_loop_labels(self, labels):
        (self.continue_label,
         self.break_label) = labels

    def new_loop_labels(self):
        # Push fresh continue/break labels, returning the old pair for
        # restoration when the loop ends.
        old_labels = self.get_loop_labels()
        self.set_loop_labels(
            (self.new_label("continue"),
             self.new_label("break")))
        return old_labels

    def get_all_labels(self):
        return (
            self.continue_label,
            self.break_label,
            self.return_label,
            self.error_label)

    def set_all_labels(self, labels):
        (self.continue_label,
         self.break_label,
         self.return_label,
         self.error_label) = labels

    def all_new_labels(self):
        # Replace every non-empty label with a fresh one; empty (unset)
        # labels stay unset.
        old_labels = self.get_all_labels()
        new_labels = []
        for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
            if old_label:
                new_labels.append(self.new_label(name))
            else:
                new_labels.append(old_label)
        self.set_all_labels(new_labels)
        return old_labels

    def use_label(self, lbl):
        self.labels_used.add(lbl)

    def label_used(self, lbl):
        return lbl in self.labels_used

    # temp handling

    def allocate_temp(self, type, manage_ref, static=False, reusable=True):
        """
        Allocates a temporary (which may create a new one or get a previously
        allocated and released one of the same type). Type is simply registered
        and handed back, but will usually be a PyrexType.

        If type.is_pyobject, manage_ref comes into play. If manage_ref is set to
        True, the temp will be decref-ed on return statements and in exception
        handling clauses. Otherwise the caller has to deal with any reference
        counting of the variable.

        If not type.is_pyobject, then manage_ref will be ignored, but it
        still has to be passed. It is recommended to pass False by convention
        if it is known that type will never be a Python object.

        static=True marks the temporary declaration with "static".
        This is only used when allocating backing store for a module-level
        C array literals.

        if reusable=False, the temp will not be reused after release.

        A C string referring to the variable is returned.
        """
        # Normalize the type so equivalent temps share a freelist.
        if type.is_cv_qualified and not type.is_reference:
            type = type.cv_base_type
        elif type.is_reference and not type.is_fake_reference:
            type = type.ref_base_type
        elif type.is_cfunction:
            from . import PyrexTypes
            type = PyrexTypes.c_ptr_type(type)  # A function itself isn't an l-value
        elif type.is_cpp_class and self.scope.directives['cpp_locals']:
            self.scope.use_utility_code(UtilityCode.load_cached("OptionalLocals", "CppSupport.cpp"))
        if not type.is_pyobject and not type.is_memoryviewslice:
            # Make manage_ref canonical, so that manage_ref will always mean
            # a decref is needed.
            manage_ref = False

        freelist = self.temps_free.get((type, manage_ref))
        if reusable and freelist is not None and freelist[0]:
            # Reuse a previously released temp of the same kind.
            result = freelist[0].pop()
            freelist[1].remove(result)
        else:
            # Invent a fresh name that does not clash with user names.
            while True:
                self.temp_counter += 1
                result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
                if result not in self.names_taken: break
            self.temps_allocated.append((result, type, manage_ref, static))
            if not reusable:
                self.zombie_temps.add(result)
        self.temps_used_type[result] = (type, manage_ref)
        if DebugFlags.debug_temp_code_comments:
            self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))

        if self.collect_temps_stack:
            self.collect_temps_stack[-1].add((result, type))

        return result

    def release_temp(self, name):
        """
        Releases a temporary so that it can be reused by other code needing
        a temp of the same type.
        """
        type, manage_ref = self.temps_used_type[name]
        freelist = self.temps_free.get((type, manage_ref))
        if freelist is None:
            freelist = ([], set())  # keep order in list and make lookups in set fast
            self.temps_free[(type, manage_ref)] = freelist
        if name in freelist[1]:
            raise RuntimeError("Temp %s freed twice!" % name)
        if name not in self.zombie_temps:
            # Zombie temps are tracked as released but never handed out again.
            freelist[0].append(name)
        freelist[1].add(name)
        if DebugFlags.debug_temp_code_comments:
            self.owner.putln("/* %s released %s*/" % (
                name, " - zombie" if name in self.zombie_temps else ""))

    def temps_in_use(self):
        """Return a list of (cname,type,manage_ref) tuples of temp names and their type
        that are currently in use.
        """
        used = []
        for name, type, manage_ref, static in self.temps_allocated:
            freelist = self.temps_free.get((type, manage_ref))
            if freelist is None or name not in freelist[1]:
                used.append((name, type, manage_ref and type.is_pyobject))
        return used

    def temps_holding_reference(self):
        """Return a list of (cname,type) tuples of temp names and their type
        that are currently in use. This includes only temps of a
        Python object type which owns its reference.
        """
        return [(name, type)
                for name, type, manage_ref in self.temps_in_use()
                if manage_ref and type.is_pyobject]

    def all_managed_temps(self):
        """Return a list of (cname, type) tuples of refcount-managed Python objects.
        """
        return [(cname, type)
                for cname, type, manage_ref, static in self.temps_allocated
                if manage_ref]

    def all_free_managed_temps(self):
        """Return a list of (cname, type) tuples of refcount-managed Python
        objects that are not currently in use. This is used by
        try-except and try-finally blocks to clean up temps in the
        error case.
        """
        return sorted([  # Enforce deterministic order.
            (cname, type)
            for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
            for cname in freelist[0]
        ])

    def start_collecting_temps(self):
        """
        Useful to find out which temps were used in a code block
        """
        self.collect_temps_stack.append(set())

    def stop_collecting_temps(self):
        return self.collect_temps_stack.pop()

    def init_closure_temps(self, scope):
        self.closure_temps = ClosureTempAllocator(scope)
class NumConst(object):
    """Global info about a Python number constant held by GlobalState.

    cname       string
    value       string
    py_type     string     int, long, float
    value_code  string     evaluation code if different from value
    """

    def __init__(self, cname, value, py_type, value_code=None):
        self.cname = cname
        self.py_type = py_type
        self.value = value
        # Evaluation code falls back to the literal value itself.
        self.value_code = value_code if value_code else value
class PyObjectConst(object):
    """Global info about a generic constant held by GlobalState."""
    # cname  string
    # type   PyrexType

    def __init__(self, cname, type):
        self.type = type
        self.cname = cname
cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object,
               replace_identifier=object, find_alphanums=object)
# Matchers for text that could be interned as a Python identifier
# (no leading digit, word characters only).
possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match
possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match
# Substitution helper collapsing runs of non-identifier characters.
replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub
# Extracts alphanumeric runs, e.g. to build encoding-name key suffixes.
find_alphanums = re.compile('([a-zA-Z0-9]+)').findall
class StringConst(object):
    """Global info about a C string constant held by GlobalState.
    """
    # cname       string
    # text        EncodedString or BytesLiteral
    # py_strings  {(identifier, encoding) : PyStringConst}
    def __init__(self, cname, text, byte_string):
        self.cname = cname
        self.text = text
        self.escaped_value = StringEncoding.escape_byte_string(byte_string)
        # Lazily created cache of PyStringConst objects, see get_py_string_const().
        self.py_strings = None
        # Major Python versions (2 and/or 3) that need this C string.
        self.py_versions = []
    def add_py_version(self, version):
        # Record that *version* needs this constant; a falsy version means both.
        if not version:
            self.py_versions = [2, 3]
        elif version not in self.py_versions:
            self.py_versions.append(version)
    def get_py_string_const(self, encoding, identifier=None,
                            is_str=False, py3str_cstring=None):
        """Return (creating and caching on first use) the PyStringConst for
        this C string under the given encoding / str-ness combination.
        """
        py_strings = self.py_strings
        text = self.text
        is_str = bool(identifier or is_str)
        is_unicode = encoding is None and not is_str
        if encoding is None:
            # unicode string
            encoding_key = None
        else:
            # bytes or str
            encoding = encoding.lower()
            if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
                # Default encodings need no marker in the key or the C name.
                encoding = None
                encoding_key = None
            else:
                encoding_key = ''.join(find_alphanums(encoding))
        key = (is_str, is_unicode, encoding_key, py3str_cstring)
        if py_strings is not None:
            try:
                return py_strings[key]
            except KeyError:
                pass
        else:
            self.py_strings = {}
        if identifier:
            intern = True
        elif identifier is None:
            # Not explicitly an identifier, but intern anyway if it looks like one.
            if isinstance(text, bytes):
                intern = bool(possible_bytes_identifier(text))
            else:
                intern = bool(possible_unicode_identifier(text))
        else:
            intern = False
        if intern:
            prefix = Naming.interned_prefixes['str']
        else:
            prefix = Naming.py_const_prefix
        if encoding_key:
            encoding_prefix = '_%s' % encoding_key
        else:
            encoding_prefix = ''
        # C name layout: prefix + kind char (s/u/b) + encoding marker + base name.
        pystring_cname = "%s%s%s_%s" % (
            prefix,
            (is_str and 's') or (is_unicode and 'u') or 'b',
            encoding_prefix,
            self.cname[len(Naming.const_prefix):])
        py_string = PyStringConst(
            pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
        self.py_strings[key] = py_string
        return py_string
class PyStringConst(object):
    """Global info about a Python string constant held by GlobalState.

    Attributes:
        cname           string
        py3str_cstring  string
        encoding        string
        intern          boolean
        is_unicode      boolean
        is_str          boolean
    """
    def __init__(self, cname, encoding, is_unicode, is_str=False,
                 py3str_cstring=None, intern=False):
        self.cname = cname
        self.encoding = encoding
        self.is_unicode = is_unicode
        self.is_str = is_str
        self.py3str_cstring = py3str_cstring
        self.intern = intern
    def __lt__(self, other):
        # Order string constants by C name so generated tables are deterministic.
        return other.cname > self.cname
class GlobalState(object):
    """State that is global to one generated C file.

    Owns the named output sections (``parts``), the interned constant
    tables (numbers, strings, generic Python objects), the filename table
    used for annotation, and the set of utility code already inserted.
    One instance is shared by every CCodeWriter derived from the same
    root writer.
    """
    # filename_table   {string : int}  for finding filename table indexes
    # filename_list    [string]        filenames in filename table order
    # input_file_contents dict  contents (=list of lines) of any file that was used as input
    #                           to create this output C code.  This is
    #                           used to annotate the comments.
    #
    # utility_codes    set     IDs of used utility code (to avoid reinsertion)
    #
    # declared_cnames  {string:Entry}  used in a transition phase to merge pxd-declared
    #                                  constants etc. into the pyx-declared ones (i.e,
    #                                  check if constants are already added).
    #                                  In time, hopefully the literals etc. will be
    #                                  supplied directly instead.
    #
    # const_cnames_used  dict  global counter for unique constant identifiers
    #
    # parts            {string:CCodeWriter}
    # interned_strings
    # consts
    # interned_nums
    # directives       set     Temporary variable used to track
    #                          the current set of directives in the code generation
    #                          process.
    directives = {}
    # Ordered list of named sections of the generated C file; each becomes
    # an insertion point in self.parts (see initialize_main_c_code()).
    code_layout = [
        'h_code',
        'filename_table',
        'utility_code_proto_before_types',
        'numeric_typedefs',           # Let these detailed individual parts stay!,
        'complex_type_declarations',  # as the proper solution is to make a full DAG...
        'type_declarations',          # More coarse-grained blocks would simply hide
        'utility_code_proto',         # the ugliness, not fix it
        'module_declarations',
        'typeinfo',
        'before_global_var',
        'global_var',
        'string_decls',
        'decls',
        'late_includes',
        'module_state',
        'module_state_clear',
        'module_state_traverse',
        'module_state_defines',  # redefines names used in module_state/_clear/_traverse
        'module_code',  # user code goes here
        'pystring_table',
        'cached_builtins',
        'cached_constants',
        'init_globals',
        'init_module',
        'cleanup_globals',
        'cleanup_module',
        'main_method',
        'utility_code_def',
        'end'
    ]
    # h files can only have a much smaller list of sections
    h_code_layout = [
        'h_code',
        'utility_code_proto_before_types',
        'type_declarations',
        'utility_code_proto',
        'end'
    ]
    def __init__(self, writer, module_node, code_config, common_utility_include_dir=None):
        self.filename_table = {}
        self.filename_list = []
        self.input_file_contents = {}
        self.utility_codes = set()
        self.declared_cnames = {}
        self.in_utility_code_generation = False
        self.code_config = code_config
        self.common_utility_include_dir = common_utility_include_dir
        self.parts = {}
        self.module_node = module_node  # because some utility code generation needs it
                                        # (generating backwards-compatible Get/ReleaseBuffer
        self.const_cnames_used = {}
        self.string_const_index = {}
        self.dedup_const_index = {}
        self.pyunicode_ptr_const_index = {}
        self.num_const_index = {}
        self.py_constants = []
        self.cached_cmethods = {}
        self.initialised_constants = set()
        # Make *writer* the root writer that owns this global state.
        writer.set_global_state(self)
        self.rootwriter = writer
    def initialize_main_c_code(self):
        """Create one insertion point per section of code_layout and open the
        generated helper functions (init builtins/constants/globals, cleanup).
        """
        rootwriter = self.rootwriter
        for i, part in enumerate(self.code_layout):
            w = self.parts[part] = rootwriter.insertion_point()
            if i > 0:
                w.putln("/* #### Code section: %s ### */" % part)
        if not Options.cache_builtins:
            del self.parts['cached_builtins']
        else:
            w = self.parts['cached_builtins']
            w.enter_cfunc_scope()
            w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {")
        w = self.parts['cached_constants']
        w.enter_cfunc_scope()
        w.putln("")
        w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {")
        w.put_declare_refcount_context()
        w.put_setup_refcount_context(StringEncoding.EncodedString("__Pyx_InitCachedConstants"))
        w = self.parts['init_globals']
        w.enter_cfunc_scope()
        w.putln("")
        w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {")
        if not Options.generate_cleanup_code:
            del self.parts['cleanup_globals']
        else:
            w = self.parts['cleanup_globals']
            w.enter_cfunc_scope()
            w.putln("")
            w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {")
        code = self.parts['utility_code_proto']
        code.putln("")
        code.putln("/* --- Runtime support code (head) --- */")
        code = self.parts['utility_code_def']
        if self.code_config.emit_linenums:
            code.write('\n#line 1 "cython_utility"\n')
        code.putln("")
        code.putln("/* --- Runtime support code --- */")
    def initialize_main_h_code(self):
        """Create the (much smaller) set of sections used for .h files."""
        rootwriter = self.rootwriter
        for part in self.h_code_layout:
            self.parts[part] = rootwriter.insertion_point()
    def finalize_main_c_code(self):
        """Close the global declarations and append the type conversion
        utility code at the end of the C file.
        """
        self.close_global_decls()
        #
        # utility_code_def
        #
        code = self.parts['utility_code_def']
        util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c")
        code.put(util.format_code(util.impl))
        code.putln("")
    def __getitem__(self, key):
        # Shorthand section access: global_state['part'] -> that part's writer.
        return self.parts[key]
    #
    # Global constants, interned objects, etc.
    #
    def close_global_decls(self):
        # This is called when it is known that no more global declarations will
        # declared.
        self.generate_const_declarations()
        if Options.cache_builtins:
            w = self.parts['cached_builtins']
            w.putln("return 0;")
            if w.label_used(w.error_label):
                w.put_label(w.error_label)
                w.putln("return -1;")
            w.putln("}")
            w.exit_cfunc_scope()
        w = self.parts['cached_constants']
        w.put_finish_refcount_context()
        w.putln("return 0;")
        if w.label_used(w.error_label):
            w.put_label(w.error_label)
            w.put_finish_refcount_context()
            w.putln("return -1;")
        w.putln("}")
        w.exit_cfunc_scope()
        w = self.parts['init_globals']
        w.putln("return 0;")
        if w.label_used(w.error_label):
            w.put_label(w.error_label)
            w.putln("return -1;")
        w.putln("}")
        w.exit_cfunc_scope()
        if Options.generate_cleanup_code:
            w = self.parts['cleanup_globals']
            w.putln("}")
            w.exit_cfunc_scope()
        if Options.generate_cleanup_code:
            w = self.parts['cleanup_module']
            w.putln("}")
            w.exit_cfunc_scope()
    def put_pyobject_decl(self, entry):
        # Declare a global PyObject* variable for *entry*.
        self['global_var'].putln("static PyObject *%s;" % entry.cname)
    # constant handling at code generation time
    def get_cached_constants_writer(self, target=None):
        if target is not None:
            if target in self.initialised_constants:
                # Return None on second/later calls to prevent duplicate creation code.
                return None
            self.initialised_constants.add(target)
        return self.parts['cached_constants']
    def get_int_const(self, str_value, longness=False):
        """Return the NumConst for an int literal, creating it on first use."""
        py_type = longness and 'long' or 'int'
        try:
            c = self.num_const_index[(str_value, py_type)]
        except KeyError:
            c = self.new_num_const(str_value, py_type)
        return c
    def get_float_const(self, str_value, value_code):
        """Return the NumConst for a float literal, creating it on first use."""
        try:
            c = self.num_const_index[(str_value, 'float')]
        except KeyError:
            c = self.new_num_const(str_value, 'float', value_code)
        return c
    def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
        """Return a generic PyObjectConst, deduplicated by *dedup_key* if
        given, registering its cleanup code when requested.
        """
        if dedup_key is not None:
            const = self.dedup_const_index.get(dedup_key)
            if const is not None:
                return const
        # create a new Python object constant
        const = self.new_py_const(type, prefix)
        if cleanup_level is not None \
                and cleanup_level <= Options.generate_cleanup_code:
            cleanup_writer = self.parts['cleanup_globals']
            cleanup_writer.putln('Py_CLEAR(%s);' % const.cname)
        if dedup_key is not None:
            self.dedup_const_index[dedup_key] = const
        return const
    def get_string_const(self, text, py_version=None):
        # return a C string constant, creating a new one if necessary
        if text.is_unicode:
            byte_string = text.utf8encode()
        else:
            byte_string = text.byteencode()
        try:
            c = self.string_const_index[byte_string]
        except KeyError:
            c = self.new_string_const(text, byte_string)
        c.add_py_version(py_version)
        return c
    def get_pyunicode_ptr_const(self, text):
        # return a Py_UNICODE[] constant, creating a new one if necessary
        assert text.is_unicode
        try:
            c = self.pyunicode_ptr_const_index[text]
        except KeyError:
            c = self.pyunicode_ptr_const_index[text] = self.new_const_cname()
        return c
    def get_py_string_const(self, text, identifier=None,
                            is_str=False, unicode_value=None):
        # return a Python string constant, creating a new one if necessary
        py3str_cstring = None
        if is_str and unicode_value is not None \
                and unicode_value.utf8encode() != text.byteencode():
            # Py2 and Py3 need different C byte strings for this value.
            py3str_cstring = self.get_string_const(unicode_value, py_version=3)
            c_string = self.get_string_const(text, py_version=2)
        else:
            c_string = self.get_string_const(text)
        py_string = c_string.get_py_string_const(
            text.encoding, identifier, is_str, py3str_cstring)
        return py_string
    def get_interned_identifier(self, text):
        # Interned Python string usable as an identifier (attribute name etc.).
        return self.get_py_string_const(text, identifier=True)
    def new_string_const(self, text, byte_string):
        # Allocate and index a new C string constant.
        cname = self.new_string_const_cname(byte_string)
        c = StringConst(cname, text, byte_string)
        self.string_const_index[byte_string] = c
        return c
    def new_num_const(self, value, py_type, value_code=None):
        # Allocate and index a new number constant.
        cname = self.new_num_const_cname(value, py_type)
        c = NumConst(cname, value, py_type, value_code)
        self.num_const_index[(value, py_type)] = c
        return c
    def new_py_const(self, type, prefix=''):
        # Allocate a new generic Python object constant.
        cname = self.new_const_cname(prefix)
        c = PyObjectConst(cname, type)
        self.py_constants.append(c)
        return c
    def new_string_const_cname(self, bytes_value):
        # Create a new globally-unique nice name for a C string constant.
        value = bytes_value.decode('ASCII', 'ignore')
        return self.new_const_cname(value=value)
    def new_num_const_cname(self, value, py_type):
        """Build the C name of a number constant from its value."""
        if py_type == 'long':
            # Longs share the int prefix but get an 'L' marker in the name.
            value += 'L'
            py_type = 'int'
        prefix = Naming.interned_prefixes[py_type]
        cname = "%s%s" % (prefix, value)
        # Make sign and decimal point usable inside a C identifier.
        cname = cname.replace('+', '_').replace('-', 'neg_').replace('.', '_')
        return cname
    def new_const_cname(self, prefix='', value=''):
        """Return a new globally unique constant C name derived from *value*."""
        value = replace_identifier('_', value)[:32].strip('_')
        used = self.const_cnames_used
        name_suffix = value
        while name_suffix in used:
            # Name clash: bump the per-value counter until the name is unique.
            counter = used[value] = used[value] + 1
            name_suffix = '%s_%d' % (value, counter)
        used[name_suffix] = 1
        if prefix:
            prefix = Naming.interned_prefixes[prefix]
        else:
            prefix = Naming.const_prefix
        return "%s%s" % (prefix, name_suffix)
    def get_cached_unbound_method(self, type_cname, method_name):
        # C name of the cached unbound C method struct for (type, method).
        key = (type_cname, method_name)
        try:
            cname = self.cached_cmethods[key]
        except KeyError:
            cname = self.cached_cmethods[key] = self.new_const_cname(
                'umethod', '%s_%s' % (type_cname, method_name))
        return cname
    def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
        # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
        utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
        self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
        cache_cname = self.get_cached_unbound_method(type_cname, method_name)
        args = [obj_cname] + arg_cnames
        return "__Pyx_%s(&%s, %s)" % (
            utility_code_name,
            cache_cname,
            ', '.join(args),
        )
    def add_cached_builtin_decl(self, entry):
        """Declare a cached builtin object and emit its (possibly version
        dependent) initialisation code.
        """
        if entry.is_builtin and entry.is_const:
            if self.should_declare(entry.cname, entry):
                self.put_pyobject_decl(entry)
                w = self.parts['cached_builtins']
                condition = None
                if entry.name in non_portable_builtins_map:
                    # Builtin is named differently across Python versions.
                    condition, replacement = non_portable_builtins_map[entry.name]
                    w.putln('#if %s' % condition)
                    self.put_cached_builtin_init(
                        entry.pos, StringEncoding.EncodedString(replacement),
                        entry.cname)
                    w.putln('#else')
                self.put_cached_builtin_init(
                    entry.pos, StringEncoding.EncodedString(entry.name),
                    entry.cname)
                if condition:
                    w.putln('#endif')
    def put_cached_builtin_init(self, pos, name, cname):
        # Emit '<cname> = __Pyx_GetBuiltinName(<interned name>);' with error check.
        w = self.parts['cached_builtins']
        interned_cname = self.get_interned_identifier(name).cname
        self.use_utility_code(
            UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
        w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % (
            cname,
            interned_cname,
            cname,
            w.error_goto(pos)))
    def generate_const_declarations(self):
        """Emit declarations and init code for all collected constants."""
        self.generate_cached_methods_decls()
        self.generate_string_constants()
        self.generate_num_constants()
        self.generate_object_constant_decls()
    def generate_object_constant_decls(self):
        """Declare the generic Python object constants, sorted by C name."""
        consts = [(len(c.cname), c.cname, c)
                  for c in self.py_constants]
        consts.sort()
        decls_writer = self.parts['decls']
        decls_writer.putln("#if !CYTHON_USE_MODULE_STATE")
        for _, cname, c in consts:
            self.parts['module_state'].putln("%s;" % c.type.declaration_code(cname))
            self.parts['module_state_defines'].putln(
                "#define %s %s->%s" % (cname, Naming.modulestateglobal_cname, cname))
            self.parts['module_state_clear'].putln(
                "Py_CLEAR(clear_module_state->%s);" % cname)
            self.parts['module_state_traverse'].putln(
                "Py_VISIT(traverse_module_state->%s);" % cname)
            decls_writer.putln(
                "static %s;" % c.type.declaration_code(cname))
        decls_writer.putln("#endif")
    def generate_cached_methods_decls(self):
        """Declare and initialise the cached unbound C method structs."""
        if not self.cached_cmethods:
            return
        decl = self.parts['decls']
        init = self.parts['init_globals']
        cnames = []
        for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
            cnames.append(cname)
            method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
            decl.putln('static __Pyx_CachedCFunction %s = {0, 0, 0, 0, 0};' % (
                cname))
            # split type reference storage as it might not be static
            init.putln('%s.type = (PyObject*)&%s;' % (
                cname, type_cname))
            # method name string isn't static in limited api
            init.putln('%s.method_name = &%s;' % (
                cname, method_name_cname))
        if Options.generate_cleanup_code:
            cleanup = self.parts['cleanup_globals']
            for cname in cnames:
                cleanup.putln("Py_CLEAR(%s.method);" % cname)
    def generate_string_constants(self):
        """Emit the C byte strings, the Python string table and the code that
        initialises the Python string objects from it.
        """
        c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()]
        c_consts.sort()
        py_strings = []
        decls_writer = self.parts['string_decls']
        for _, cname, c in c_consts:
            conditional = False
            if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions):
                # Only needed on one major Python version: guard the declaration.
                conditional = True
                decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % (
                    (2 in c.py_versions) and '<' or '>='))
            decls_writer.putln('static const char %s[] = "%s";' % (
                cname, StringEncoding.split_string_literal(c.escaped_value)))
            if conditional:
                decls_writer.putln("#endif")
            if c.py_strings is not None:
                for py_string in c.py_strings.values():
                    py_strings.append((c.cname, len(py_string.cname), py_string))
        for c, cname in sorted(self.pyunicode_ptr_const_index.items()):
            utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c)
            if utf16_array:
                # Narrow and wide representations differ
                decls_writer.putln("#ifdef Py_UNICODE_WIDE")
            decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array))
            if utf16_array:
                decls_writer.putln("#else")
                decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array))
                decls_writer.putln("#endif")
        init_globals = self.parts['init_globals']
        if py_strings:
            self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c"))
            py_strings.sort()
            w = self.parts['pystring_table']
            w.putln("")
            w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname)
            # The string table rows differ between module-state and static builds.
            w.putln("#if CYTHON_USE_MODULE_STATE")
            w_in_module_state = w.insertion_point()
            w.putln("#else")
            w_not_in_module_state = w.insertion_point()
            w.putln("#endif")
            decls_writer.putln("#if !CYTHON_USE_MODULE_STATE")
            not_limited_api_decls_writer = decls_writer.insertion_point()
            decls_writer.putln("#endif")
            init_globals.putln("#if CYTHON_USE_MODULE_STATE")
            init_globals_in_module_state = init_globals.insertion_point()
            init_globals.putln("#endif")
            for idx, py_string_args in enumerate(py_strings):
                c_cname, _, py_string = py_string_args
                if not py_string.is_str or not py_string.encoding or \
                        py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII',
                                               'UTF8', 'UTF-8'):
                    encoding = '0'
                else:
                    encoding = '"%s"' % py_string.encoding.lower()
                self.parts['module_state'].putln("PyObject *%s;" % py_string.cname)
                self.parts['module_state_defines'].putln("#define %s %s->%s" % (
                    py_string.cname,
                    Naming.modulestateglobal_cname,
                    py_string.cname))
                self.parts['module_state_clear'].putln("Py_CLEAR(clear_module_state->%s);" %
                                                       py_string.cname)
                self.parts['module_state_traverse'].putln("Py_VISIT(traverse_module_state->%s);" %
                                                          py_string.cname)
                not_limited_api_decls_writer.putln(
                    "static PyObject *%s;" % py_string.cname)
                if py_string.py3str_cstring:
                    # Python 3 uses a different C byte string for this value.
                    w_not_in_module_state.putln("#if PY_MAJOR_VERSION >= 3")
                    w_not_in_module_state.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
                        py_string.cname,
                        py_string.py3str_cstring.cname,
                        py_string.py3str_cstring.cname,
                        '0', 1, 0,
                        py_string.intern
                        ))
                    w_not_in_module_state.putln("#else")
                w_not_in_module_state.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
                    py_string.cname,
                    c_cname,
                    c_cname,
                    encoding,
                    py_string.is_unicode,
                    py_string.is_str,
                    py_string.intern
                    ))
                if py_string.py3str_cstring:
                    w_not_in_module_state.putln("#endif")
                w_in_module_state.putln("{0, %s, sizeof(%s), %s, %d, %d, %d}," % (
                    c_cname if not py_string.py3str_cstring else py_string.py3str_cstring.cname,
                    c_cname if not py_string.py3str_cstring else py_string.py3str_cstring.cname,
                    encoding if not py_string.py3str_cstring else '0',
                    py_string.is_unicode,
                    py_string.is_str,
                    py_string.intern
                    ))
                init_globals_in_module_state.putln("if (__Pyx_InitString(%s[%d], &%s) < 0) %s;" % (
                    Naming.stringtab_cname,
                    idx,
                    py_string.cname,
                    init_globals.error_goto(self.module_pos)))
            w.putln("{0, 0, 0, 0, 0, 0, 0}")
            w.putln("};")
            init_globals.putln("#if !CYTHON_USE_MODULE_STATE")
            init_globals.putln(
                "if (__Pyx_InitStrings(%s) < 0) %s;" % (
                    Naming.stringtab_cname,
                    init_globals.error_goto(self.module_pos)))
            init_globals.putln("#endif")
    def generate_num_constants(self):
        """Declare the interned number constants and emit their init code."""
        consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c)
                  for c in self.num_const_index.values()]
        consts.sort()
        decls_writer = self.parts['decls']
        decls_writer.putln("#if !CYTHON_USE_MODULE_STATE")
        init_globals = self.parts['init_globals']
        for py_type, _, _, value, value_code, c in consts:
            cname = c.cname
            self.parts['module_state'].putln("PyObject *%s;" % cname)
            self.parts['module_state_defines'].putln("#define %s %s->%s" % (
                cname, Naming.modulestateglobal_cname, cname))
            self.parts['module_state_clear'].putln(
                "Py_CLEAR(clear_module_state->%s);" % cname)
            self.parts['module_state_traverse'].putln(
                "Py_VISIT(traverse_module_state->%s);" % cname)
            decls_writer.putln("static PyObject *%s;" % cname)
            if py_type == 'float':
                function = 'PyFloat_FromDouble(%s)'
            elif py_type == 'long':
                function = 'PyLong_FromString((char *)"%s", 0, 0)'
            elif Utils.long_literal(value):
                # Too large for a C long literal: parse from string at init time.
                function = 'PyInt_FromString((char *)"%s", 0, 0)'
            elif len(value.lstrip('-')) > 4:
                function = "PyInt_FromLong(%sL)"
            else:
                function = "PyInt_FromLong(%s)"
            init_globals.putln('%s = %s; %s' % (
                cname, function % value_code,
                init_globals.error_goto_if_null(cname, self.module_pos)))
        decls_writer.putln("#endif")
    # The functions below are there in a transition phase only
    # and will be deprecated. They are called from Nodes.BlockNode.
    # The copy&paste duplication is intentional in order to be able
    # to see quickly how BlockNode worked, until this is replaced.
    def should_declare(self, cname, entry):
        # True if *cname* has not been declared yet; records the declaration.
        if cname in self.declared_cnames:
            other = self.declared_cnames[cname]
            assert str(entry.type) == str(other.type)
            assert entry.init == other.init
            return False
        else:
            self.declared_cnames[cname] = entry
            return True
    #
    # File name state
    #
    def lookup_filename(self, source_desc):
        """Return the filename table index of *source_desc*, adding it on
        first use.
        """
        entry = source_desc.get_filenametable_entry()
        try:
            index = self.filename_table[entry]
        except KeyError:
            index = len(self.filename_list)
            self.filename_list.append(source_desc)
            self.filename_table[entry] = index
        return index
    def commented_file_contents(self, source_desc):
        """Return (and cache) the lines of *source_desc*, escaped so that they
        can be embedded safely inside C comments.
        """
        try:
            return self.input_file_contents[source_desc]
        except KeyError:
            pass
        source_file = source_desc.get_lines(encoding='ASCII',
                                            error_handling='ignore')
        try:
            F = [u' * ' + line.rstrip().replace(
                     u'*/', u'*[inserted by cython to avoid comment closer]/'
                 ).replace(
                     u'/*', u'/[inserted by cython to avoid comment start]*'
                 )
                 for line in source_file]
        finally:
            if hasattr(source_file, 'close'):
                source_file.close()
        if not F: F.append(u'')
        self.input_file_contents[source_desc] = F
        return F
    #
    # Utility code state
    #
    def use_utility_code(self, utility_code):
        """
        Adds code to the C file. utility_code should
        a) implement __eq__/__hash__ for the purpose of knowing whether the same
           code has already been included
        b) implement put_code, which takes a globalstate instance

        See UtilityCode.
        """
        if utility_code and utility_code not in self.utility_codes:
            self.utility_codes.add(utility_code)
            utility_code.put_code(self)
    def use_entry_utility_code(self, entry):
        # Pull in any utility code attached to a symbol table entry.
        if entry is None:
            return
        if entry.utility_code:
            self.use_utility_code(entry.utility_code)
        if entry.utility_code_definition:
            self.use_utility_code(entry.utility_code_definition)
def funccontext_property(func):
    """Build a property that proxies the attribute named after *func* to
    ``self.funcstate`` (both reads and writes).
    """
    name = func.__name__
    def _get(self):
        return getattr(self.funcstate, name)
    def _set(self, value):
        setattr(self.funcstate, name, value)
    return property(_get, _set)
class CCodeConfig(object):
    """Configuration flags for C code generation.

    Attributes:
        emit_linenums        boolean  write #line pragmas?
        emit_code_comments   boolean  copy the original code into C comments?
        c_line_in_traceback  boolean  append the c file and line number to the
                                      traceback for exceptions?
    """
    def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
        self.emit_linenums = emit_linenums
        self.emit_code_comments = emit_code_comments
        self.c_line_in_traceback = c_line_in_traceback
class CCodeWriter(object):
"""
Utility class to output C code.
When creating an insertion point one must care about the state that is
kept:
- formatting state (level, bol) is cloned and used in insertion points
as well
- labels, temps, exc_vars: One must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
sanity checking and forward compatibility). Created insertion points
looses this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
coming from the same root share the same instances simultaneously.
"""
# f file output file
# buffer StringIOTree
# level int indentation level
# bol bool beginning of line?
# marker string comment to emit before next line
# funcstate FunctionState contains state local to a C function used for code
# generation (labels and temps state etc.)
# globalstate GlobalState contains state global for a C file (input file info,
# utility code, declared constants etc.)
# pyclass_stack list used during recursive code generation to pass information
# about the current class one is in
# code_config CCodeConfig configuration options for the C code writer
@cython.locals(create_from='CCodeWriter')
def __init__(self, create_from=None, buffer=None, copy_formatting=False):
if buffer is None: buffer = StringIOTree()
self.buffer = buffer
self.last_pos = None
self.last_marked_pos = None
self.pyclass_stack = []
self.funcstate = None
self.globalstate = None
self.code_config = None
self.level = 0
self.call_level = 0
self.bol = 1
if create_from is not None:
# Use same global state
self.set_global_state(create_from.globalstate)
self.funcstate = create_from.funcstate
# Clone formatting state
if copy_formatting:
self.level = create_from.level
self.bol = create_from.bol
self.call_level = create_from.call_level
self.last_pos = create_from.last_pos
self.last_marked_pos = create_from.last_marked_pos
def create_new(self, create_from, buffer, copy_formatting):
# polymorphic constructor -- very slightly more versatile
# than using __class__
result = CCodeWriter(create_from, buffer, copy_formatting)
return result
    def set_global_state(self, global_state):
        # Attach the shared GlobalState; may only happen once per writer.
        assert self.globalstate is None  # prevent overwriting once it's set
        self.globalstate = global_state
        self.code_config = global_state.code_config
    def copyto(self, f):
        # Copy the accumulated output into the file-like object *f*.
        self.buffer.copyto(f)
    def getvalue(self):
        # Return the accumulated output as a single string.
        return self.buffer.getvalue()
    def write(self, s):
        # Cygdb needs to know which Cython source line corresponds to which C line.
        # Therefore, we write this information into "self.buffer.markers" and then write it from there
        # into cython_debug/cython_debug_info_* (see ModuleNode._serialize_lineno_map).
        filename_line = self.last_marked_pos[:2] if self.last_marked_pos else (None, 0)
        # One marker entry per newline written, so markers stay line-aligned.
        self.buffer.markers.extend([filename_line] * s.count('\n'))
        self.buffer.write(s)
    def insertion_point(self):
        # Create a writer whose output is spliced in at the current position,
        # sharing state and cloning the formatting of this writer.
        other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True)
        return other
    def new_writer(self):
        """
        Creates a new CCodeWriter connected to the same global state, which
        can later be inserted using insert.
        """
        return CCodeWriter(create_from=self)
    def insert(self, writer):
        """
        Inserts the contents of another code writer (created with
        the same global state) in the current location.

        It is ok to write to the inserted writer also after insertion.
        """
        assert writer.globalstate is self.globalstate
        # Splice the other writer's buffer tree into ours at this point.
        self.buffer.insert(writer.buffer)
# Properties delegated to function scope
@funccontext_property
def label_counter(self): pass
@funccontext_property
def return_label(self): pass
@funccontext_property
def error_label(self): pass
@funccontext_property
def labels_used(self): pass
@funccontext_property
def continue_label(self): pass
@funccontext_property
def break_label(self): pass
@funccontext_property
def return_from_error_cleanup_label(self): pass
@funccontext_property
def yield_labels(self): pass
    # Functions delegated to function scope (thin forwarders to self.funcstate)
    def new_label(self, name=None): return self.funcstate.new_label(name)
    def new_error_label(self): return self.funcstate.new_error_label()
    def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
    def get_loop_labels(self): return self.funcstate.get_loop_labels()
    def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
    def new_loop_labels(self): return self.funcstate.new_loop_labels()
    def get_all_labels(self): return self.funcstate.get_all_labels()
    def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels)
    def all_new_labels(self): return self.funcstate.all_new_labels()
    def use_label(self, lbl): return self.funcstate.use_label(lbl)
    def label_used(self, lbl): return self.funcstate.label_used(lbl)
    def enter_cfunc_scope(self, scope=None):
        # Start a fresh per-function state (labels, temps, ...) for generation.
        self.funcstate = FunctionState(self, scope=scope)
    def exit_cfunc_scope(self):
        # Sanity-check and drop the per-function state.
        self.funcstate.validate_exit()
        self.funcstate = None
    # constant handling (thin forwarders to self.globalstate, returning C names)
    def get_py_int(self, str_value, longness):
        # C name of the interned Python int constant for *str_value*.
        return self.globalstate.get_int_const(str_value, longness).cname
    def get_py_float(self, str_value, value_code):
        # C name of the interned Python float constant.
        return self.globalstate.get_float_const(str_value, value_code).cname
    def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
        # C name of a (possibly deduplicated) generic Python object constant.
        return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname
    def get_string_const(self, text):
        # C name of the plain C string constant for *text*.
        return self.globalstate.get_string_const(text).cname
    def get_pyunicode_ptr_const(self, text):
        # C name of a Py_UNICODE[] constant for *text*.
        return self.globalstate.get_pyunicode_ptr_const(text)
    def get_py_string_const(self, text, identifier=None,
                            is_str=False, unicode_value=None):
        # C name of the interned Python string object for *text*.
        return self.globalstate.get_py_string_const(
            text, identifier, is_str, unicode_value).cname
    def get_argument_default_const(self, type):
        # Python object constant used to hold an argument default value.
        return self.globalstate.get_py_const(type).cname
    def intern(self, text):
        return self.get_py_string_const(text)
    def intern_identifier(self, text):
        return self.get_py_string_const(text, identifier=True)
    def get_cached_constants_writer(self, target=None):
        # Writer for one-time constant creation code (None if already emitted).
        return self.globalstate.get_cached_constants_writer(target)
# code generation
def putln(self, code="", safe=False):
if self.last_pos and self.bol:
self.emit_marker()
if self.code_config.emit_linenums and self.last_marked_pos:
source_desc, line, _ = self.last_marked_pos
self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description()))
if code:
if safe:
self.put_safe(code)
else:
self.put(code)
self.write("\n")
self.bol = 1
def mark_pos(self, pos, trace=True):
if pos is None:
return
if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]:
return
self.last_pos = (pos, trace)
    def emit_marker(self):
        # Flush the pending position marker: a blank line, optionally the
        # quoted-source comment block, and a line-trace call when enabled.
        pos, trace = self.last_pos
        self.last_marked_pos = pos
        self.last_pos = None
        self.write("\n")
        if self.code_config.emit_code_comments:
            self.indent()
            self.write("/* %s */\n" % self._build_marker(pos))
        if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']:
            self.indent()
            self.write('__Pyx_TraceLine(%d,%d,%s)\n' % (
                pos[1], not self.funcstate.gil_owned, self.error_goto(pos)))
def _build_marker(self, pos):
source_desc, line, col = pos
assert isinstance(source_desc, SourceDescriptor)
contents = self.globalstate.commented_file_contents(source_desc)
lines = contents[max(0, line-3):line] # line numbers start at 1
lines[-1] += u' # <<<<<<<<<<<<<<'
lines += contents[line:line+2]
return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines))
    def put_safe(self, code):
        # put code, but ignore {}
        # (i.e. write without the brace-based auto-indentation of put()).
        self.write(code)
        self.bol = 0
    def put_or_include(self, code, name):
        # Write *code* inline, or -- for large code when a common utility
        # include directory is configured -- dump it into a shared header
        # (named by content hash so identical code is reused) and #include it.
        include_dir = self.globalstate.common_utility_include_dir
        if include_dir and len(code) > 1024:
            include_file = "%s_%s.h" % (
                name, hashlib.sha1(code.encode('utf8')).hexdigest())
            path = os.path.join(include_dir, include_file)
            if not os.path.exists(path):
                # Write to a pid-suffixed temp file and move it into place, so
                # concurrent compiler runs do not see a half-written header.
                tmp_path = '%s.tmp%s' % (path, os.getpid())
                with closing(Utils.open_new_file(tmp_path)) as f:
                    f.write(code)
                shutil.move(tmp_path, path)
            code = '#include "%s"\n' % path
        self.put(code)
def put(self, code):
fix_indent = False
if "{" in code:
dl = code.count("{")
else:
dl = 0
if "}" in code:
dl -= code.count("}")
if dl < 0:
self.level += dl
elif dl == 0 and code[0] == "}":
# special cases like "} else {" need a temporary dedent
fix_indent = True
self.level -= 1
if self.bol:
self.indent()
self.write(code)
self.bol = 0
if dl > 0:
self.level += dl
elif fix_indent:
self.level += 1
    def putln_tempita(self, code, **context):
        # Substitute *context* into the Tempita template *code*, then putln() it.
        from ..Tempita import sub
        self.putln(sub(code, **context))
    def put_tempita(self, code, **context):
        # Substitute *context* into the Tempita template *code*, then put() it.
        from ..Tempita import sub
        self.put(sub(code, **context))
    def increase_indent(self):
        # One indentation level deeper.
        self.level += 1
    def decrease_indent(self):
        # One indentation level back out.
        self.level -= 1
    def begin_block(self):
        # Open a C block and indent its contents.
        self.putln("{")
        self.increase_indent()
    def end_block(self):
        # Close a C block opened by begin_block().
        self.decrease_indent()
        self.putln("}")
    def indent(self):
        # Emit the leading whitespace for the current nesting level.
        self.write("  " * self.level)
def get_py_version_hex(self, pyversion):
return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4]
    def put_label(self, lbl):
        # Only emit labels that are actually jumped to somewhere.
        if lbl in self.funcstate.labels_used:
            self.putln("%s:;" % lbl)

    def put_goto(self, lbl):
        # Record the label as used so put_label() will emit it.
        self.funcstate.use_label(lbl)
        self.putln("goto %s;" % lbl)
    def put_var_declaration(self, entry, storage_class="",
                            dll_linkage=None, definition=True):
        """Emit the C declaration for a scope *entry*, including storage
        class, CYTHON_UNUSED marker, and a default initialiser (NULL for
        Python objects). Skips private entries that are unused or that are
        declarations rather than definitions."""
        #print "Code.put_var_declaration:", entry.name, "definition =", definition ###
        if entry.visibility == 'private' and not (definition or entry.defined_in_pxd):
            #print "...private and not definition, skipping", entry.cname ###
            return
        if entry.visibility == "private" and not entry.used:
            #print "...private and not used, skipping", entry.cname ###
            return
        if storage_class:
            self.put("%s " % storage_class)
        if not entry.cf_used:
            self.put('CYTHON_UNUSED ')
        if entry.is_cpp_optional:
            # C++ locals wrapped in std::optional (cpp_locals directive).
            self.put(entry.type.cpp_optional_declaration_code(
                entry.cname, dll_linkage=dll_linkage))
        else:
            self.put(entry.type.declaration_code(
                entry.cname, dll_linkage=dll_linkage))
        if entry.init is not None:
            self.put_safe(" = %s" % entry.type.literal_code(entry.init))
        elif entry.type.is_pyobject:
            self.put(" = NULL")
        self.putln(";")
        self.funcstate.scope.use_entry_utility_code(entry)
    def put_temp_declarations(self, func_context):
        """Declare all temporaries allocated for a function, plus the
        lineno/filename/clineno error-indicator variables when needed."""
        for name, type, manage_ref, static in func_context.temps_allocated:
            if type.is_cpp_class and func_context.scope.directives['cpp_locals']:
                decl = type.cpp_optional_declaration_code(name)
            else:
                decl = type.declaration_code(name)
            if type.is_pyobject:
                self.putln("%s = NULL;" % decl)
            elif type.is_memoryviewslice:
                self.putln("%s = %s;" % (decl, type.literal_code(type.default_value)))
            else:
                self.putln("%s%s;" % (static and "static " or "", decl))
        if func_context.should_declare_error_indicator:
            # Mark unused when no error path actually reads the indicator.
            if self.funcstate.uses_error_indicator:
                unused = ''
            else:
                unused = 'CYTHON_UNUSED '
            # Initialize these variables to silence compiler warnings
            self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname))
            self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname))
            self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname))
    def put_generated_by(self):
        # Version banner at the top of every generated file.
        self.putln("/* Generated by Cython %s */" % Version.watermark)
        self.putln("")

    def put_h_guard(self, guard):
        # NOTE(review): the matching #endif is not emitted here — presumably
        # the caller closes the guard; confirm at call sites.
        self.putln("#ifndef %s" % guard)
        self.putln("#define %s" % guard)
    def unlikely(self, cond):
        # Wrap *cond* in a branch-prediction hint when gcc hints are enabled.
        if Options.gcc_branch_hints:
            return 'unlikely(%s)' % cond
        else:
            return cond

    def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper):
        # Map each modifier name to its C spelling; returns '' when empty,
        # otherwise a space-joined list with one trailing space.
        if not modifiers:
            return ''
        return '%s ' % ' '.join([mapper(m,m) for m in modifiers])
    # Python objects and reference counting

    def entry_as_pyobject(self, entry):
        # Return the entry's cname, cast to PyObject* when its C-level type
        # is not literally PyObject* (incomplete or extension types).
        type = entry.type
        if (not entry.is_self_arg and not entry.type.is_complete()
            or entry.type.is_extension_type):
            return "(PyObject *)" + entry.cname
        else:
            return entry.cname

    def as_pyobject(self, cname, type):
        # Typecast helper: cast *cname* of *type* to PyObject* where needed.
        from .PyrexTypes import py_object_type, typecast
        return typecast(py_object_type, type, cname)
    # Refnanny / refcounting helpers. Code generation is delegated to the
    # value's type object; the put_var_* variants take a scope entry and
    # forward its cname/type.

    def put_gotref(self, cname, type):
        type.generate_gotref(self, cname)

    def put_giveref(self, cname, type):
        type.generate_giveref(self, cname)

    def put_xgiveref(self, cname, type):
        type.generate_xgiveref(self, cname)

    def put_xgotref(self, cname, type):
        type.generate_xgotref(self, cname)

    def put_incref(self, cname, type, nanny=True):
        # Note: original put_Memslice_Incref/Decref also added in some utility code
        # this is unnecessary since the relevant utility code is loaded anyway if a memoryview is used
        # and so has been removed. However, it's potentially a feature that might be useful here
        type.generate_incref(self, cname, nanny=nanny)

    def put_xincref(self, cname, type, nanny=True):
        type.generate_xincref(self, cname, nanny=nanny)

    def put_decref(self, cname, type, nanny=True, have_gil=True):
        type.generate_decref(self, cname, nanny=nanny, have_gil=have_gil)

    def put_xdecref(self, cname, type, nanny=True, have_gil=True):
        type.generate_xdecref(self, cname, nanny=nanny, have_gil=have_gil)

    def put_decref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
        type.generate_decref_clear(self, cname, clear_before_decref=clear_before_decref,
                                   nanny=nanny, have_gil=have_gil)

    def put_xdecref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
        type.generate_xdecref_clear(self, cname, clear_before_decref=clear_before_decref,
                                    nanny=nanny, have_gil=have_gil)

    def put_decref_set(self, cname, type, rhs_cname):
        type.generate_decref_set(self, cname, rhs_cname)

    def put_xdecref_set(self, cname, type, rhs_cname):
        type.generate_xdecref_set(self, cname, rhs_cname)

    def put_incref_memoryviewslice(self, slice_cname, type, have_gil):
        # TODO ideally this would just be merged into "put_incref"
        type.generate_incref_memoryviewslice(self, slice_cname, have_gil=have_gil)

    def put_var_incref_memoryviewslice(self, entry, have_gil):
        self.put_incref_memoryviewslice(entry.cname, entry.type, have_gil=have_gil)

    def put_var_gotref(self, entry):
        self.put_gotref(entry.cname, entry.type)

    def put_var_giveref(self, entry):
        self.put_giveref(entry.cname, entry.type)

    def put_var_xgotref(self, entry):
        self.put_xgotref(entry.cname, entry.type)

    def put_var_xgiveref(self, entry):
        self.put_xgiveref(entry.cname, entry.type)

    def put_var_incref(self, entry, **kwds):
        self.put_incref(entry.cname, entry.type, **kwds)

    def put_var_xincref(self, entry, **kwds):
        self.put_xincref(entry.cname, entry.type, **kwds)

    def put_var_decref(self, entry, **kwds):
        self.put_decref(entry.cname, entry.type, **kwds)

    def put_var_xdecref(self, entry, **kwds):
        self.put_xdecref(entry.cname, entry.type, **kwds)

    def put_var_decref_clear(self, entry, **kwds):
        # Closure variables are cleared before decref to avoid dangling
        # pointers if the decref triggers arbitrary code.
        self.put_decref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)

    def put_var_decref_set(self, entry, rhs_cname, **kwds):
        self.put_decref_set(entry.cname, entry.type, rhs_cname, **kwds)

    def put_var_xdecref_set(self, entry, rhs_cname, **kwds):
        self.put_xdecref_set(entry.cname, entry.type, rhs_cname, **kwds)

    def put_var_xdecref_clear(self, entry, **kwds):
        self.put_xdecref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)

    def put_var_decrefs(self, entries, used_only = 0):
        # Decref a list of entries; xdecref where NULL is possible.
        for entry in entries:
            if not used_only or entry.used:
                if entry.xdecref_cleanup:
                    self.put_var_xdecref(entry)
                else:
                    self.put_var_decref(entry)

    def put_var_xdecrefs(self, entries):
        for entry in entries:
            self.put_var_xdecref(entry)

    def put_var_xdecrefs_clear(self, entries):
        for entry in entries:
            self.put_var_xdecref_clear(entry)
    def put_init_to_py_none(self, cname, type, nanny=True):
        """Assign Py_None (cast to *type* if needed) to *cname* and take a
        new reference, via refnanny when *nanny* is true."""
        from .PyrexTypes import py_object_type, typecast
        py_none = typecast(type, py_object_type, "Py_None")
        if nanny:
            self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
        else:
            self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
code = template % entry.cname
#if entry.type.is_extension_type:
# code = "((PyObject*)%s)" % code
self.put_init_to_py_none(code, entry.type, nanny)
if entry.in_closure:
self.put_giveref('Py_None')
    def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None):
        """Emit one PyMethodDef table entry for *entry*, followed by *term*.

        Special methods may be skipped entirely (typeobject.c fills their
        slots), get METH_COEXIST added, and may be routed through a wrapper
        function generated on *wrapper_code_writer*.
        """
        if entry.is_special or entry.name == '__getattribute__':
            if entry.name not in special_py_methods:
                if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']:
                    pass
                # Python's typeobject.c will automatically fill in our slot
                # in add_operators() (called by PyType_Ready) with a value
                # that's better than ours.
                elif allow_skip:
                    return
        method_flags = entry.signature.method_flags()
        if not method_flags:
            return
        if entry.is_special:
            from . import TypeSlots
            method_flags += [TypeSlots.method_coexist]
        func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname
        # Add required casts, but try not to shadow real warnings.
        cast = entry.signature.method_function_type()
        if cast != 'PyCFunction':
            func_ptr = '(void*)(%s)%s' % (cast, func_ptr)
        entry_name = entry.name.as_c_string_literal()
        self.putln(
            '{%s, (PyCFunction)%s, %s, %s}%s' % (
                entry_name,
                func_ptr,
                "|".join(method_flags),
                entry.doc_cname if entry.doc else '0',
                term))
    def put_pymethoddef_wrapper(self, entry):
        """Return the function cname to put in a PyMethodDef, emitting a
        small adapter for METH_NOARGS special methods whose C signature
        lacks the unused 'arg' parameter PyCFunction expects."""
        func_cname = entry.func_cname
        if entry.is_special:
            method_flags = entry.signature.method_flags() or []
            from .TypeSlots import method_noargs
            if method_noargs in method_flags:
                # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one.
                func_cname = Naming.method_wrapper_prefix + func_cname
                self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % (
                    func_cname, entry.func_cname))
        return func_cname
    # GIL methods

    def use_fast_gil_utility_code(self):
        # Load the fast-gil helpers, or the no-op fallback implementation,
        # depending on the 'fast_gil' directive.
        if self.globalstate.directives['fast_gil']:
            self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
        else:
            self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
    def put_ensure_gil(self, declare_gilstate=True, variable=None):
        """
        Acquire the GIL. The generated code is safe even when no PyThreadState
        has been allocated for this thread (for threads not initialized by
        using the Python API). Additionally, the code generated by this method
        may be called recursively.
        """
        self.globalstate.use_utility_code(
            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        if not variable:
            variable = '__pyx_gilstate_save'
        if declare_gilstate:
            self.put("PyGILState_STATE ")
        self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
        self.putln("#endif")
    def put_release_ensured_gil(self, variable=None):
        """
        Release the GIL; corresponds to `put_ensure_gil`.
        """
        self.use_fast_gil_utility_code()
        if not variable:
            variable = '__pyx_gilstate_save'
        self.putln("#ifdef WITH_THREAD")
        self.putln("__Pyx_PyGILState_Release(%s);" % variable)
        self.putln("#endif")

    def put_acquire_gil(self, variable=None):
        """
        Acquire the GIL. The thread's thread state must have been initialized
        by a previous `put_release_gil`.
        """
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        self.putln("__Pyx_FastGIL_Forget();")
        if variable:
            # Restore the saved thread state into '_save' for Py_BLOCK_THREADS.
            self.putln('_save = %s;' % variable)
        self.putln("Py_BLOCK_THREADS")
        self.putln("#endif")

    def put_release_gil(self, variable=None):
        "Release the GIL; corresponds to `put_acquire_gil`."
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        self.putln("PyThreadState *_save;")
        self.putln("Py_UNBLOCK_THREADS")
        if variable:
            # Stash the released thread state so put_acquire_gil can restore it.
            self.putln('%s = _save;' % variable)
        self.putln("__Pyx_FastGIL_Remember();")
        self.putln("#endif")

    def declare_gilstate(self):
        # Declare the default gilstate variable used by put_ensure_gil().
        self.putln("#ifdef WITH_THREAD")
        self.putln("PyGILState_STATE __pyx_gilstate_save;")
        self.putln("#endif")
    # error handling

    def put_error_if_neg(self, pos, value):
        # Emit "if (value < 0) goto error" for C-API calls returning -1 on error.
        # TODO this path is almost _never_ taken, yet this macro makes it slower!
        # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
        return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
    def put_error_if_unbound(self, pos, entry, in_nogil_context=False, unbound_check_code=None):
        """Emit a NULL-check for *entry* that raises the appropriate
        unbound-name error (closure / memoryview / C++ global / C++
        attribute / local) and jumps to the error label."""
        if entry.from_closure:
            func = '__Pyx_RaiseClosureNameError'
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseClosureNameError", "ObjectHandling.c"))
        elif entry.type.is_memoryviewslice and in_nogil_context:
            func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseUnboundMemoryviewSliceNogil", "ObjectHandling.c"))
        elif entry.type.is_cpp_class and entry.is_cglobal:
            func = '__Pyx_RaiseCppGlobalNameError'
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseCppGlobalNameError", "ObjectHandling.c"))
        elif entry.type.is_cpp_class and entry.is_variable and not entry.is_member and entry.scope.is_c_class_scope:
            # there doesn't seem to be a good way to detecting an instance-attribute of a C class
            # (is_member is only set for class attributes)
            func = '__Pyx_RaiseCppAttributeError'
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseCppAttributeError", "ObjectHandling.c"))
        else:
            func = '__Pyx_RaiseUnboundLocalError'
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseUnboundLocalError", "ObjectHandling.c"))
        if not unbound_check_code:
            unbound_check_code = entry.type.check_for_null_code(entry.cname)
        self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
            unbound_check_code,
            func,
            entry.name,
            self.error_goto(pos)))
    def set_error_info(self, pos, used=False):
        # Return the macro call that records the error position (file/line)
        # without jumping; marks the indicator variables as needed.
        self.funcstate.should_declare_error_indicator = True
        if used:
            self.funcstate.uses_error_indicator = True
        return "__PYX_MARK_ERR_POS(%s, %s)" % (
            self.lookup_filename(pos[0]),
            pos[1])

    def error_goto(self, pos, used=True):
        """Return C code that records *pos* and jumps to the function's
        error label (plain 'goto' when no position is available)."""
        lbl = self.funcstate.error_label
        self.funcstate.use_label(lbl)
        if pos is None:
            return 'goto %s;' % lbl
        self.funcstate.should_declare_error_indicator = True
        if used:
            self.funcstate.uses_error_indicator = True
        return "__PYX_ERR(%s, %s, %s)" % (
            self.lookup_filename(pos[0]),
            pos[1],
            lbl)

    def error_goto_if(self, cond, pos):
        return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))

    def error_goto_if_null(self, cname, pos):
        return self.error_goto_if("!%s" % cname, pos)

    def error_goto_if_neg(self, cname, pos):
        # Add extra parentheses to silence clang warnings about constant conditions.
        return self.error_goto_if("(%s < 0)" % cname, pos)

    def error_goto_if_PyErr(self, pos):
        return self.error_goto_if("PyErr_Occurred()", pos)

    def lookup_filename(self, filename):
        # Map a source filename to its index in the generated file table.
        return self.globalstate.lookup_filename(filename)
    def put_declare_refcount_context(self):
        # Declare the refnanny bookkeeping variables for this function.
        self.putln('__Pyx_RefNannyDeclarations')

    def put_setup_refcount_context(self, name, acquire_gil=False):
        name = name.as_c_string_literal()  # handle unicode names
        if acquire_gil:
            self.globalstate.use_utility_code(
                UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
        self.putln('__Pyx_RefNannySetupContext(%s, %d);' % (name, acquire_gil and 1 or 0))

    def put_finish_refcount_context(self, nogil=False):
        self.putln("__Pyx_RefNannyFinishContextNogil()" if nogil else "__Pyx_RefNannyFinishContext();")
    def put_add_traceback(self, qualified_name, include_cline=True):
        """
        Build a Python traceback for propagating exceptions.

        qualified_name should be the qualified name of the function.
        """
        qualified_name = qualified_name.as_c_string_literal()  # handle unicode names
        format_tuple = (
            qualified_name,
            Naming.clineno_cname if include_cline else 0,
            Naming.lineno_cname,
            Naming.filename_cname,
        )
        # __Pyx_AddTraceback reads the error-indicator variables.
        self.funcstate.uses_error_indicator = True
        self.putln('__Pyx_AddTraceback(%s, %s, %s, %s);' % format_tuple)
    def put_unraisable(self, qualified_name, nogil=False):
        """
        Generate code to print a Python warning for an unraisable exception.

        qualified_name should be the qualified name of the function.
        """
        format_tuple = (
            qualified_name,
            Naming.clineno_cname,
            Naming.lineno_cname,
            Naming.filename_cname,
            self.globalstate.directives['unraisable_tracebacks'],
            nogil,
        )
        # __Pyx_WriteUnraisable reads the error-indicator variables.
        self.funcstate.uses_error_indicator = True
        self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple)
        self.globalstate.use_utility_code(
            UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c"))
    # Profiling/tracing support (cython 'profile'/'linetrace' directives).

    def put_trace_declarations(self):
        self.putln('__Pyx_TraceDeclarations')

    def put_trace_frame_init(self, codeobj=None):
        if codeobj:
            self.putln('__Pyx_TraceFrameInit(%s)' % codeobj)

    def put_trace_call(self, name, pos, nogil=False):
        self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % (
            name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos)))

    def put_trace_exception(self):
        self.putln("__Pyx_TraceException();")

    def put_trace_return(self, retvalue_cname, nogil=False):
        self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil))

    def putln_openmp(self, string):
        # Emit *string* guarded by an _OPENMP preprocessor conditional.
        self.putln("#ifdef _OPENMP")
        self.putln(string)
        self.putln("#endif /* _OPENMP */")
    def undef_builtin_expect(self, cond):
        """
        Redefine the macros likely() and unlikely() to no-ops, guarded by
        preprocessor condition 'cond'.
        """
        self.putln("#if %s" % cond)
        self.putln("    #undef likely")
        self.putln("    #undef unlikely")
        self.putln("    #define likely(x)   (x)")
        self.putln("    #define unlikely(x) (x)")
        self.putln("#endif")

    def redef_builtin_expect(self, cond):
        # Restore the __builtin_expect-based definitions; counterpart of
        # undef_builtin_expect().
        self.putln("#if %s" % cond)
        self.putln("    #undef likely")
        self.putln("    #undef unlikely")
        self.putln("    #define likely(x)   __builtin_expect(!!(x), 1)")
        self.putln("    #define unlikely(x) __builtin_expect(!!(x), 0)")
        self.putln("#endif")
class PyrexCodeWriter(object):
    """Simple indentation-aware line writer for Pyrex/Cython source files.

    f     -- the output file object
    level -- current indentation depth
    """

    def __init__(self, outfile_name):
        self.f = Utils.open_new_file(outfile_name)
        self.level = 0

    def putln(self, code):
        """Write one line of code at the current indentation level."""
        prefix = "    " * self.level
        self.f.write(prefix + code + "\n")

    def indent(self):
        self.level += 1

    def dedent(self):
        self.level -= 1
class PyxCodeWriter(object):
    """
    Helper for generating Cython source code, with indentation tracking and
    optional Tempita substitution of a context dict. To use the indenter
    functionality, the Cython.Compiler.Importer module will have to be used
    to load the code to support python 2.4.
    """

    def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
        self.buffer = buffer if buffer else StringIOTree()
        self.level = indent_level
        self.context = context
        self.encoding = encoding

    def indent(self, levels=1):
        # Returns True so it can be used as the condition of an 'if'
        # (see indenter()).
        self.level += levels
        return True

    def dedent(self, levels=1):
        self.level -= levels

    def indenter(self, line):
        """
        Write *line* and indent subsequent output; always returns True.

        Instead of
            with pyx_code.indenter("for i in range(10):"):
                pyx_code.putln("print i")
        write
            if pyx_code.indenter("for i in range(10);"):
                pyx_code.putln("print i")
                pyx_code.dedent()
        """
        self.putln(line)
        self.indent()
        return True

    def getvalue(self):
        text = self.buffer.getvalue()
        if isinstance(text, bytes):
            text = text.decode(self.encoding)
        return text

    def putln(self, line, context=None):
        if not context:
            context = self.context
        if context:
            line = sub_tempita(line, context)
        self._putln(line)

    def _putln(self, line):
        self.buffer.write("%s%s\n" % (self.level * "    ", line))

    def put_chunk(self, chunk, context=None):
        """Write a dedented multi-line chunk, one line at a time."""
        if not context:
            context = self.context
        if context:
            chunk = sub_tempita(chunk, context)
        for chunk_line in textwrap.dedent(chunk).splitlines():
            self._putln(chunk_line)

    def insertion_point(self):
        # A new writer sharing this one's level/context, writing into an
        # insertion point of the underlying buffer tree.
        return PyxCodeWriter(self.buffer.insertion_point(), self.level,
                             self.context)

    def named_insertion_point(self, name):
        setattr(self, name, self.insertion_point())
class ClosureTempAllocator(object):
    """Allocates reusable temporary variable slots on a closure class scope.

    Temps are declared once on *klass* and recycled across uses via reset().
    """

    def __init__(self, klass):
        self.klass = klass
        self.temps_allocated = {}   # type -> all cnames ever declared
        self.temps_free = {}        # type -> currently reusable cnames
        self.temps_count = 0

    def reset(self):
        # Mark every previously declared temp as free again.
        for temp_type, cnames in self.temps_allocated.items():
            self.temps_free[temp_type] = list(cnames)

    def allocate_temp(self, type):
        """Return a temp cname for *type*, reusing a free one when possible,
        otherwise declaring a fresh variable on the closure class."""
        known = type in self.temps_allocated
        if known and self.temps_free[type]:
            return self.temps_free[type].pop(0)
        if not known:
            self.temps_allocated[type] = []
            self.temps_free[type] = []
        cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
        self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
        self.temps_allocated[type].append(cname)
        self.temps_count += 1
        return cname
| 38.013989 | 116 | 0.600326 |
acf127bd561e3aaa623ed4e79d27aefda26efda7 | 3,632 | py | Python | txdlo/dlo.py | terrycojones/txdlo | 3e4c48772b77e2e36faf39ce23021da36a6939e8 | [
"Apache-2.0"
] | 2 | 2015-04-10T12:08:21.000Z | 2016-07-01T09:14:09.000Z | txdlo/dlo.py | terrycojones/txdlo | 3e4c48772b77e2e36faf39ce23021da36a6939e8 | [
"Apache-2.0"
] | 1 | 2018-07-23T18:02:03.000Z | 2018-07-23T18:02:03.000Z | txdlo/dlo.py | terrycojones/txdlo | 3e4c48772b77e2e36faf39ce23021da36a6939e8 | [
"Apache-2.0"
] | null | null | null | class DeferredListObserver(object):
"""
Call a list of observer functions with information about firing events
that occur on a set of deferreds. Observers are called with event
information in the order they are added (via C{observe}).
@param maintainHistory: if C{True} a history of all events is maintained.
This can be replayed to newly added observers and is accessible to
class instances. If C{False}, the default, no history is kept.
@ivar history: a C{list} of (index, success, value) tuples, in the order
that the deferreds in the set fired (this will generally not be the
order in which the deferreds are added to the set). The history
attribute will only exist if C{maintainHistory} (above) is C{True}.
@ivar successCount: the number of observed deferreds that have been called
successfully.
@ivar failureCount: the number of observed deferreds that have been
errored.
@ivar pendingCount: the number of observed deferreds that have not yet been
called or errored.
"""
def __init__(self, maintainHistory=False):
self._maintainHistory = maintainHistory
if maintainHistory:
self.history = []
self.successCount = self.failureCount = self.pendingCount = 0
self._observers = []
def _makeCallbacks(self, index):
def callback(value):
self.pendingCount -= 1
self.successCount += 1
event = (index, True, value)
if self._maintainHistory:
self.history.append(event)
for observer in self._observers:
observer(*event)
return value
def errback(value):
self.pendingCount -= 1
self.failureCount += 1
event = (index, False, value)
if self._maintainHistory:
self.history.append(event)
for observer in self._observers:
observer(*event)
return value
return (callback, errback)
def append(self, deferred):
"""
Monitor a deferred.
@param deferred: An instance of L{twisted.internet.defer.Deferred}.
@return: the passed deferred.
"""
index = self.successCount + self.failureCount + self.pendingCount
callback, errback = self._makeCallbacks(index)
self.pendingCount += 1
return deferred.addCallbacks(callback, errback)
def observe(self, observer, replayHistory=False):
"""
Add an observer function that will be called (as below) with details
of deferred firings.
@param observer: a C{function} that will be called with 3 arguments
each time one of the observed deferreds in the set fires. The
arguments will be:
- The index of the deferred that fired.
- C{True} if the deferred was called, C{False} if it errored.
- The value passed to the callback or errback.
@param replayHistory: if C{True}, the history of deferred firings
that occurred prior to this observer being added will be sent
to the observer. If no history is being maintained, C{RuntimeError}
will be raised.
"""
if replayHistory:
if self._maintainHistory:
for event in self.history:
observer(*event)
else:
raise RuntimeError('Cannot replay non-existent event history '
'to new observer')
self._observers.append(observer)
| 40.355556 | 79 | 0.618117 |
acf12968ed5c7a081b7c0f74cef41bf5fc2039d4 | 19,079 | py | Python | traincrf.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 48 | 2018-05-15T12:46:36.000Z | 2021-03-11T09:34:10.000Z | traincrf.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 1 | 2018-10-28T21:11:47.000Z | 2018-10-31T20:31:09.000Z | traincrf.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 6 | 2018-07-03T01:28:41.000Z | 2020-01-23T13:25:49.000Z | from __future__ import division, print_function
import argparse
import numpy as np
import pdb
import os
import time
import random
import factorial_crf_tagger
import utils
import unit
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Command-line arguments: data location, optimizer/model hyperparameters,
# language setup, ablations (no_transitions/no_pairwise), and run modes
# (test / visualize / unit_test / gpu).
parser = argparse.ArgumentParser()
parser.add_argument("--treebank_path", type=str,
                    default="/projects/tir2/users/cmalaviy/ud_exp/ud-treebanks-v2.1/")
parser.add_argument("--optim", type=str, default='adam', choices=["sgd","adam","adagrad"])
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--emb_dim", type=int, default=128)
parser.add_argument("--hidden_dim", type=int, default=256)
parser.add_argument("--mlp_dim", type=int, default=128)
parser.add_argument("--n_layers", type=int, default=2)
parser.add_argument("--dropout", type=float, default=0.2)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--langs", type=str, default="uk",
                    help="Languages separated by delimiter '/' with last language being target language")
parser.add_argument("--tgt_size", type=int, default=None,
                    help="Number of training sentences for target language")
parser.add_argument("--model_name", type=str, default="model_dcrf")
parser.add_argument("--no_transitions", action='store_true')
parser.add_argument("--no_pairwise", action='store_true')
parser.add_argument("--continue_train", action='store_true')
parser.add_argument("--model_type", type=str, default="baseline", choices=["universal","joint","mono","specific","baseline"])
parser.add_argument("--sum_word_char", action='store_true')
parser.add_argument("--sent_attn", action='store_true')
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--test", action='store_true')
parser.add_argument("--visualize", action='store_true')
parser.add_argument("--gpu", action='store_true')
parser.add_argument("--unit_test", action='store_true')
parser.add_argument("--unit_test_args", type=str, default="2,2,2")
parser.add_argument("--seed", type=int, default=42)
args = parser.parse_args()
print(args)
# Set seeds
torch.manual_seed(args.seed)
random.seed(args.seed)

# Last language in the '/'-separated list is the target language.
langs = args.langs.split("/")
lang_to_code, code_to_lang = utils.get_lang_code_dicts()

# Set model name (encodes model type, languages, and ablation flags so
# checkpoints from different configurations don't collide).
args.model_name += "_" + args.model_type + "".join(["_" + l for l in langs])
# if args.sum_word_char:
#     args.model_name += "-wc_sum"
if args.sent_attn:
    args.model_name += "-sent_attn"
if args.tgt_size:
    args.model_name += "-" + str(args.tgt_size)
if args.no_transitions:
    args.model_name += "-no_transitions"
if args.no_pairwise:
    args.model_name += "-no_pairwise"

# Get training data
print("Loading training data...")
training_data_langwise, train_tgt_labels = utils.read_conll(args.treebank_path, langs, code_to_lang, tgt_size=args.tgt_size, train_or_dev="train")
training_data = []
train_lang_ids = []
# labels_to_ix = train_tgt_labels
unique_tags = utils.find_unique_tags(train_tgt_labels, null_label=True)
print("Number of unique tags: %d" % unique_tags.size())
# unique_tags.printTags()

# Oversample target language data (low-resource setting: 100 target
# sentences are repeated 10x in multilingual training).
if args.tgt_size==100 and args.model_type!="mono":
    training_data_langwise[langs[-1]] = training_data_langwise[langs[-1]] * 10

# Add null labels to tag sets in training data
training_data_langwise = utils.addNullLabels(training_data_langwise, langs, unique_tags)

# Create batches for training: each language's sentences are sorted by
# length, concatenated, and (start, end) index pairs are stored per batch.
train_order = []
train_lang_ids = []
startIdx = 0
for l in langs:
    training_data_langwise[l], lang_ids = utils.sortbylength(training_data_langwise[l], [l]*len(training_data_langwise[l]))
    if args.batch_size != 1:
        train_order += utils.get_train_order(training_data_langwise[l], args.batch_size, startIdx=startIdx)
    training_data += training_data_langwise[l]
    train_lang_ids += [l]*len(training_data_langwise[l])
    startIdx = len(training_data)

print("%d sentences in training set" %len(training_data))

# Unit-test mode replaces the training data with a small synthetic sample.
if args.unit_test:
    training_data = []
    no_tags, no_labels, no_timesteps = [int(arg) for arg in args.unit_test_args.strip().split(",")]
    training_data, train_tgt_labels = unit.create_sample_data(int(no_tags), [int(no_labels)]*int(no_tags), int(no_timesteps))
    # training_data, train_tgt_labels = unit.create_sample_data(int(no_tags), [2,3], int(no_timesteps))
    training_data = [training_data]

# Dev data is loaded for the target language only.
dev_data_langwise, dev_tgt_labels = utils.read_conll(args.treebank_path, [langs[-1]], code_to_lang, train_or_dev="dev")
# Add null labels to tag sets in dev data
dev_data_langwise = utils.addNullLabels(dev_data_langwise, [langs[-1]], unique_tags)
dev_data = dev_data_langwise[langs[-1]]
dev_lang_ids = [langs[-1]]*len(dev_data)

## Sort train/valid set before minibatching
dev_data, dev_lang_ids = utils.sortbylength(dev_data, dev_lang_ids)

if args.test:
    test_lang = langs[-1]
    test_data_langwise, test_tgt_labels = utils.read_conll(args.treebank_path, [test_lang], code_to_lang, train_or_dev="test", test=True)
    test_data_langwise = utils.addNullLabels(test_data_langwise, [test_lang], unique_tags)
    test_data = test_data_langwise[test_lang]
    test_data, test_lang_ids = utils.sortbylength(test_data, [langs[-1]]*len(test_data))

# Store starting index of each minibatch (batch_size==1 degenerates to
# one sentence per "batch").
if args.batch_size != 1:
    print("Training Set: %d batches" %len(train_order))
    dev_order = utils.get_train_order(dev_data, args.batch_size)
    print("Dev Set: %d batches" %len(dev_order))
    if args.test:
        test_order = utils.get_train_order(test_data, args.batch_size)
        print("Test Set: %d batches" %len(test_order))
else:
    train_order = [(i,i) for i in range(len(training_data))]
    dev_order = [(i,i) for i in range(len(dev_data))]
    if args.test:
        test_order = [(i,i) for i in range(len(test_data))]

# Build word and character dictionaries (plus word frequencies) from the
# training set; "UNK" is appended as the fallback index for both vocabularies.
word_to_ix = {}
char_to_ix = {}
word_freq = {}
for sent, _ in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
        if word_to_ix[word] not in word_freq:
            word_freq[word_to_ix[word]] = 1
        else:
            word_freq[word_to_ix[word]] += 1
        for char in word:
            if char not in char_to_ix:
                char_to_ix[char] = len(char_to_ix)
word_to_ix["UNK"] = len(word_to_ix)
char_to_ix["UNK"] = len(char_to_ix)
def main():
    """Train (or load) the factorial-CRF tagger, evaluating on dev every few
    epochs with early stopping; optionally visualize learned weights and run
    final test-set evaluation. Uses the module-level data/vocab globals.
    NOTE(review): written for Python 2 (xrange) and an old PyTorch API
    (.data[0], Variable-era) — confirm the runtime environment matches.
    """
    # Train from scratch (or resume) unless a saved model already exists.
    if not os.path.isfile(args.model_name) or args.continue_train:
        if args.continue_train:
            print("Loading tagger model from " + args.model_name + "...")
            tagger_model = torch.load(args.model_name, map_location=lambda storage, loc: storage)
            if args.gpu:
                tagger_model = tagger_model.cuda()
        else:
            print("Creating new model...")
            tagger_model = factorial_crf_tagger.DynamicCRF(args, word_freq, langs, len(char_to_ix), \
                                                           len(word_to_ix), unique_tags)
            if args.gpu:
                tagger_model = tagger_model.cuda()

        # Optional belief-propagation unit tests on dummy LSTM features.
        if args.unit_test:
            tests = unit.TestBP()
            labelSum = sum([tag.size() for tag in tagger_model.uniqueTags])
            # Create dummy LSTM features
            lstm_feats = utils.get_var(torch.Tensor(torch.randn(len(training_data[0][0]), labelSum)), args.gpu)
            tests.setUp(tagger_model, training_data[0][1], len(training_data[0][0]), lstm_feats)

        loss_function = nn.NLLLoss()
        # Provide (N,C) log probability values as input
        # loss_function = nn.CrossEntropyLoss()

        if args.optim=="sgd":
            optimizer = optim.SGD(tagger_model.parameters(), lr=1.0)
        elif args.optim=="adam":
            optimizer = optim.Adam(tagger_model.parameters())
        elif args.optim=="adagrad":
            optimizer = optim.Adagrad(tagger_model.parameters())

        print("Training FCRF-LSTM model...")
        patience_counter = 0
        prev_avg_tok_accuracy = 0
        for epoch in xrange(args.epochs):
            accuracies = []
            sent = 0
            batch_idx = 0
            tokens = 0
            cum_loss = 0
            correct = 0
            random.shuffle(train_order)
            print("Starting epoch %d .." %epoch)
            start_time = time.time()

            for start_idx, end_idx in train_order:
                # Slice out the current minibatch of (sentence, tags) pairs.
                train_data = training_data[start_idx : end_idx + 1]
                train_sents = [elem[0] for elem in train_data]
                morph_sents = [elem[1] for elem in train_data]
                lang_ids = train_lang_ids[start_idx : end_idx + 1]
                sent += end_idx - start_idx + 1
                tokens += sum([len(sentence) for sentence in train_sents])
                batch_idx += 1

                # Periodic progress report.
                if batch_idx%5==0:
                    print("[Epoch %d] \
                    Sentence %d/%d, \
                    Tokens %d \
                    Cum_Loss: %f \
                    Time: %f \
                    Tokens/Sec: %d"
                          # Average Accuracy: %f"
                          % (epoch, sent, len(training_data), tokens,
                             cum_loss/tokens, time.time() - start_time, tokens/(time.time()-start_time)))
                          # , correct/tokens))

                tagger_model.zero_grad()

                # Encode each word as a character-index sequence; the
                # 'universal' model brackets each word with its language id.
                sents_in = []
                for i, sentence in enumerate(train_sents):

                    sent_in = []
                    lang_id = []
                    if args.model_type=="universal":
                        lang_id = [lang_ids[i]]

                    for word in sentence:
                        s_appended_word = lang_id + [c for c in word] + lang_id
                        word_in = utils.prepare_sequence(s_appended_word, char_to_ix, args.gpu)
                        # targets = utils.prepare_sequence(s_appended_word[1:], char_to_ix, args.gpu)
                        sent_in.append(word_in)

                    sents_in.append(sent_in)
                    # sents_in = torch.stack(sent_in)

                tagger_model.char_hidden = tagger_model.init_hidden()
                tagger_model.hidden = tagger_model.init_hidden()

                # Optionally add word-level indices (summed with char repr).
                if args.sum_word_char:
                    all_word_seq = []
                    for sentence in train_sents:
                        word_seq = utils.prepare_sequence(sentence, word_to_ix, args.gpu)
                        all_word_seq.append(word_seq)
                else:
                    all_word_seq = None

                if args.model_type=="specific" or args.model_type=="joint":
                    lstm_feat_sents, graph, maxVal = tagger_model(sents_in, morph_sents, word_idxs=all_word_seq, langs=lang_ids)
                else:
                    lstm_feat_sents, graph, maxVal = tagger_model(sents_in, morph_sents, word_idxs=all_word_seq)

                # Skip parameter updates if marginals are not within a threshold
                if maxVal > 10.00:
                    print("Skipping parameter updates...")
                    continue

                # Compute the loss, gradients, and update the parameters
                all_factors_batch = []
                for k in range(len(train_sents)):
                    all_factors = tagger_model.get_scores(graph, morph_sents[k], lstm_feat_sents[k], k)
                    all_factors_batch.append(all_factors)

                loss = tagger_model.compute_loss(all_factors_batch, loss_function)

                # print("Loss:", loss)
                cum_loss += loss.cpu().data[0]
                loss.backward()
                # tagger_model.gradient_check(all_factors_batch[0])
                optimizer.step()

            print("Loss: %f" % loss.cpu().data.numpy())
            print("Saving model..")
            torch.save(tagger_model, args.model_name)
            # Evaluate on dev every 4th epoch, with patience-based early stop.
            if (epoch+1)%4==0:
                print("Evaluating on dev set...")
                avg_tok_accuracy, f1_score = eval_on_dev(tagger_model, curEpoch=epoch)

                # Early Stopping
                if avg_tok_accuracy <= prev_avg_tok_accuracy:
                    patience_counter += 1
                    if patience_counter==args.patience:
                        print("Model hasn't improved on dev set for %d epochs. Stopping Training." % patience_counter)
                        break
                prev_avg_tok_accuracy = avg_tok_accuracy

    else:
        # A checkpoint exists and we are not resuming: just load it.
        print("Loading tagger model from " + args.model_name + "...")
        tagger_model = torch.load(args.model_name, map_location=lambda storage, loc: storage)
        if args.gpu:
            tagger_model = tagger_model.cuda()
        else:
            tagger_model.gpu = False

    if args.visualize:
        print("[Visualization Mode]")
        utils.plot_heatmap(unique_tags, tagger_model.pairwise_weights, "pair")
        #utils.plot_heatmap(unique_tags, tagger_model.transition_weights, "trans")
        #utils.plot_heatmap(unique_tags, tagger_model.lang_pairwise_weights, "pair", lang_idx=1)
        print("Stored plots in figures/ directory!")

    if args.test:
        avg_tok_accuracy, f1_score = eval_on_dev(tagger_model, dev_or_test="test")
def eval_on_dev(tagger_model, curEpoch=None, dev_or_test="dev"):
    """Evaluate ``tagger_model`` on the dev or test split.

    Relies on module-level state: ``dev_order``/``test_order`` (batch
    start/end index pairs), ``dev_data``/``test_data`` (sentence, morph
    pairs), plus ``args``, ``langs``, ``char_to_ix``, ``word_to_ix`` and
    ``utils`` — TODO confirm these are defined at module scope.

    :param tagger_model: trained tagger; also provides ``getBestSequence``.
    :param curEpoch: current epoch number (unused here; kept for callers).
    :param dev_or_test: ``"dev"`` or ``"test"``; selects the split and
        whether per-sentence results are written to disk.
    :return: tuple ``(avg_tok_accuracy, f1_score)``.
    """
    correct = 0
    toks = 0
    all_out_tags = np.array([])
    all_targets = np.array([])
    eval_order = dev_order if dev_or_test == "dev" else test_order
    eval_data = dev_data if dev_or_test == "dev" else test_data
    print("Starting evaluation on %s set... (%d sentences)" % (dev_or_test, len(eval_data)))
    lang_id = []
    if args.model_type == "universal":
        lang_id = [langs[-1]]
    for start_idx, end_idx in eval_order:
        cur_eval_data = eval_data[start_idx : end_idx + 1]
        eval_sents = [elem[0] for elem in cur_eval_data]
        morph_sents = [elem[1] for elem in cur_eval_data]
        # Encode each word as a character sequence, optionally wrapped in
        # language-id tokens (universal model only).
        sents_in = []
        for i, sentence in enumerate(eval_sents):
            sent_in = []
            for word in sentence:
                s_appended_word = lang_id + [c for c in word] + lang_id
                word_in = utils.prepare_sequence(s_appended_word, char_to_ix, args.gpu)
                sent_in.append(word_in)
            sents_in.append(sent_in)
        tagger_model.zero_grad()
        tagger_model.char_hidden = tagger_model.init_hidden()
        tagger_model.hidden = tagger_model.init_hidden()
        all_word_seq = []
        for sentence in eval_sents:
            word_seq = utils.prepare_sequence(sentence, word_to_ix, args.gpu)
            all_word_seq.append(word_seq)
        if args.model_type == "specific" or args.model_type == "joint":
            lstm_feats, graph, maxVal = tagger_model(sents_in, morph_sents, word_idxs=all_word_seq, langs=[langs[-1]] * len(sents_in), test=True)
        else:
            lstm_feats, graph, maxVal = tagger_model(sents_in, morph_sents, word_idxs=all_word_seq, test=True)
        # Decode the best tag sequence for every sentence in the batch and
        # accumulate token-level accuracy counts.
        for k in range(len(eval_sents)):
            hypSeq = tagger_model.getBestSequence(graph, k)
            targets = [utils.unfreeze_dict(tags) for tags in morph_sents[k]]
            correct += utils.getCorrectCount(targets, hypSeq)
            toks += len(eval_sents[k])
            all_out_tags = np.append(all_out_tags, hypSeq)
            all_targets = np.append(all_targets, targets)
    avg_tok_accuracy = correct / toks
    prefix = args.model_name
    prefix += "_" + dev_or_test
    if args.sent_attn:
        prefix += "sent_attn"
    if args.tgt_size:
        prefix += "_" + str(args.tgt_size)
    write = dev_or_test == "test"
    f1_score, f1_micro_score = utils.computeF1(all_out_tags, all_targets, prefix, write_results=write)
    print("Test Set Accuracy: %f" % avg_tok_accuracy)
    print("Test Set Avg F1 Score (Macro): %f" % f1_score)
    print("Test Set Avg F1 Score (Micro): %f" % f1_micro_score)
    if write:
        # BUGFIX: mode was 'ab' (binary append) while str objects are
        # written below, which raises TypeError on Python 3; text append
        # mode is what the writes require (cf. the commented-out variant
        # elsewhere in this file, which uses 'a').
        with open(prefix + '_results_f1.txt', 'a') as file:
            file.write("\nAccuracy: " + str(avg_tok_accuracy) + "\n")
            for target, hyp in zip(all_targets, all_out_tags):
                file.write(str(target) + "\n")
                file.write(str(hyp) + "\n")
    return avg_tok_accuracy, f1_score
# def eval_on_test(tagger_model):
# correct = 0
# toks = 0
# all_out_tags = np.array([])
# all_targets = np.array([])
# print("Starting evaluation on test set... (%d sentences)" % (len(test_data)))
# lang_id = []
# if args.model_type=="universal":
# lang_id = [lang]
# for sentence, morph in test_data:
# tagger_model.zero_grad()
# tagger_model.char_hidden = tagger_model.init_hidden()
# tagger_model.hidden = tagger_model.init_hidden()
# sent_in = []
# for word in sentence:
# s_appended_word = lang_id + [c for c in word] + lang_id
# word_in = utils.prepare_sequence(s_appended_word, char_to_ix, args.gpu)
# sent_in.append(word_in)
# # sentence_in = utils.prepare_sequence(sentence, word_to_ix, args.gpu)
# # targets = utils.prepare_sequence(morph, labels_to_ix, args.gpu)
# # if args.sum_word_char:
# word_seq = [utils.prepare_sequence(sentence, word_to_ix, args.gpu)]
# # else:
# # word_seq = None
# if args.model_type=="specific" or args.model_type=="joint":
# lstm_feats, graph, maxVal = tagger_model([sent_in], [morph], word_idxs=word_seq, lang=langs[-1], test=True)
# else:
# lstm_feats, graph, maxVal = tagger_model([sent_in], [morph], word_idxs=word_seq, test=True)
# hypSeq = tagger_model.getBestSequence(graph, 0)
# targets = [utils.unfreeze_dict(tags) for tags in morph]
# # correct += np.count_nonzero(out_tags==targets)
# correct += utils.getCorrectCount(targets, hypSeq)
# toks += len(sentence)
# all_out_tags = np.append(all_out_tags, hypSeq)
# all_targets = np.append(all_targets, targets)
# avg_tok_accuracy = correct / toks
# prefix = args.model_type + "_"
# if args.sum_word_char:
# prefix = "wc-sum-hf_" + prefix
# prefix += "-".join([l for l in langs]) + "_test"
# if args.sent_attn:
# prefix += "sent_attn"
# if args.tgt_size:
# prefix += "_" + str(args.tgt_size)
# f1_score, f1_micro_score = utils.computeF1(all_out_tags, all_targets, prefix, write_results=True)
# print("Test Set Accuracy: %f" % avg_tok_accuracy)
# print("Test Set Avg F1 Score (Macro): %f" % f1_score)
# print("Test Set Avg F1 Score (Micro): %f" % f1_micro_score)
# with open(prefix + '_results_f1.txt', 'a') as file:
# file.write("\nAccuracy: " + str(avg_tok_accuracy) + "\n")
# return avg_tok_accuracy, f1_score
# Script entry point: run the training/evaluation pipeline defined above.
if __name__=="__main__":
    main()
| 39.582988 | 146 | 0.629436 |
acf1297fa86fea6c21edafd272850667e3ac4371 | 892 | py | Python | src/sentry/api/endpoints/group_stats.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | src/sentry/api/endpoints/group_stats.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/api/endpoints/group_stats.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2018-07-02T09:46:44.000Z | 2018-07-02T09:46:44.000Z | from __future__ import absolute_import
from rest_framework.response import Response
from sentry import tsdb
from sentry.api.base import EnvironmentMixin, StatsMixin
from sentry.api.bases.group import GroupEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import Environment
class GroupStatsEndpoint(GroupEndpoint, EnvironmentMixin, StatsMixin):
def get(self, request, group):
try:
environment_id = self._get_environment_id_from_request(
request,
group.project.organization_id,
)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
data = tsdb.get_range(
model=tsdb.models.group, keys=[group.id], **self._parse_args(
request,
environment_id,
)
)[group.id]
return Response(data)
| 29.733333 | 73 | 0.674888 |
acf12a2b07f1d82925a4e264ca68c6946730f760 | 6,133 | py | Python | flocker/common/_ipc.py | wallnerryan/flocker-profiles | bcd3ced8edf4af86a68070ff6a714c45f9f4913b | [
"Apache-2.0"
] | null | null | null | flocker/common/_ipc.py | wallnerryan/flocker-profiles | bcd3ced8edf4af86a68070ff6a714c45f9f4913b | [
"Apache-2.0"
] | null | null | null | flocker/common/_ipc.py | wallnerryan/flocker-profiles | bcd3ced8edf4af86a68070ff6a714c45f9f4913b | [
"Apache-2.0"
] | null | null | null | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Inter-process communication for flocker.
"""
from subprocess import Popen, PIPE, check_output, CalledProcessError
from contextlib import contextmanager
from io import BytesIO
from threading import current_thread
from pipes import quote
from zope.interface import Interface, implementer
from characteristic import with_cmp, with_repr
class INode(Interface):
    """
    A remote node with which this node can communicate.
    """

    def run(remote_command):
        """Context manager that runs a remote command and return its stdin.

        The returned file-like object will be closed by this object.

        :param remote_command: ``list`` of ``bytes``, the command to run
            remotely along with its arguments.

        :return: file-like object that can be written to.
        """

    def get_output(remote_command):
        """Run a remote command and return its stdout.

        May raise an exception if an error of some sort occurred.

        :param remote_command: ``list`` of ``bytes``, the command to run
            remotely along with its arguments.

        :return: ``bytes`` of stdout from the remote command.
        """
@with_cmp(["initial_command_arguments"])
@with_repr(["initial_command_arguments"])
@implementer(INode)
class ProcessNode(object):
    """
    Talk to a remote node by spawning a local subprocess (typically ssh).
    """

    def __init__(self, initial_command_arguments, quote=lambda d: d):
        """
        :param initial_command_arguments: ``tuple`` of ``bytes``, arguments
            prepended to every command passed to ``run()``/``get_output()``.
        :param quote: Callable applied to each non-initial argument,
            mapping ``bytes`` to ``bytes``. Identity by default.
        """
        self.initial_command_arguments = tuple(initial_command_arguments)
        self._quote = quote

    def _full_command(self, remote_command):
        # Prefix the fixed arguments and quote the per-call ones.
        return self.initial_command_arguments + tuple(
            map(self._quote, remote_command))

    @contextmanager
    def run(self, remote_command):
        proc = Popen(self._full_command(remote_command), stdin=PIPE)
        try:
            yield proc.stdin
        finally:
            proc.stdin.close()
            status = proc.wait()
            if status:
                # Capturing stderr would be better:
                # https://clusterhq.atlassian.net/browse/FLOC-155
                raise IOError("Bad exit", remote_command, status)

    def get_output(self, remote_command):
        try:
            return check_output(self._full_command(remote_command))
        except CalledProcessError as e:
            # Capturing stderr would be better:
            # https://clusterhq.atlassian.net/browse/FLOC-155
            raise IOError("Bad exit", remote_command, e.returncode, e.output)

    @classmethod
    def using_ssh(cls, host, port, username, private_key):
        """Build a ``ProcessNode`` whose commands go over SSH.

        :param bytes host: The hostname or IP.
        :param int port: The port number of the SSH server.
        :param bytes username: The username to SSH as.
        :param FilePath private_key: Path to private key to use when talking
            to the SSH server.

        :return: ``ProcessNode`` instance that communicates over SSH.
        """
        return cls(initial_command_arguments=(
            b"ssh",
            b"-q",  # suppress warnings
            b"-i", private_key.path,
            b"-l", username,
            # Unknown hosts are accepted; SSH will be replaced before
            # Flocker is production-ready and security becomes a concern.
            b"-o", b"StrictHostKeyChecking=no",
            # With ControlMaster set the tests hang, because OpenSSH never
            # closes the connection to the test server.
            b"-o", b"ControlMaster=no",
            # Skip GSSAPI auth (a slow DNS lookup before failing on some
            # systems, notably Ubuntu) and go straight to key-based auth.
            b"-o", b"PreferredAuthentications=publickey",
            b"-p", b"%d" % (port,), host), quote=quote)
@implementer(INode)
class FakeNode(object):
    """
    An in-memory stand-in for a node, useful for testing: commands are
    recorded instead of executed.

    :ivar remote_command: Arguments of the most recent ``run()`` or
        ``get_output()`` call.
    :ivar stdin: ``BytesIO`` handed out by the last ``run()``.
    :ivar thread_id: Identifier of the thread the last call ran in.
    """

    def __init__(self, outputs=()):
        """
        :param outputs: Results returned by successive ``get_output()``
            calls; an ``Exception`` instance is raised, anything else is
            returned as-is.
        """
        self._outputs = list(outputs)

    @contextmanager
    def run(self, remote_command):
        """Record the command and expose an in-memory "stdin"."""
        self.thread_id = current_thread().ident
        self.stdin = BytesIO()
        self.remote_command = remote_command
        yield self.stdin
        # Rewind so the caller (a test) can read back what was written.
        self.stdin.seek(0, 0)

    def get_output(self, remote_command):
        """Pop and return (or raise) the next canned output."""
        self.thread_id = current_thread().ident
        self.remote_command = remote_command
        head = self._outputs.pop(0)
        if isinstance(head, Exception):
            raise head
        return head
| 35.247126 | 79 | 0.622208 |
acf12a95945e96ca8fb96c874eefe8a981873f59 | 3,613 | py | Python | forecasting/forecast/update_forecast_models.py | jdvelasq/demand-forecast-using-time-series-clustering | 5d08f225076dbd370944202143e85e46a8962655 | [
"MIT"
] | null | null | null | forecasting/forecast/update_forecast_models.py | jdvelasq/demand-forecast-using-time-series-clustering | 5d08f225076dbd370944202143e85e46a8962655 | [
"MIT"
] | null | null | null | forecasting/forecast/update_forecast_models.py | jdvelasq/demand-forecast-using-time-series-clustering | 5d08f225076dbd370944202143e85e46a8962655 | [
"MIT"
] | 1 | 2021-07-17T19:28:24.000Z | 2021-07-17T19:28:24.000Z | from common.transform_data import decompose_series_with_periods
import pandas as pd
import numpy as np
import pickle
import os
__author__ = "Jose Fernando Montoya Cardona"
__credits__ = ["Jose Fernando Montoya Cardona"]
__email__ = "jomontoyac@unal.edu.co"
def load_model_cluster(name_cluster, dir_model_train, transform='decompose-Fourier'):
    """Load the trained forecasting pipeline for a single cluster.

    The cluster id and the decomposition periods are parsed out of each
    model file name: the second-to-last ``_``-separated chunk holds
    ``<tag>-<cluster>`` and the fourth-to-last holds ``<tag>-p1-p2-...``.

    :param name_cluster: cluster identifier to look up (string).
    :param dir_model_train: comma-separated list of model file paths.
    :param transform: transformation used at training time; only
        ``'decompose-Fourier'`` and ``'decompose'`` are supported.
    :return: tuple ``(pipeline, list_decompose)`` — the unpickled model and
        the list of decomposition periods parsed from the file name.
    :raises ValueError: if the cluster has no trained model, or the
        ``transform`` value is unknown.
    """
    def _unpickle(path):
        # Close the file handle deterministically; the previous
        # pickle.load(open(path, 'rb')) leaked the descriptor.
        with open(path, 'rb') as model_file:
            return pickle.load(model_file)

    n_cluster = [name_cluster]
    files_models = dir_model_train.split(',')
    df_models = pd.DataFrame(files_models, columns=['name_file'])
    df_models['num_cluster'] = df_models.name_file.apply(
        lambda x: x.split(os.sep)[-1].split('_')[-2].split('-')[1])
    # .copy() so the column assignments below operate on an independent
    # frame and do not trigger pandas' SettingWithCopyWarning.
    df_cluster_filter = df_models.query('num_cluster in @n_cluster').copy()
    if df_cluster_filter.shape[0] == 0:
        raise ValueError(
            'the {} number cluster does not exist in models training. Must be retrain model.'.format(name_cluster))
    if transform == 'decompose-Fourier' or transform == 'decompose':
        df_cluster_filter['list_decompose'] = df_cluster_filter.name_file.apply(
            lambda x: x.split(os.sep)[-1].split('_')[-4].split('-')[1:])
        df_cluster_filter['pipeline'] = df_cluster_filter.name_file.apply(_unpickle)
        return df_cluster_filter.pipeline.values[0], df_cluster_filter.list_decompose.values[0]
    raise ValueError('invalid variable transform {}.'.format(transform))
def forecast_arima_model(data_train, num_update, num_forecast, dir_model_train, transform='decompose-Fourier'
                         , type_decompose='additive', filter_decompose=None):
    """Forecast one cluster's series using its stored pipeline.

    Note: this block's final line carried corrupted trailing residue in the
    source dump; the logic itself is unchanged.

    :param data_train: pandas Series of training values; ``.name`` holds the
        cluster id — TODO confirm against callers.
    :param num_update: number of trailing observations to feed into
        ``pipeline.update()``; 0 means forecast without updating.
    :param num_forecast: number of steps to forecast.
    :param dir_model_train: comma-separated model file paths (see
        ``load_model_cluster``).
    :param transform: must be ``'decompose-Fourier'`` or ``'decompose'``.
    :param type_decompose: decomposition type passed through to
        ``decompose_series_with_periods``.
    :param filter_decompose: unused; kept for interface compatibility.
    :return: tuple ``(forecast, pipeline)``.
    :raises ValueError: if ``transform`` is not supported.
    """
    print('number_cluster: ', data_train.name)
    pipeline, list_decompose = load_model_cluster(data_train.name, dir_model_train, transform=transform)
    # Drop NaNs before decomposing/forecasting.
    data_train = np.array(data_train)[~np.isnan(np.array(data_train))]
    if transform == 'decompose-Fourier' or transform == 'decompose':
        if num_update == 0:
            # Fresh forecast: the seasonal part comes from the decomposition,
            # the trend+residual part from the pipeline; 'gap' extra steps are
            # predicted and then discarded to align the two.
            forecast_seasonal, trend_residual, gap = decompose_series_with_periods(
                data=data_train,
                list_periods=list_decompose,
                type_decompose=type_decompose,
                num_forecast=num_forecast)
            forecast_trend_residual = np.array(pipeline.predict(num_forecast + gap))
            forecast = forecast_seasonal + forecast_trend_residual[gap:]
            return forecast, pipeline
        else:
            # Update path: re-decompose the truncated series and feed the last
            # num_update trend/residual points into the pipeline before
            # forecasting.
            forecast_seasonal, trend_residual_new, gap = decompose_series_with_periods(
                data=data_train[num_update:],
                list_periods=list_decompose,
                type_decompose=type_decompose,
                num_forecast=num_forecast)
            data_update = trend_residual_new[-num_update:]
            print('\t\t Executing update pipeline...')
            pipeline.update(data_update, maxiter=50)
            print('\t\t Finish update pipeline.')
            forecast_trend_residual = np.array(pipeline.predict(num_forecast + gap))
            forecast = forecast_seasonal + forecast_trend_residual[gap:]
            return forecast, pipeline
    else:
        raise ValueError('invalid variable transform {}.'.format(transform))
acf12b333e08a84f449a1cf203619a525e091c2f | 551 | py | Python | src/test/resources/test-blueprint/capability_python/Scripts/python/SampleRAProcessor.py | eliezio/blueprintsprocessor | 8e5d445d627fb36106468fed1d8d21988f59f976 | [
"Apache-2.0"
] | 1 | 2019-07-16T10:51:00.000Z | 2019-07-16T10:51:00.000Z | src/test/resources/test-blueprint/capability_python/Scripts/python/SampleRAProcessor.py | excelsior-esy/blueprintsprocessor | 8e5d445d627fb36106468fed1d8d21988f59f976 | [
"Apache-2.0"
] | null | null | null | src/test/resources/test-blueprint/capability_python/Scripts/python/SampleRAProcessor.py | excelsior-esy/blueprintsprocessor | 8e5d445d627fb36106468fed1d8d21988f59f976 | [
"Apache-2.0"
] | null | null | null | from abstract_ra_processor import AbstractRAProcessor
from blueprint_constants import *
class SampleRAProcessor(AbstractRAProcessor):
def __init__(self):
AbstractRAProcessor.__init__(self)
def process(self, resource_assignment):
print "Processing calling.." + PROPERTY_BLUEPRINT_BASE_PATH
self.set_resource_data_value(resource_assignment, "")
return None
def recover(self, runtime_exception, resource_assignment):
print "Recovering calling.." + PROPERTY_BLUEPRINT_BASE_PATH
return None
| 30.611111 | 67 | 0.753176 |
acf12b51e149784566f09078ee76116c60aa8cf8 | 3,273 | py | Python | servicecatalog_puppet/template_builder/hub/bootstrap_region.py | richardmilnerwatts/aws-service-catalog-puppet | cd866860073b0e43722cc941bacd38df4e84f7a1 | [
"Apache-2.0"
] | 66 | 2019-04-23T12:10:27.000Z | 2022-03-31T19:47:09.000Z | servicecatalog_puppet/template_builder/hub/bootstrap_region.py | richardmilnerwatts/aws-service-catalog-puppet | cd866860073b0e43722cc941bacd38df4e84f7a1 | [
"Apache-2.0"
] | 283 | 2019-04-18T22:21:20.000Z | 2022-03-31T10:04:42.000Z | servicecatalog_puppet/template_builder/hub/bootstrap_region.py | richardmilnerwatts/aws-service-catalog-puppet | cd866860073b0e43722cc941bacd38df4e84f7a1 | [
"Apache-2.0"
] | 47 | 2019-05-14T12:45:57.000Z | 2022-03-25T17:54:22.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import troposphere as t
from troposphere import s3
from troposphere import sns
from troposphere import ssm
def get_template(version: str, default_region_value) -> t.Template:
    """Build the CloudFormation template that bootstraps one region of the
    ServiceCatalog-Puppet hub (master) account.

    :param version: framework version, embedded in the template description
        and stored in a regional SSM parameter.
    :param default_region_value: the hub's home region, stored in SSM so
        other components can discover it.
    :return: a ``troposphere.Template`` ready to be serialised.
    """
    description = f"""Bootstrap template used to bootstrap a region of ServiceCatalog-Puppet master
{{"version": "{version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master-region"}}"""
    template = t.Template(Description=description)

    # Template parameters, defaulted from the function arguments.
    version_parameter = template.add_parameter(
        t.Parameter("Version", Default=version, Type="String")
    )
    default_region_value_parameter = template.add_parameter(
        t.Parameter("DefaultRegionValue", Default=default_region_value, Type="String")
    )

    # SSM parameter advertising the hub's home region.
    template.add_resource(
        ssm.Parameter(
            "DefaultRegionParam",
            Name="/servicecatalog-puppet/home-region",
            Type="String",
            Value=t.Ref(default_region_value_parameter),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        )
    )
    # SSM parameter recording which framework version bootstrapped this region.
    version_ssm_parameter = template.add_resource(
        ssm.Parameter(
            "Param",
            Name="service-catalog-puppet-regional-version",
            Type="String",
            Value=t.Ref(version_parameter),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        )
    )
    # Versioned, encrypted, fully-private S3 bucket for pipeline artifacts.
    template.add_resource(
        s3.Bucket(
            "PipelineArtifactBucket",
            BucketName=t.Sub(
                "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                            SSEAlgorithm="AES256"
                        )
                    )
                ]
            ),
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
        )
    )
    # SNS topic for regional CloudFormation events, forwarded to the SQS
    # queue in the hub's home region.
    regional_product_topic = template.add_resource(
        sns.Topic(
            "RegionalProductTopic",
            DisplayName="servicecatalog-puppet-cloudformation-regional-events",
            TopicName="servicecatalog-puppet-cloudformation-regional-events",
            Subscription=[
                sns.Subscription(
                    Endpoint=t.Sub(
                        "arn:${AWS::Partition}:sqs:${DefaultRegionValue}:${AWS::AccountId}:servicecatalog-puppet-cloudformation-events"
                    ),
                    Protocol="sqs",
                )
            ],
        ),
    )

    template.add_output(
        t.Output("Version", Value=t.GetAtt(version_ssm_parameter, "Value"))
    )
    template.add_output(
        t.Output("RegionalProductTopic", Value=t.Ref(regional_product_topic))
    )
    return template
| 35.576087 | 135 | 0.601894 |
acf12bdf28f44076bcfe7f11332e2b2e1f861ab8 | 13,886 | py | Python | src/device_ftp.py | urpylka/temp-part-downloader | 9167148f27fefdfe56a7b1e2d84479cec56885c5 | [
"Apache-2.0"
] | 2 | 2019-02-18T14:04:30.000Z | 2019-06-06T08:39:33.000Z | src/device_ftp.py | urpylka/temp-part-downloader | 9167148f27fefdfe56a7b1e2d84479cec56885c5 | [
"Apache-2.0"
] | 2 | 2019-06-20T14:33:18.000Z | 2019-09-13T08:45:12.000Z | src/device_ftp.py | urpylka/temp-part-downloader | 9167148f27fefdfe56a7b1e2d84479cec56885c5 | [
"Apache-2.0"
] | 3 | 2019-08-02T12:58:13.000Z | 2019-09-09T16:36:15.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set ts=4 sw=4 et:
# Copyright 2018-2019 Artem Smirnov
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/stilliard/docker-pure-ftpd
# https://github.com/stilliard/docker-pure-ftpd/wiki/Basic-example-walk-through
import os
import time
import ftplib
from threading import RLock
from device_abstract import device_abstract
# class my_ftp(ftplib.FTP):
# # try:
# # import ssl
# # except ImportError:
# # _SSLSocket = None
# # else:
# # _SSLSocket = ssl.SSLSocket
# def my_storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
# """
# Пришлось исправить стандартный метод
# перестановкой вызова callback
# """
# self.voidcmd('TYPE I')
# with self.transfercmd(cmd, rest) as conn:
# while 1:
# buf = fp.read(blocksize)
# if not buf: break
# if callback: callback(buf)
# conn.sendall(buf)
# return self.voidresp()
# def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
# """Store a file in binary mode. A new port is created for you.
# Args:
# cmd: A STOR command.
# fp: A file-like object with a read(num_bytes) method.
# blocksize: The maximum data size to read from fp and send over
# the connection at once. [default: 8192]
# callback: An optional single parameter callable that is called on
# each block of data after it is sent. [default: None]
# rest: Passed to transfercmd(). [default: None]
# Returns:
# The response code.
# """
# self.voidcmd('TYPE I')
# with self.transfercmd(cmd, rest) as conn:
# while 1:
# buf = fp.read(blocksize)
# if not buf:
# break
# conn.sendall(buf)
# if callback:
# callback(buf)
# # shutdown ssl layer
# if _SSLSocket is not None and isinstance(conn, _SSLSocket):
# conn.unwrap()
# return self.voidresp()
# def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
# """Retrieve data in binary mode. A new port is created for you.
# Args:
# cmd: A RETR command.
# callback: A single parameter callable to be called on each
# block of data read.
# blocksize: The maximum number of bytes to read from the
# socket at one time. [default: 8192]
# rest: Passed to transfercmd(). [default: None]
# Returns:
# The response code.
# """
# self.voidcmd('TYPE I')
# with self.transfercmd(cmd, rest) as conn:
# while 1:
# data = conn.recv(blocksize)
# if not data:
# break
# callback(data)
# # shutdown ssl layer
# if _SSLSocket is not None and isinstance(conn, _SSLSocket):
# conn.unwrap()
# return self.voidresp()
class device_ftp(device_abstract):
"""
target = device_ftp("192.168.0.10", "test-1", "passwd", logging)
with open("/home/pi/flir/20181113_205519_20181113212352517.JPG", 'rb') as source_stream:
target.upload(source_stream, "20181113_205519_20181113212352517.JPG")
Можно сделать ввод количества параллельных соединений и сделать вместо блокировки семафор
Два раза пишет что FTP недоступен
TODO:
* Параллельная работа с FTP (отдельные коннекшены)
* Проверка hash-суммы
"""
_internal_lock = RLock()
_ftp = ftplib.FTP()
@staticmethod
def to_string(dic): return "device_ftp://" + dic["user"] + "@" + dic["host"] + ":~"
@staticmethod
def get_fields():
list = []
list.append("logger")
list.append("host")
list.append("user")
list.append("passwd")
return list
def __del__(self):
self._ftp.abort()
self._ftp.close()
def _connect(self):
self.is_remote_available.clear()
self._prefix = self.kwargs["user"] + "@" + self.kwargs["host"] + ": "
self.kwargs["logger"].info(self._prefix + "FTP is unavailble. All operations is lock")
while True:
time.sleep(1)
# starttime = time.time()
retry = False
try: self._ftp.voidcmd("NOOP")
except: retry = True
while retry:
try:
self._ftp.connect(self.kwargs["host"])
self._ftp.login(self.kwargs["user"], self.kwargs["passwd"])
retry = False
if not self.is_remote_available.is_set():
self.is_remote_available.set()
self.kwargs["logger"].info(self._prefix + "FTP is availble. All operations is unlock")
except ftplib.error_perm as ex_perm:
# retry = True
self.kwargs["logger"].error(self._prefix + "_connect(): " + str(ex_perm))
if self.is_remote_available.is_set():
self.is_remote_available.clear()
self.kwargs["logger"].info(self._prefix + "FTP is unavailble. All operations is lock")
except IOError as ex:
# retry = True
# self.kwargs["logger"].info("TARGET: Time disconnected - " + str(time.time() - starttime))
# ошибка 111 - если хост недоступен
self.kwargs["logger"].debug(self._prefix + "_connect(): " + str(ex))
if self.is_remote_available.is_set():
self.is_remote_available.clear()
self.kwargs["logger"].info(self._prefix + "FTP is unavailble. All operations is lock")
def get_size(self, device_path):
while 1:
with self._internal_lock:
self.is_remote_available.wait()
try:
self.is_remote_available.wait()
self._ftp.voidcmd('TYPE I')
response = self._ftp.size(device_path)
if not response is None:
return response
except Exception as ex:
# если файла еще нет, нужно продолжить с длиной в ноль
exc = str(ex)
if exc.startswith("550"):
self.kwargs["logger"].debug(self._prefix + "File was not uploaded to server yet: " + exc)
return 0
else:
raise Exception(self._prefix + "Can't get file size on ftp server: " + exc)
def upload(self, source_stream, device_path, chunk_size=8192):
# f_blocksize = 1024
# total_size = os.path.getsize(file_path)
# size_written = 0
# # http://qaru.site/questions/15601924/ftplib-storbinary-with-ftps-is-hangingnever-completing
# def handle(block):
# global size_written
# global total_size
# global f_blocksize
# size_written = size_written + f_blocksize if size_written + f_blocksize < total_size else total_size
# percent_complete = size_written / total_size * 100
# print("%s percent complete" %str(percent_complete))
# def _cb(self, buf):
# """
# Метод в первую очередь для ведения статистики количества
# записанный чанков в FTP
# Также можно считать количество информации записанной для ведения стастики
# """
# self.rest += len(self.buf)
# self.buf = buf
with self._internal_lock:
self.is_remote_available.wait()
self.kwargs["logger"].debug(self._prefix + "Uploading " + str(device_path))
while 1:
self.is_remote_available.wait()
try:
# без этого будет работать?
self._ftp.cwd(os.path.dirname(device_path))
already_sent = self.get_size(device_path) # already upload wo errors
self.kwargs["logger"].info(self._prefix + "Uploading " + str(device_path) + " Started w " + str(already_sent))
source_stream.seek(already_sent)
self.is_remote_available.wait()
# res = self._ftp.storbinary("STOR " + device_path, source_stream, blocksize=chunk_size, rest=already_sent)
self._ftp.voidcmd('TYPE I')
with self._ftp.transfercmd("STOR " + device_path, already_sent) as conn:
while 1:
# source_stream.show_stat()
buf = source_stream.read(chunk_size)
if not buf:
break
conn.sendall(buf)
# shutdown ssl layer
# if _SSLSocket is not None and isinstance(conn, _SSLSocket):
# conn.unwrap()
self.kwargs["logger"].debug(self._prefix + "Wrote to server: " + str(len(buf)))
# source_stream.show_stat()
res = self._ftp.voidresp()
if not res.startswith("200 I successfully done nothin"):
if not res.startswith("226 Transfer complete"):
raise Exception("File was not uploaded successful: " + res)
self.kwargs["logger"].debug(self._prefix + "End of the uploading")
break
except Exception as ex:
# [Errno 32] Broken pipe
# [Errno 104] Connection reset by peer
# Пустой Exception()
self.kwargs["logger"].error(self._prefix + "Uploading was interrupted: " + str(ex))
time.sleep(1)
def download(self, device_path, target_stream, chunk_size=8192):
with self._internal_lock:
self.is_remote_available.wait()
self.kwargs["logger"].debug(self._prefix + "Downloading " + str(device_path))
while 1:
self.is_remote_available.wait()
try:
# без этого будет работать?
self._ftp.cwd(os.path.dirname(device_path))
already_sent = target_stream.tell() # already upload wo errors
self.kwargs["logger"].info(self._prefix + "Resume w " + str(already_sent))
res = self._ftp.retrbinary("RETR " + device_path, target_stream.write, blocksize=chunk_size, rest=already_sent)
if not res.startswith("200 I successfully done nothin"):
if not res.startswith("226 Transfer complete"):
raise Exception("File was not uploaded successful: " + res)
break
except Exception as ex:
self.kwargs["logger"].error(self._prefix + "Downloading was interrupted: " + str(ex))
time.sleep(1)
def rename(self, old_path, new_path):
with self._internal_lock:
self.is_remote_available.wait()
self.kwargs["logger"].info(self._prefix + "Renaming " + str(old_path) + " to "+ str(new_path))
while 1:
self.is_remote_available.wait()
try:
# без этого будет работать?
self._ftp.cwd(os.path.dirname(old_path))
self._ftp.rename(old_path, new_path)
break
except Exception as ex:
self.kwargs["logger"].error(self._prefix + "Renaming was interrupted: " + str(ex))
time.sleep(1)
def get_list(self):
"""
Get list of files
"""
rootdir = '/'
with self._internal_lock:
self.is_remote_available.wait()
# без этого будет работать?
self._ftp.cwd(os.path.dirname(rootdir))
my_list = []
for filename in self._ftp.nlst():
path = os.path.join(rootdir, filename)
size = self.get_size(path)
my_list.append({"path": path, "size": size, "hash": ""})
return my_list
def delete(self, device_path):
with self._internal_lock:
self.is_remote_available.wait()
self.kwargs["logger"].info(self._prefix + "Deleting " + str(device_path))
while 1:
self.is_remote_available.wait()
try:
# без этого будет работать?
self._ftp.cwd(os.path.dirname(device_path))
self._ftp.delete(device_path)
break
except Exception as ex:
self.kwargs["logger"].error(self._prefix + "Deleting was interrupted: " + str(ex))
time.sleep(1)
| 36.638522 | 131 | 0.534783 |
acf12c7c9241ad3b4211441871ca3b508081a20f | 2,664 | py | Python | icu_mortality/patient_demographics/patient_demographics.py | RJBeetel3/mimic3_analysis | 5267a9cc9037da431bb257d157df8e00fab2d295 | [
"MIT"
] | 2 | 2018-11-27T07:47:10.000Z | 2020-03-02T07:45:06.000Z | icu_mortality/patient_demographics/patient_demographics.py | RJBeetel3/mimic3_analysis | 5267a9cc9037da431bb257d157df8e00fab2d295 | [
"MIT"
] | 1 | 2018-12-03T18:04:27.000Z | 2018-12-05T20:38:14.000Z | icu_mortality/patient_demographics/patient_demographics.py | RJBeetel3/mimic3_analysis | 5267a9cc9037da431bb257d157df8e00fab2d295 | [
"MIT"
] | 1 | 2018-03-10T23:23:17.000Z | 2018-03-10T23:23:17.000Z | """ This module tests functions in the patient demographics module including
the importation, preprocessing and selection of features.
"""
import sys
import os
import pandas as pd
from icu_mortality import DATA_DIR
"""import datetime as datetime
import numpy as np
from dateutil.relativedelta import relativedelta
from sklearn.preprocessing import OneHotEncoder
import matplotlib
import matplotlib.pyplot as plt
#import psycopg2
from scipy.stats import ks_2samp
import scipy.stats as scats
import visuals as vs
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
"""
# Custom exception hierarchy for the patient demographics module.
class PtntDemogError(Exception):
    """Base class for all errors raised by the patient demographics module."""


class ImportDataError(PtntDemogError):
    """Raised when the demographics CSV file cannot be read."""
def import_data(ptnt_demog_filename=os.path.join(DATA_DIR, 'PTNT_DEMOG_FIRST24.csv')):
    """Import raw data from the patient demographics database query.

    The demographic data is constant across a patient's ICU stay, so only the
    first row per ``icustay_id`` is kept and duplicates are discarded.

    :param ptnt_demog_filename: path of the CSV file to load.
    :return: de-duplicated :class:`pandas.DataFrame` of demographics.
    :raises ImportDataError: if the CSV file cannot be read.
    """
    try:
        print(ptnt_demog_filename)
        ptnt_demog_data = pd.read_csv(ptnt_demog_filename)
        ptnt_demog_data = ptnt_demog_data.drop_duplicates(subset='icustay_id')
    except IOError as e:
        # BUG FIX: the original raised first and then had `print(e + "\n")`
        # after the raise — unreachable, and `exception + str` would have
        # been a TypeError anyway. Chain the original cause instead so the
        # traceback keeps the underlying I/O error.
        raise ImportDataError(str(e)) from e
    return ptnt_demog_data
def convert_datetimes(ptnt_demog_data):
    """Convert the date/time columns of *ptnt_demog_data* to pandas
    datetime objects, in place, and return the modified frame."""
    datetime_columns = ('dob', 'admittime', 'dischtime', 'intime',
                        'outtime', 'deathtime')
    # Parse each text column into pandas Timestamps.
    for column in datetime_columns:
        ptnt_demog_data.loc[:, column] = pd.to_datetime(
            ptnt_demog_data.loc[:, column])
    return ptnt_demog_data
"""
if __name__ == "__main__":
# for debugging
#sys.argv = ['thisscript', 'nope.csv']
script_name, ptnt_demog_filename = sys.argv
import_data(ptnt_demog_filename)
""" | 28.956522 | 89 | 0.771396 |
acf12ce8140586f06cfa2db60ab72292140641cb | 1,509 | py | Python | classic/jetbot.py | andylucny/JetBotDemos | 9dadad9189bd70f5554b8a1c4e4cf07e788adfb1 | [
"MIT"
] | 2 | 2021-07-07T15:38:30.000Z | 2021-07-18T09:54:26.000Z | classic/jetbot.py | andylucny/JetBotDemos | 9dadad9189bd70f5554b8a1c4e4cf07e788adfb1 | [
"MIT"
] | null | null | null | classic/jetbot.py | andylucny/JetBotDemos | 9dadad9189bd70f5554b8a1c4e4cf07e788adfb1 | [
"MIT"
] | null | null | null | from Adafruit_MotorHAT import Adafruit_MotorHAT as HAT
class JetBot:
    """Minimal differential-drive interface for the JetBot's Adafruit motor HAT."""

    def __init__(self, speed=200):
        """Open the motor HAT on I2C bus 1 and apply *speed* to both motors."""
        self.speed = speed
        channel_left, channel_right = 1, 2
        self.driver = HAT(i2c_bus=1)
        self.left_motor = self.driver.getMotor(channel_left)
        self.right_motor = self.driver.getMotor(channel_right)
        self.left_motor.setSpeed(self.speed)
        self.right_motor.setSpeed(self.speed)

    def _run(self, left_command, right_command):
        # Issue one run command per motor; every motion method funnels here.
        self.left_motor.run(left_command)
        self.right_motor.run(right_command)

    def left(self):
        """Spin in place to the left."""
        self._run(HAT.BACKWARD, HAT.FORWARD)

    def right(self):
        """Spin in place to the right."""
        self._run(HAT.FORWARD, HAT.BACKWARD)

    def forward(self):
        """Drive both motors forward."""
        self._run(HAT.FORWARD, HAT.FORWARD)

    def backward(self):
        """Drive both motors backward."""
        self._run(HAT.BACKWARD, HAT.BACKWARD)

    def stop(self):
        """Release both motors so the robot coasts to a stop."""
        self._run(HAT.RELEASE, HAT.RELEASE)

    def setSpeed(self, speed):
        """Store *speed* and apply it to both motors."""
        self.speed = speed
        self.left_motor.setSpeed(self.speed)
        self.right_motor.setSpeed(self.speed)
# Test
if __name__ == "__main__":
    import time

    robot = JetBot()
    # Exercise each motion for one second, then release the motors.
    manoeuvres = (
        ('right', robot.right),
        ('left', robot.left),
        ('forward', robot.forward),
        ('backward', robot.backward),
    )
    for label, command in manoeuvres:
        print(label)
        command()
        time.sleep(1)
    print('stop')
    robot.stop()
| 25.576271 | 68 | 0.629556 |
acf12ce94210b43c6cdcb176055b2904a73576bf | 2,447 | py | Python | day3/day3p1.py | 1337Lurker/2019-advent-of-code | 7886ca97fc9b4081db1d864b9f4cb6cf509ea8bb | [
"MIT"
] | null | null | null | day3/day3p1.py | 1337Lurker/2019-advent-of-code | 7886ca97fc9b4081db1d864b9f4cb6cf509ea8bb | [
"MIT"
] | null | null | null | day3/day3p1.py | 1337Lurker/2019-advent-of-code | 7886ca97fc9b4081db1d864b9f4cb6cf509ea8bb | [
"MIT"
] | null | null | null | import fileinput
import datetime
from sympy import intersection
from sympy.geometry import Point, Segment, Polygon
ORIGIN = Point(0, 0)
def main():
    """Read two comma-separated wire paths from stdin/argv files, build their
    segments, and report the intersection closest to the origin in taxicab
    distance (Advent of Code 2019, day 3 part 1)."""
    print(f"starting! {datetime.datetime.now()}")
    # One input line per wire; each line is a comma-separated list of moves.
    line_paths = [paths.split(",") for paths in fileinput.input()]
    lines = [map_line(line_path) for line_path in line_paths]
    print(f"finished importing {datetime.datetime.now()}")
    first_path = lines[0]
    second_path = lines[1]
    print(f"starting intersection search {datetime.datetime.now()}")
    # Pairwise segment-vs-segment intersection via sympy (O(n*m) pairs).
    intersections = []
    for first_line in first_path:
        for second_line in second_path:
            intersection_points = intersection(first_line, second_line)
            if len(intersection_points) > 0:
                intersections.append(intersection_points.pop())
    print(f"completed intersection search {datetime.datetime.now()}")
    print(f"starting closest intersection search {datetime.datetime.now()}")
    # Both wires start at the origin, so the origin itself is skipped; the
    # origin also serves as the "not yet found" sentinel via is_zero.
    closest_point = ORIGIN
    for point in intersections:
        if point.is_zero:
            continue
        if closest_point.is_zero:
            closest_point = point
        if point.taxicab_distance(ORIGIN) < closest_point.taxicab_distance(ORIGIN):
            closest_point = point
    print(f"finished closest intersection search {datetime.datetime.now()}")
    print(f"closest intersection: {closest_point} @ {closest_point.taxicab_distance(ORIGIN)} units")
def map_line(line_path):
    """Convert a list of wire-path tokens (e.g. ``["R8", "U5"]``) into a list
    of sympy ``Segment`` objects, starting from the origin.

    :param line_path: iterable of tokens, each a direction letter
        (``U``/``D``/``L``/``R``) followed by an integer distance.
    :return: list of ``Segment`` objects tracing the wire.
    :raises ValueError: if a token starts with an unknown direction letter.
    """
    # Unit direction vector for each movement letter (dispatch table replaces
    # the original four near-identical if/elif branches).
    directions = {"U": (0, 1), "D": (0, -1), "L": (-1, 0), "R": (1, 0)}
    origin_point = ORIGIN
    line = []
    for path in line_path:
        try:
            dx, dy = directions[path[0]]
        except KeyError:
            # Original raised a bare Exception("wut"); ValueError is more
            # precise and still caught by any `except Exception` caller.
            raise ValueError(f"unrecognised direction in path token: {path!r}")
        distance = int(path[1:])
        new_point = (origin_point[0] + dx * distance,
                     origin_point[1] + dy * distance)
        line.append(Segment(origin_point, new_point))
        origin_point = new_point
    return line
if __name__ == "__main__":
    # Execute only when run as a script (not when imported as a module).
    main()
| 32.626667 | 100 | 0.639967 |
acf12cef84633a032a72e3cbc3218d49f9a9b47d | 7,785 | py | Python | MeatFood_enve/Lib/site-packages/django/views/generic/base.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | MeatFood_enve/Lib/site-packages/django/views/generic/base.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | MeatFood_enve/Lib/site-packages/django/views/generic/base.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | import logging
from functools import update_wrapper
from django.core.exceptions import ImproperlyConfigured
from django.http import (
HttpResponse, HttpResponseGone, HttpResponseNotAllowed,
HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.decorators import classonlymethod
logger = logging.getLogger('django.request')
class ContextMixin:
    """
    A default context mixin that passes the keyword arguments received by
    get_context_data() as the template context.
    """
    extra_context = None

    def get_context_data(self, **kwargs):
        # Expose the view instance itself under "view", unless the caller
        # already supplied one.
        if 'view' not in kwargs:
            kwargs['view'] = self
        # extra_context entries override same-named URLconf kwargs.
        extra = self.extra_context
        if extra is not None:
            kwargs.update(extra)
        return kwargs
class View:
    """
    Intentionally simple parent class for all views. Only implements
    dispatch-by-method and simple sanity checking.
    """

    # HTTP verbs a view may implement; dispatch() only routes to these.
    http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']

    def __init__(self, **kwargs):
        """
        Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments, and other things.
        """
        # Go through keyword arguments, and either save their values to our
        # instance, or raise an error.
        for key, value in kwargs.items():
            setattr(self, key, value)

    @classonlymethod
    def as_view(cls, **initkwargs):
        """Main entry point for a request-response process."""
        # Reject initkwargs that would shadow HTTP handlers or that do not
        # correspond to existing class attributes.
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError(
                    'The method name %s is not accepted as a keyword argument '
                    'to %s().' % (key, cls.__name__)
                )
            if not hasattr(cls, key):
                raise TypeError("%s() received an invalid keyword %r. as_view "
                                "only accepts arguments that are already "
                                "attributes of the class." % (cls.__name__, key))

        def view(request, *args, **kwargs):
            # A fresh view instance per request keeps per-request state
            # isolated between requests.
            self = cls(**initkwargs)
            self.setup(request, *args, **kwargs)
            if not hasattr(self, 'request'):
                raise AttributeError(
                    "%s instance has no 'request' attribute. Did you override "
                    "setup() and forget to call super()?" % cls.__name__
                )
            return self.dispatch(request, *args, **kwargs)
        view.view_class = cls
        view.view_initkwargs = initkwargs

        # take name and docstring from class
        update_wrapper(view, cls, updated=())

        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view

    def setup(self, request, *args, **kwargs):
        """Initialize attributes shared by all view methods."""
        # Default HEAD to GET when the subclass provides no explicit head().
        if hasattr(self, 'get') and not hasattr(self, 'head'):
            self.head = self.get
        self.request = request
        self.args = args
        self.kwargs = kwargs

    def dispatch(self, request, *args, **kwargs):
        # Try to dispatch to the right method; if a method doesn't exist,
        # defer to the error handler. Also defer to the error handler if the
        # request method isn't on the approved list.
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        return handler(request, *args, **kwargs)

    def http_method_not_allowed(self, request, *args, **kwargs):
        """Log and return a 405 response listing the allowed methods."""
        logger.warning(
            'Method Not Allowed (%s): %s', request.method, request.path,
            extra={'status_code': 405, 'request': request}
        )
        return HttpResponseNotAllowed(self._allowed_methods())

    def options(self, request, *args, **kwargs):
        """Handle responding to requests for the OPTIONS HTTP verb."""
        response = HttpResponse()
        response.headers['Allow'] = ', '.join(self._allowed_methods())
        response.headers['Content-Length'] = '0'
        return response

    def _allowed_methods(self):
        """Return the upper-cased HTTP verbs this view actually implements."""
        return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin:
    """A mixin that can be used to render a template."""
    template_name = None
    template_engine = None
    response_class = TemplateResponse
    content_type = None

    def render_to_response(self, context, **response_kwargs):
        """
        Return a response, using the `response_class` for this view, with a
        template rendered with the given context.

        Pass response_kwargs to the constructor of the response class.
        """
        # Fall back to the view's content_type unless the caller chose one.
        response_kwargs.setdefault('content_type', self.content_type)
        response = self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            using=self.template_engine,
            **response_kwargs
        )
        return response

    def get_template_names(self):
        """
        Return a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response() is overridden.
        """
        # Guard clause: a missing template_name is a configuration error.
        if self.template_name is None:
            raise ImproperlyConfigured(
                "TemplateResponseMixin requires either a definition of "
                "'template_name' or an implementation of 'get_template_names()'")
        return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
    """
    Render a template. Pass keyword arguments from the URLconf to the context.
    """
    def get(self, request, *args, **kwargs):
        # Build the context from URLconf kwargs, then hand it to the
        # template-rendering machinery from TemplateResponseMixin.
        return self.render_to_response(self.get_context_data(**kwargs))
class RedirectView(View):
    """Provide a redirect on any GET request."""
    permanent = False       # True -> 301, False -> 302
    url = None              # literal URL template ("%(kwarg)s" interpolation)
    pattern_name = None     # URLconf name to reverse() when `url` is unset
    query_string = False    # append the incoming query string to the target

    def get_redirect_url(self, *args, **kwargs):
        """
        Return the URL redirect to. Keyword arguments from the URL pattern
        match generating the redirect request are provided as kwargs to this
        method.

        Returns ``None`` when neither `url` nor `pattern_name` is configured,
        which get() turns into a 410 Gone response.
        """
        if self.url:
            url = self.url % kwargs
        elif self.pattern_name:
            url = reverse(self.pattern_name, args=args, kwargs=kwargs)
        else:
            return None

        # Rebinds `args` to the raw query string of the current request.
        args = self.request.META.get('QUERY_STRING', )
        if args and self.query_string:
            url = "%s?%s" % (url, args)
        return url

    def get(self, request, *args, **kwargs):
        """Redirect (301/302) to the computed URL, or 410 Gone if none."""
        url = self.get_redirect_url(*args, **kwargs)
        if url:
            if self.permanent:
                return HttpResponsePermanentRedirect(url)
            else:
                return HttpResponseRedirect(url)
        else:
            logger.warning(
                'Gone: %s', request.path,
                extra={'status_code': 410, 'request': request}
            )
            return HttpResponseGone()

    # All other verbs redirect exactly like GET.
    def head(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def options(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
| 35.547945 | 93 | 0.615029 |
acf12e0d5dea36ac27ad97a39f63ff4104b2ec58 | 14,176 | py | Python | giotto/time_series/embedding.py | L2F-abelganz/giotto-learn | c290c70fa0c2f05d543633b78e297b506e36e4de | [
"Apache-2.0"
] | 1 | 2019-10-16T11:41:40.000Z | 2019-10-16T11:41:40.000Z | giotto/time_series/embedding.py | L2F-abelganz/giotto-learn | c290c70fa0c2f05d543633b78e297b506e36e4de | [
"Apache-2.0"
] | null | null | null | giotto/time_series/embedding.py | L2F-abelganz/giotto-learn | c290c70fa0c2f05d543633b78e297b506e36e4de | [
"Apache-2.0"
] | null | null | null | """Time series embedding."""
# License: Apache 2.0
import numpy as np
from sklearn.base import BaseEstimator
from ..base import TransformerResamplerMixin
from sklearn.metrics import mutual_info_score
from sklearn.neighbors import NearestNeighbors
from joblib import Parallel, delayed
from sklearn.utils.validation import check_is_fitted, check_array, column_or_1d
from ..utils.validation import validate_params
class SlidingWindow(BaseEstimator, TransformerResamplerMixin):
    """Represent a time series as a sequence of sliding windows.

    ``transform`` stacks windows of length `width`, taken every `stride`
    samples along the first axis of the input, into a single array.
    ``resample`` aligns a target array with the produced windows by keeping
    the sample at the end of each window.

    (The previous docstring described concatenating multiple transformers —
    it was copied from a different estimator and did not match this class.)

    Parameters
    ----------
    width : int, default: ``1``
        Width of the sliding window.

    stride : int, default: ``1``
        Stride of the sliding window.

    Examples
    --------
    >>> from giotto.time_series import SlidingWindow

    """
    _hyperparameters = {'width': [int, (1, np.inf)],
                        'stride': [int, (1, np.inf)]}

    def __init__(self, width=1, stride=1):
        self.width = width
        self.stride = stride

    def _slice_windows(self, X):
        # (start, stop) index pairs of every full window that fits in X.
        n_windows = (X.shape[0] - self.width) // self.stride + 1

        window_slices = [(i * self.stride, self.width + i * self.stride)
                         for i in range(n_windows)]
        return window_slices

    def fit(self, X, y=None):
        """Validate hyperparameters and the input, then mark the estimator
        as fitted. No state is learned from the data.

        Parameters
        ----------
        X : ndarray, shape (n_samples, [n_features, ])
            Input data.

        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning. Ignored.

        Returns
        -------
        self

        """
        validate_params(self.get_params(), self._hyperparameters)
        check_array(X, ensure_2d=False, allow_nd=True)

        self._is_fitted = True
        return self

    def transform(self, X, y=None):
        """Slide windows over X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, [n_features, ])
            Input data.

        y : None
            Ignored.

        Returns
        -------
        Xt : ndarray, shape (n_windows, width, [n_features, ])
            One stacked slice of X per window, where
            ``n_windows = (n_samples - width) // stride + 1``.

        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        X = check_array(X, ensure_2d=False, allow_nd=True)

        window_slices = self._slice_windows(X)

        Xt = np.stack([X[begin:end] for begin, end in window_slices])
        return Xt

    def resample(self, y, X=None):
        """Resample y, keeping the target value aligned with the end of each
        window produced by :meth:`transform`.

        Parameters
        ----------
        y : ndarray, shape (n_samples,)
            Target.

        X : None
            There is no need of input data,
            yet the pipeline API requires this parameter.

        Returns
        -------
        yt : ndarray, shape (n_windows,)
            The resampled target: one value per window,
            ``n_windows = (n_samples - width) // stride + 1``.

        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        y = column_or_1d(y)

        yt = y[self.width - 1:: self.stride]
        return yt
class TakensEmbedding(BaseEstimator, TransformerResamplerMixin):
    """Representation of a univariate time series as a time series of
    point clouds.

    Based on the following time-delay embedding technique named after F.
    Takens [1]_: given a time series :math:`X_t`, one extracts a list of
    vectors in :math:`\\mathbb{R}^d`, each of the form
    :math:`\\mathcal{X}_i := (X_{t_i}, X_{t_i + \\tau}, \\ldots , X_{t_i + (
    d-1)\\tau})`. The set :math:`\\{\\mathcal{X}_i\\}_i` is called the Takens
    embedding of the time series, :math:`\\tau` is called the embedding time
    delay, :math:`d` is called the embedding dimension, and the difference
    between :math:`t_i` and :math:`t_{i-1}` is called the embedding stride.

    If :math:`d` and :math:`\\tau` are not explicitly set, suitable values
    are calculated during :meth:`fit`. [2]_

    Parameters
    ----------
    parameters_type : ``'search'`` | ``'fixed'``, default: ``'search'``
        If set to ``'fixed'`` and if values for `time_delay` and
        `dimension` are provided, these values are used in :meth:`transform`.
        If set to ``'search'``, optimal values are automatically found for
        those parameters using criteria based on mutual information
        (`time_delay`) and false nearest neighbors (`dimension`). [2]_
        In that case the values of `time_delay` and `dimension` act as upper
        bounds for the search.

    time_delay : int, default: ``1``
        Time delay between two consecutive values for constructing one
        embedded point. If `parameters_type` is ``'search'``,
        it corresponds to the maximal embedding time delay that will be
        considered.

    dimension : int, default: ``5``
        Dimension of the embedding space. If `parameters_type` is ``'search'``,
        it corresponds to the maximum embedding dimension that will be
        considered.

    stride : int, default: ``1``
        Stride duration between two consecutive embedded points. It defaults
        to 1 as this is the usual value in the statement of Takens's embedding
        theorem.

    n_jobs : int or None, optional, default: ``None``
        The number of jobs to use for the computation. ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors.

    Attributes
    ----------
    time_delay_ : int
        Actual embedding time delay used to embed. If
        `parameters_type` is ``'search'``, it is the calculated optimal
        embedding time delay. Otherwise it has the same value as `time_delay`.

    dimension_ : int
        Actual embedding dimension used to embed. If `parameters_type` is
        ``'search'``, it is the calculated optimal embedding dimension.
        Otherwise it has the same value as `dimension`.

    Examples
    --------
    >>> import numpy as np
    >>> from giotto.time_series import TakensEmbedding
    >>> # Create a noisy signal
    >>> signal_noise = np.asarray([np.sin(x / 40) - 0.5 + np.random.random()
    ...                            for x in range(0, 1000)])
    >>> # Set up the transformer (the previous example passed
    >>> # outer_window_duration/outer_window_stride, which this class
    >>> # does not accept)
    >>> embedder = TakensEmbedding(parameters_type='search', dimension=5,
    ...                            time_delay=1, n_jobs=-1)
    >>> # Fit and transform
    >>> embedder.fit(signal_noise)
    >>> embedded_noise = embedder.transform(signal_noise)
    >>> print('Optimal embedding time delay based on mutual information:',
    ...       embedder.time_delay_)
    >>> print('Optimal embedding dimension based on false nearest neighbors:',
    ...       embedder.dimension_)

    See also
    --------
    giotto.homology.VietorisRipsPersistence

    References
    ----------
    .. [1] F. Takens, "Detecting strange attractors in turbulence". In: Rand
           D., Young LS. (eds) *Dynamical Systems and Turbulence, Warwick
           1980*. Lecture Notes in Mathematics, vol 898. Springer, 1981;
           doi: `10.1007/BFb0091924 <https://doi.org/10.1007/BFb0091924>`_.

    .. [2] N. Sanderson, "Topological Data Analysis of Time Series using
           Witness Complexes", PhD thesis, University of Colorado at
           Boulder, 2018; `https://scholar.colorado.edu/math_gradetds/67
           <https://scholar.colorado.edu/math_gradetds/67>`_.

    """
    _hyperparameters = {'parameters_type': [str, ['fixed', 'search']],
                        'time_delay': [int, (1, np.inf)],
                        'dimension': [int, (1, np.inf)],
                        'stride': [int, (1, np.inf)]}

    def __init__(self, parameters_type='search', time_delay=1, dimension=5,
                 stride=1, n_jobs=None):
        self.parameters_type = parameters_type
        self.time_delay = time_delay
        self.dimension = dimension
        self.stride = stride
        self.n_jobs = n_jobs

    @staticmethod
    def _embed(X, time_delay, dimension, stride):
        # Build the delay-embedded point cloud; the double np.flip makes the
        # windows align with the *end* of the series.
        n_points = (X.shape[0] - time_delay * dimension) // stride + 1

        X = np.flip(X)

        points_ = [X[j * stride:
                     j * stride + time_delay * dimension:
                     time_delay].flatten() for j in range(0, n_points)]

        X_embedded = np.stack(points_)

        return np.flip(X_embedded).reshape((n_points, dimension))

    @staticmethod
    def _mutual_information(X, time_delay, n_bins):
        """Calculate the mutual information given the delay."""
        contingency = np.histogram2d(X.reshape((-1,))[:-time_delay],
                                     X.reshape((-1,))[time_delay:],
                                     bins=n_bins)[0]
        mutual_information = mutual_info_score(None, None,
                                               contingency=contingency)
        return mutual_information

    @staticmethod
    def _false_nearest_neighbors(X, time_delay, dimension,
                                 stride=1):
        """Calculate the number of false nearest neighbours of embedding
        dimension. """
        X_embedded = TakensEmbedding._embed(X, time_delay, dimension, stride)

        neighbor = NearestNeighbors(n_neighbors=2, algorithm='auto').fit(
            X_embedded)
        distances, indices = neighbor.kneighbors(X_embedded)
        distance = distances[:, 1]
        XNeighbor = X[indices[:, 1]]

        epsilon = 2.0 * np.std(X)
        tolerance = 10

        dim_by_delay = -dimension * time_delay
        non_zero_distance = distance[:dim_by_delay] > 0

        false_neighbor_criteria = \
            np.abs(np.roll(X, dim_by_delay)[
                   X.shape[0] - X_embedded.shape[0]:dim_by_delay] -
                   np.roll(XNeighbor, dim_by_delay)[:dim_by_delay]) \
            / distance[:dim_by_delay] > tolerance

        limited_dataset_criteria = distance[:dim_by_delay] < epsilon

        n_false_neighbors = np.sum(
            non_zero_distance * false_neighbor_criteria *
            limited_dataset_criteria)
        return n_false_neighbors

    def fit(self, X, y=None):
        """Compute the embedding parameters to use in :meth:`transform`.

        If `parameters_type` is ``'search'``, find the optimal
        `time_delay_` (mutual-information minimum) and `dimension_`
        (false-nearest-neighbors criterion); otherwise copy the
        user-supplied values.

        Parameters
        ----------
        X : ndarray, shape (n_samples, 1)
            Input data.

        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.

        Returns
        -------
        self : object

        """
        validate_params(self.get_params(), self._hyperparameters)
        X = check_array(X.reshape(X.shape[0], 1), allow_nd=True)

        if self.parameters_type == 'search':
            mutual_information_list = Parallel(n_jobs=self.n_jobs)(
                delayed(self._mutual_information)(X, time_delay,
                                                  n_bins=100)
                for time_delay in
                range(1, self.time_delay + 1))
            self.time_delay_ = mutual_information_list.index(
                min(mutual_information_list)) + 1

            # NOTE(review): the FNN search below passes self.time_delay (the
            # search upper bound), not the optimal self.time_delay_ computed
            # just above — confirm whether this is intended.
            n_false_nbhrs_list = Parallel(n_jobs=self.n_jobs)(
                delayed(self._false_nearest_neighbors)(
                    X, self.time_delay, dim,
                    stride=1) for dim in
                range(1, self.dimension + 3))
            variation_list = [np.abs(n_false_nbhrs_list[dim - 1]
                                     - 2 * n_false_nbhrs_list[dim] +
                                     n_false_nbhrs_list[dim + 1])
                              / (n_false_nbhrs_list[dim] + 1) / dim
                              for dim in range(2, self.dimension + 1)]
            self.dimension_ = variation_list.index(min(variation_list)) + 2

        else:
            self.time_delay_ = self.time_delay
            self.dimension_ = self.dimension
        return self

    def transform(self, X, y=None):
        """Computes the embedding of X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, 1)
            Input data.

        y : None
            Ignored.

        Returns
        -------
        Xt : ndarray, shape (n_points, dimension_)
            Array of embedded points, where
            ``n_points = (n_samples - time_delay_ * dimension_) // stride + 1``.
            (The previous docstring referenced outer windows, which this
            class does not produce.)

        """
        # Check if fit had been called
        check_is_fitted(self, ['time_delay_', 'dimension_'])
        X = check_array(X.reshape(X.shape[0], 1), allow_nd=True)

        Xt = self._embed(X, self.time_delay_, self.dimension_, self.stride)

        return Xt

    def resample(self, y, X=None):
        """Resample y so that one target value is kept per embedded point.

        Parameters
        ----------
        y : ndarray, shape (n_samples,)
            Target.

        X : None
            There is no need of input data,
            yet the pipeline API requires this parameter.

        Returns
        -------
        yt : ndarray, shape (n_points,)
            The resampled target, one value per embedded point.

        """
        # Check if fit had been called
        check_is_fitted(self, ['time_delay_', 'dimension_'])
        # NOTE(review): the validated output of column_or_1d is assigned to
        # yt and then immediately overwritten by a slice of the *raw* y —
        # confirm whether the slice should index the validated array instead.
        yt = column_or_1d(y)

        yt = y[self.time_delay_ * self.dimension_ - 1:: self.stride]
        return yt
| 35.61809 | 79 | 0.59375 |
acf12f836ea6f45ed93b2294ee76f0d804aff5b0 | 1,049 | py | Python | gnas/search_space/mutation.py | soolstafir/Applying-3D-U-Net-Architecture-to-the-Task-of-Multi-Organ-Segmentation-in-Computed-Tomography | c99cedc706917674a1641991a899e3fc7c925445 | [
"MIT"
] | 17 | 2019-04-02T04:24:37.000Z | 2021-12-16T02:10:20.000Z | gnas/search_space/mutation.py | soolstafir/3D-U-Net-in-CT | c99cedc706917674a1641991a899e3fc7c925445 | [
"MIT"
] | 4 | 2019-06-29T07:35:12.000Z | 2021-11-05T21:31:28.000Z | gnas/search_space/mutation.py | soolstafir/3D-U-Net-in-CT | c99cedc706917674a1641991a899e3fc7c925445 | [
"MIT"
] | 4 | 2019-12-02T09:09:20.000Z | 2021-02-17T18:57:54.000Z | import numpy as np
from gnas.search_space.individual import Individual, MultipleBlockIndividual
def flip_max_value(current_value, max_value, p):
    """Randomly perturb each gene of *current_value* by +/-1 with probability
    *p*, wrapping results that leave the [0, max_value] range."""
    size = current_value.shape[0]
    # One Bernoulli(p) draw per gene deciding whether it mutates at all...
    flip = np.floor(np.random.rand(size) + p).astype('int')
    # ...and an independent fair draw for the direction (+1 or -1).
    sign = (2 * (np.round(np.random.rand(size)) - 0.5)).astype('int')
    mutated = current_value + flip * sign
    # Wrap around: overshooting the cap resets to 0, undershooting 0 wraps
    # to the per-gene maximum.
    mutated = np.where(mutated > max_value, 0, mutated)
    mutated = np.where(mutated < 0, max_value, mutated)
    return mutated
def _individual_flip_mutation(individual_a, p) -> Individual:
    """Apply flip mutation to every chromosome of a single-block individual."""
    max_values = individual_a.ss.get_max_values_vector(index=individual_a.index)
    # One mutated chromosome per (max-value vector, chromosome) pair.
    mutated = [flip_max_value(chromosome, cap, p)
               for cap, chromosome in zip(max_values, individual_a.iv)]
    return individual_a.update_individual(mutated)
def individual_flip_mutation(individual_a, p):
    """Flip-mutate a single individual, or each block of a multi-block one."""
    if not isinstance(individual_a, Individual):
        # Multi-block: mutate every sub-individual independently.
        return MultipleBlockIndividual(
            [_individual_flip_mutation(inv, p)
             for inv in individual_a.individual_list])
    return _individual_flip_mutation(individual_a, p)
| 38.851852 | 115 | 0.735939 |
acf12faf5040e292d9d54ea252ef892007eb8352 | 177 | py | Python | ontask/admin/user.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | ontask/admin/user.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | ontask/admin/user.py | LucasFranciscoCorreia/ontask_b | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib import admin
from ontask.models import OnTaskUser
@admin.register(OnTaskUser)
class OnTaskUserAdmin(admin.ModelAdmin):
pass
| 14.75 | 40 | 0.745763 |
acf12feeed138db0a41599341cca3c8a442c0c15 | 985 | py | Python | scripts/start_indexing.py | d-kozak/enticing | 1ea9f874c6d2e4ea158e20bbf672fc45bcb4a561 | [
"MIT"
] | null | null | null | scripts/start_indexing.py | d-kozak/enticing | 1ea9f874c6d2e4ea158e20bbf672fc45bcb4a561 | [
"MIT"
] | null | null | null | scripts/start_indexing.py | d-kozak/enticing | 1ea9f874c6d2e4ea158e20bbf672fc45bcb4a561 | [
"MIT"
] | null | null | null | import os
import sys
from utils.utils import execute_command, read_default_config, init_logging
config = read_default_config()
def handle_args(args):
if len(args) != 4:
raise ValueError("server_file remote_home config.kts(on servers) collection_dir(on servers)")
if not os.path.isfile(args[0]):
raise ValueError(f"Server file {args[0]} not found")
return args[0], args[1], args[2], args[3]
def start_indexing(mg4j_dir, kts_config, enticing_home, server_file, username):
cmd = f'parallel-ssh -l {username} -h {server_file} -i {enticing_home}/scripts/node/start_indexing.sh {mg4j_dir} {kts_config}'
proc = execute_command(cmd)
return proc
def main():
init_logging(int(config['debug']['level']))
server_file, remote_home, kts_config, collection_dir = handle_args(sys.argv[1:])
proc = start_indexing(collection_dir, kts_config, remote_home, server_file, 'xkozak15')
print(proc.stdout)
if __name__ == "__main__":
main()
| 30.78125 | 130 | 0.717766 |
acf13066f1869ad41c745eccf49dded0073649ad | 1,386 | py | Python | Experimental Projects/orion_test_script/fake_orion.py | Team-Swinburne/ts_20-code-development | a80495b383628067aaaccfc2389072c31ae463b8 | [
"Apache-2.0"
] | 2 | 2020-10-24T07:46:45.000Z | 2021-06-30T03:35:48.000Z | Experimental Projects/orion_test_script/fake_orion.py | Team-Swinburne/ts_20-code-development | a80495b383628067aaaccfc2389072c31ae463b8 | [
"Apache-2.0"
] | null | null | null | Experimental Projects/orion_test_script/fake_orion.py | Team-Swinburne/ts_20-code-development | a80495b383628067aaaccfc2389072c31ae463b8 | [
"Apache-2.0"
] | 1 | 2021-03-16T04:55:31.000Z | 2021-03-16T04:55:31.000Z | import can
import time
# Constants
CANBUS_SPEED = 500000
THROTTLE_CONTROLLER_PERIPERAL_ID = 0x302
ORION_BMS_STATUS_ID = 0x100
def send_handler(bus, msg):
    """Send a single CAN message on *bus*, printing success or failure.

    Parameters
    ----------
    bus : object with ``send(msg, timeout=None)`` and ``channel_info``
    msg : object with a ``data`` attribute (the CAN payload)
    """
    try:
        bus.send(msg, timeout=None)
        print(msg.data)
        print("Message sent on {}\r".format(bus.channel_info))
    except Exception as exc:
        # BUG FIX: the original bare `except:` swallowed *every* exception,
        # including KeyboardInterrupt/SystemExit, and hid the failure reason.
        print("Message not sent: {}".format(exc))
def send_precharge_request(bus):
    """Send a precharge-request frame on the throttle controller ID, then
    pause for one second."""
    payload = [1, 0, 0, 0, 0, 0, 0, 0]
    msg = can.Message(arbitration_id=THROTTLE_CONTROLLER_PERIPERAL_ID, data=payload)
    # BUG FIX: the original built `msg` but never transmitted it, so no
    # precharge request ever reached the bus (compare send_relay_status,
    # which does call send_handler).
    send_handler(bus, msg)
    time.sleep(1)
def send_relay_status(bus):
    """Broadcast the Orion BMS relay-status frame, then pause briefly."""
    status_frame = can.Message(arbitration_id=ORION_BMS_STATUS_ID,
                               data=[7, 0, 0, 0, 0, 0, 0, 0])
    send_handler(bus, status_frame)
    time.sleep(0.00001)
def setup():
    """Create, flash and return the Kvaser CAN bus interface."""
    # may need to add serial=12093 <- or whatever number that is.
    # BUG FIX: the original line ended with a stray backslash, which joined
    # it with the following print() into one invalid statement (SyntaxError).
    bus = can.interface.Bus(bustype='kvaser', channel=0, bitrate=CANBUS_SPEED)
    print(bus.get_stats())
    bus.flash(flash=True)
    return bus
def main():
    """Broadcast CAN traffic: ten relay-status frames, one precharge
    request, then relay-status frames indefinitely (see NOTE below on the
    loop structure)."""
    print("starting")
    bus = setup()
    while(1):
        i = 0
        while(i < 10):
            send_relay_status(bus)
            # Program loop
            i = i+1
            print(i)
        send_precharge_request(bus)
        i = 0
        # NOTE(review): `1 < 100` is always True, so this loop never exits
        # and the outer `while(1)` never takes a second iteration. The
        # `i = 0` just above suggests `while(i < 100)` was intended, but `i`
        # is never incremented in this loop either — confirm intent before
        # changing.
        while(1 < 100):
            send_relay_status(bus)
            # Program loop
try:
main()
except KeyboardInterrupt:
send_precharge_request() | 21.323077 | 84 | 0.616883 |
acf1307dbe70c9aa7953b9e244ebe2a962c34a24 | 1,194 | py | Python | repos/system_upgrade/el7toel8/actors/checkirssi/actor.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkirssi/actor.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkirssi/actor.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | from leapp.actors import Actor
from leapp.libraries.common.reporting import report_with_remediation
from leapp.libraries.common.rpms import has_package
from leapp.models import InstalledRedHatSignedRPM
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
from leapp.reporting import Report
class CheckIrssi(Actor):
"""
Check if irssi is installed. If yes, write information about non-compatible changes.
"""
name = 'checkirssi'
consumes = (InstalledRedHatSignedRPM,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
if has_package(InstalledRedHatSignedRPM, 'irssi'):
report_with_remediation(
title='Irssi incompatible changes in the next major version',
summary='Disabled support for the insecure SSLv2 protocol.\n'
'Disabled SSLv3 due to the POODLE vulnerability.\n'
'Removing networks will now remove all attached servers and channels.\n'
'Removed --disable-ipv6 option.\n',
remediation='Please update your scripts to be compatible with the changes.',
severity='low')
| 41.172414 | 96 | 0.680905 |
acf131c77eddf9f1e9ad532cf5ae2fb45c97d155 | 2,073 | py | Python | mvsnet_script.py | shaochangxu/UMT-MVSNet | d502b9f28ae052186b9a92fc0ccd8c8709effd28 | [
"MIT"
] | 1 | 2021-06-25T13:32:16.000Z | 2021-06-25T13:32:16.000Z | mvsnet_script.py | shaochangxu/UMT-MVSNet | d502b9f28ae052186b9a92fc0ccd8c8709effd28 | [
"MIT"
] | null | null | null | mvsnet_script.py | shaochangxu/UMT-MVSNet | d502b9f28ae052186b9a92fc0ccd8c8709effd28 | [
"MIT"
] | null | null | null | import subprocess
import argparse
import os
# Bug fix: the original `from time import *` wildcard import only needs time().
from time import time

# Command-line configuration for the dense-reconstruction pipeline.
# All flags are optional strings; unset flags arrive as None.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str)
parser.add_argument("--scene_path", type=str)
parser.add_argument("--pair_file", type=str)
parser.add_argument("--kernel_path", type=str)
parser.add_argument("--img_path", type=str)
parser.add_argument("--resolution", type=str)
parser.add_argument("--task_id", type=str)
parser.add_argument("--quality", type=str)
args = parser.parse_args()

dataset_path = args.dataset_path
scene_path = args.scene_path
pair_file_path = args.pair_file
kernel_path = args.kernel_path
img_path = args.img_path
resolution = args.resolution
task_id = args.task_id
# Pre-built executable paths (trailing space kept so commands can be appended).
fast_colmap_exe_path = kernel_path + "/acmh/colmap/build/src/exe/colmap "
colmap_exe_path = kernel_path + "/colmap/build/src/exe/colmap "
quality = args.quality
######################################################## Dense Reconstruction Start ##################################################################################
begin_time = time()
# Export the COLMAP workspace produced by the densify step into dataset_path.
os.system(kernel_path + "/openMVS_dis_build/bin/InterfaceCOLMAP" + " -i " + dataset_path + "../Densify_temp_" + str(task_id) + "/scene.mvs" + " -o " + dataset_path + " --archive-type 1" + " -w " + img_path)
# Bug fix: these shell commands previously ran via os.popen(), which returns
# immediately without waiting for the child shell.  The later os.system()
# python steps could therefore race against mkdir/cp/ln that were still
# running, and the returned pipe objects were never closed.  os.system()
# blocks until each command completes, preserving the required ordering.
os.system("mkdir " + dataset_path + "/dense")
os.system("cp -r " + dataset_path + "/stereo " + dataset_path + "/dense ")
os.system("cp -r " + dataset_path + "/sparse " + dataset_path + "/dense ")
os.system("mkdir -p " + dataset_path + "/dense/stereo/depth_maps/")
os.system("mkdir -p " + dataset_path + "/dense/stereo/normal_maps/")
os.system("ln -sf " + img_path + " "+ dataset_path + "/dense/images")
# Patch the stereo configuration with the view-pair file, then convert the
# COLMAP dense workspace into MVSNet input format.
os.system("python " + kernel_path + "/modify_cfg.py --pair_file " + pair_file_path + " --cfg_dir " + dataset_path + "/dense/stereo")
os.system("python " + kernel_path + "/colmap2mvsnet.py --dense_folder " + dataset_path + "/dense/" + " --whitelist " + dataset_path + "/dense/stereo/fusion.cfg")
end_time = time()
run_time = end_time - begin_time
print("dense reconstruction finished, run time is %0.2f s\n"%run_time)
| 39.113208 | 207 | 0.666667 |
acf1326e1044b28e2edf2d53e6cfbc3f5847bf87 | 11,338 | py | Python | portal/netgrep/parsehtml.py | joeleung00/webportal | ce900c4ea4a3d149910262ac00eefea3a01c70ee | [
"MIT"
] | null | null | null | portal/netgrep/parsehtml.py | joeleung00/webportal | ce900c4ea4a3d149910262ac00eefea3a01c70ee | [
"MIT"
] | null | null | null | portal/netgrep/parsehtml.py | joeleung00/webportal | ce900c4ea4a3d149910262ac00eefea3a01c70ee | [
"MIT"
] | null | null | null | import re
import urllib3
from bs4 import BeautifulSoup
from lxml import etree
''' Checklist:
1. [X] Exception handling for invalid input arguments
2. [X] Injection prevention
3. [X] Unit test
4. [V] Documentation for all exporting interfaces
'''
class ParseHtml:
    """Static helpers for fetching a web page and extracting matching tags.

    All entry points are static/class methods; instances carry no state.
    Network access goes through urllib3 with short timeouts and a small
    retry budget; parsing uses BeautifulSoup ('html.parser') or lxml.etree
    depending on the method.
    """

    def __init__(self):
        pass

    @staticmethod
    def convert_tag_to_string(target_tag):
        '''
        Input:
            bs4.element.Tag target_tag
                the tag to be converted
        Output:
            string
                a whitespace-normalized concatenation of all strings in
                target_tag; '' when the object has no 'stripped_strings'
                attribute (e.g. it is not a Tag)
        '''
        if not hasattr(target_tag, 'stripped_strings'):
            return ''
        result = ''
        for string in target_tag.stripped_strings:
            result += ' ' + string
        # Collapse internal runs of whitespace and trim the ends.
        return re.sub(r'\s+', ' ', result).strip()

    @staticmethod
    def convert_lxml_element_to_string(target_elem, truncate=-1):
        '''
        Input:
            lxml.etree._Element | string target_elem
                the element to be converted; error messages arrive as str
            int truncate
                maximum number of characters allowed
                if truncated, "..." is appended so total length is exactly truncate
                (only honoured for truncate >= 3)
        Output:
            string
                a string concatenating all text fragments in target_elem
        '''
        result = ''
        if hasattr(target_elem, 'itertext') and callable(target_elem.itertext):
            for string in target_elem.itertext():
                # itertext may yield non-str items (e.g. CDATA wrappers); skip them.
                if isinstance(string, str):
                    result += ' ' + string
        elif isinstance(target_elem, str):
            result += ' ' + target_elem
        result = re.sub(r'\s+', ' ', result).strip()
        if truncate >= 3 and len(result) > truncate:
            result = result[0: truncate - 3] + "..."
        return result

    @classmethod
    def grep_tags(cls, rexpr, url):
        '''
        Retrieve the tags with contents matching the regular expression rexpr
        Input:
            string rexpr
            string url
                The url of the target webpage
        Output:
            list<bs4.element.Tag>
                parents of every text node matching rexpr; empty list when
                the URL cannot be fetched
        '''
        soup = ParseHtml._url_to_soup(url, 'html.parser')
        if soup is None:
            # Bug fix: the original else-branch returned
            # [None for tag in matches] but 'matches' was never assigned on
            # this path, raising NameError.  An empty list is the only
            # well-defined answer when the page could not be fetched.
            return []
        matches = soup.find_all(text=re.compile(rexpr))
        return [tag.parent for tag in matches]

    @classmethod
    def retrieve_first_tags_matches(cls, full_tags, url):
        '''
        For every full_tag in full_tags,
        retrieve the first tag having exact match with the full_tag
        Input:
            list<string> full_tags
                Every element is the whole starting tag of the target section, but excludes the ending tag
                e.g. full_tags[0] = '<div class="login">', does not need '</div>'
            string url
                The url of the target webpage
        Output:
            list<bs4.element.Tag | NoneType>
                bs4.element.Tag: tag having the first exact match for each full_tag in full_tags
                NoneType: URL not found / tag not found
        '''
        soup = ParseHtml._url_to_soup(url, 'html.parser')
        if soup is not None:
            return [ParseHtml._retrieve_first_tag_match(full_tag, soup) for full_tag in full_tags]
        else:
            return [None for full_tag in full_tags]

    @classmethod
    def retrieve_hierarchical_tags_matches(cls, hierarchy_tags, url):
        '''
        UNDER CONSTRUCTION: standards are subject to change
        For every hierarchy_tag in hierarchy_tags,
        retrieve the tag with the same hierarchical position as hierarchy_tag
        Input:
            list<string> hierarchy_tags
                Every element is a string containing the ancestors/ancestors' siblings/siblings
                of the target tag; siblings are separated by comma; no ending tag needed
            string url
                The url of the target webpage
        Output:
            list<bs4.element.Tag | NoneType>
        '''
        # Bug fix: `raise NotImplemented(...)` raised TypeError because
        # NotImplemented is a plain value, not an exception class.
        raise NotImplementedError('netgrep: retrieve_hierarchical_tags_matches is still under construction')

    @classmethod
    def retrieve_xpath_matches(cls, xpath_pattern, url):
        '''
        Retrieve the lxml elements matching the xpath_pattern
        TODO: Fix:
            XPath still has problem extracting certain html tags and causing obvious artifacts
            Especially for html comment blocks
        Input:
            string xpath_pattern
                String in XPath format, which specifies the elements
            string url
                The url of the target webpage
        Output:
            list<lxml.etree._Element | str>
                elements matching xpath_pattern; a one-element list holding
                an "Error: ..." string on failure; empty list if not found
        '''
        # TODO: Use centralized webpage getter
        http_pool = urllib3.PoolManager(
            timeout=urllib3.Timeout(connect=1.0, read=2.0),
            retries=urllib3.Retry(2, redirect=2)
        )
        try:
            page = http_pool.request('GET', url)
            html_str = page.data.decode("utf-8")
            html_elem = etree.HTML(html_str)
            try:
                matches = html_elem.xpath(xpath_pattern)
                # xpath() may also return scalars (count(), string()); only
                # list results satisfy this method's contract.
                if isinstance(matches, list):
                    return matches
            except etree.XPathSyntaxError as e:
                return ["Error: invalid selector syntax: {}".format(e)]
            except etree.XPathEvalError as e:
                return ["Error: cannot evaluate selector format: {}".format(e)]
            except Exception as e:
                print("FATAL: unexpected error. ", e)
        except urllib3.exceptions.BodyNotHttplibCompatible:
            return ["Error: target site cannot be parsed"]
        except urllib3.exceptions.ConnectionError:
            return ["Error: error occurs during connection"]
        except urllib3.exceptions.NewConnectionError:
            return ["Error: fails to connect"]
        except urllib3.exceptions.TimeoutError:
            return ["Error: connection timeout"]
        except urllib3.exceptions.MaxRetryError:
            return ["Error: too many retries"]
        except Exception as e:
            print("FATAL: unexpected error. ", e)
        return ["Error: unknown"]

    # ********** Below are private methods **********

    @classmethod
    def _url_to_soup(cls, url, parser = 'html.parser'):
        '''
        Return the BeautifulSoup object corresponding to the supplied url
        Does not guarantee to return a real time updated version
        Input:
            string url
        Successful output:
            BeautifulSoup soup
        Special output:
            NoneType — page could not be retrieved
        Raises:
            NotImplementedError — for any parser other than 'html.parser'
        '''
        if parser != 'html.parser':
            # Bug fix: NotImplemented is a value, not callable; the original
            # `raise NotImplemented(...)` raised TypeError instead.
            raise NotImplementedError('netgrep: only html.parser is currently supported')
        # TODO: Should be moved as class variable later to avoid multiple allocation
        # TODO: Cache retrieved webpages and only refresh every some intervals
        http_pool = urllib3.PoolManager(
            timeout=urllib3.Timeout(connect=1.0, read=2.0),
            retries=urllib3.Retry(2, redirect=2)
        )
        try:
            page = http_pool.request('GET', url)
            return BeautifulSoup(page.data, parser)
        except (urllib3.exceptions.BodyNotHttplibCompatible,
                urllib3.exceptions.ConnectionError,
                urllib3.exceptions.NewConnectionError,
                urllib3.exceptions.TimeoutError,
                urllib3.exceptions.MaxRetryError):
            # TODO: raise a more meaningful message (esp. for MaxRetryError)
            return None
        except Exception as e:
            print("FATAL: unexpected error. ", e)
            raise

    @classmethod
    def _retrieve_first_tag_match(cls, full_tag, soup):
        '''
        Retrieve the first tag having exact match with the full_tag
        This function can only handle one tag at once
        Input:
            string full_tag
                The whole starting tag of the target section, but excludes the ending tag
                e.g. full_tag = '<div class="login">', does not need '</div>'
            BeautifulSoup soup
                The BeautifulSoup object containing the contents of target website
        Successful output:
            bs4.element.Tag — whole section of the target part
        Special output:
            NoneType — not found or parse failure
        '''
        target_tag = None  # pre-bind so the except handler can always print it
        try:
            # Evaluate the format of target tag
            tag_soup = BeautifulSoup(full_tag, 'html.parser')
            target_tag = tag_soup.contents[0]
            # Bug fix: the original returned None from inside the loop as
            # soon as the FIRST candidate failed to match, so only one
            # section was ever examined.  Scan every candidate instead.
            for section in soup.find_all(target_tag.name):
                if section.attrs == target_tag.attrs:
                    return section
            return None
        except Exception:
            print("Error: Trying to search for:", target_tag)
            # TODO: return the reason causing error
            return None
if __name__ == "__main__":
    # For testings
    # Manual smoke tests against a live page; pick a scenario by editing
    # `testcase` below.  These hit the network, so results depend on the
    # target site being reachable and on its current content.
    testcase = 3
    siuon_url = 'http://www.cse.cuhk.edu.hk/~siuon/csci4230/'
    if testcase == 1:
        # Scenario 1: exact-tag lookup.  The commented list holds real tags
        # from the page; '<>' exercises the invalid-input path instead.
        #target_tags = ['<section id="topics">', '<div class="login">', '<ul class="news">', '<thead>']
        target_tags = ['<>']
        multiple = ParseHtml.retrieve_first_tags_matches(target_tags, siuon_url)
        for tag in multiple:
            if tag is not None:
                print(ParseHtml.convert_tag_to_string(tag), end='\n\n')
            else:
                print("Not found D:", end='\n\n')
    if testcase == 2:
        # Scenario 2: regex grep over the page's text nodes.
        multiple = ParseHtml.grep_tags("learning", siuon_url)
        for tag in multiple:
            if tag is not None:
                print(ParseHtml.convert_tag_to_string(tag), end='\n\n')
            else:
                print("Not found D:", end='\n\n')
    if testcase == 3:
        # Scenario 3: XPath retrieval.  The second assignment deliberately
        # overrides the valid selector with a malformed one to exercise the
        # error-reporting path; swap/comment the lines to test the happy path.
        #siuon_url = 'fake'
        xpath_pattern = '//*[(@id = "news")]'
        xpath_pattern = 'randomwrongformat'
        matches = ParseHtml.retrieve_xpath_matches(xpath_pattern, siuon_url)
        for elem in matches:
            print(ParseHtml.convert_lxml_element_to_string(elem))
| 36.811688 | 130 | 0.576998 |
acf132bafc7eeedea90be38ca52530e1efb8d0c2 | 4,754 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_product_request.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_product_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_product_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class DeleteProductRequest:
    """Request model for the DeleteProduct API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'product_id': 'str',
        'app_id': 'str'
    }

    attribute_map = {
        'instance_id': 'Instance-Id',
        'product_id': 'product_id',
        'app_id': 'app_id'
    }

    def __init__(self, instance_id=None, product_id=None, app_id=None):
        """DeleteProductRequest - a model defined in huaweicloud sdk

        :param instance_id: instance ID; only needed when calling from the
            management plane under physical multi-tenancy
        :param product_id: unique product ID assigned by the platform
        :param app_id: resource-space ID (optional)
        """
        self._instance_id = None
        self._product_id = None
        self._app_id = None
        self.discriminator = None
        # Optional fields are assigned only when supplied so their backing
        # attributes otherwise remain None.
        if instance_id is not None:
            self.instance_id = instance_id
        self.product_id = product_id
        if app_id is not None:
            self.app_id = app_id

    @property
    def instance_id(self):
        """Instance ID.

        Unique identifier of an instance under physical multi-tenancy.
        Regular HUAWEI CLOUD tenants normally omit it; it is required only
        when accessing the API from the management plane.
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Set the instance ID."""
        self._instance_id = instance_id

    @property
    def product_id(self):
        """Product ID.

        Uniquely identifies a product; assigned by the IoT platform when
        the product is created.  At most 36 characters: letters, digits,
        underscores (_) and hyphens (-).
        """
        return self._product_id

    @product_id.setter
    def product_id(self, product_id):
        """Set the product ID."""
        self._product_id = product_id

    @property
    def app_id(self):
        """Resource space ID.

        Optional in general, but users owning several resource spaces must
        supply it so the request names the space the product belongs to;
        otherwise the API reports an error.  At most 36 characters:
        letters, digits, underscores (_) and hyphens (-).
        """
        return self._app_id

    @app_id.setter
    def app_id(self, app_id):
        """Set the resource space ID."""
        self._app_id = app_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [item.to_dict() if hasattr(item, "to_dict") else item
                             for item in val]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                             for k, v in val.items()}
            elif name in self.sensitive_list:
                # Mask sensitive attributes in serialized output.
                out[name] = "****"
            else:
                out[name] = val
        return out

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, DeleteProductRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.987805 | 177 | 0.589609 |
acf132f6bddc6b43a08be5ca611236ad68e730ac | 27,590 | py | Python | legacy/migrations/0001_initial.py | tiagocordeiro/estudio-sie | 96ba0024145d1f9e0ec7a3cbdd11e555674b23a3 | [
"MIT"
] | null | null | null | legacy/migrations/0001_initial.py | tiagocordeiro/estudio-sie | 96ba0024145d1f9e0ec7a3cbdd11e555674b23a3 | [
"MIT"
] | 8 | 2020-06-06T15:21:32.000Z | 2022-03-12T00:18:34.000Z | legacy/migrations/0001_initial.py | tiagocordeiro/estudio-sie | 96ba0024145d1f9e0ec7a3cbdd11e555674b23a3 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-07 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AtividadesPrincipais',
fields=[
('atp_codigo', models.AutoField(primary_key=True, serialize=False)),
('atp_nome', models.CharField(max_length=60)),
],
options={
'db_table': 'atividades_principais',
'managed': False,
},
),
migrations.CreateModel(
name='Clientes',
fields=[
('cli_codigo', models.AutoField(primary_key=True, serialize=False)),
('neg_codigo', models.PositiveIntegerField(blank=True, null=True)),
('cli_nomefantasia_nomecomercial', models.CharField(blank=True, max_length=100, null=True)),
('cli_razaosocial_nomecompleto', models.CharField(blank=True, max_length=60, null=True)),
('cli_tipopessoa', models.CharField(blank=True, max_length=1, null=True)),
('cli_cpf_cnpj', models.CharField(blank=True, max_length=20, null=True)),
('cli_rg_ie', models.CharField(blank=True, max_length=20, null=True)),
('cli_tel_ddd', models.CharField(blank=True, max_length=3, null=True)),
('cli_tel_ramal', models.CharField(blank=True, max_length=10, null=True)),
('cli_tel', models.CharField(blank=True, max_length=10, null=True)),
('cli_fax_ddd', models.CharField(blank=True, max_length=3, null=True)),
('cli_fax_ramal', models.CharField(blank=True, max_length=10, null=True)),
('cli_fax', models.CharField(blank=True, max_length=10, null=True)),
('cli_valorminimo', models.CharField(blank=True, max_length=20, null=True)),
('cli_site', models.CharField(blank=True, max_length=60, null=True)),
('cli_preferencial', models.CharField(blank=True, max_length=1, null=True)),
('cli_atividade', models.CharField(blank=True, max_length=45, null=True)),
('cli_ativo', models.CharField(blank=True, max_length=1, null=True)),
('cli_coef_prova', models.CharField(blank=True, max_length=20, null=True)),
('cli_coef_fotolito', models.CharField(blank=True, max_length=20, null=True)),
('cli_pendenciafinan', models.CharField(blank=True, max_length=45, null=True)),
('cli_chapa', models.CharField(blank=True, max_length=45, null=True)),
('cli_os_correcao', models.CharField(blank=True, max_length=1, null=True)),
('cli_descricao', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'clientes',
'managed': False,
},
),
migrations.CreateModel(
name='ClientesContatos',
fields=[
('cnt_codigo', models.AutoField(primary_key=True, serialize=False)),
('cli_codigo', models.PositiveIntegerField()),
('cnt_nome', models.CharField(blank=True, max_length=60, null=True)),
('cnt_tel_ddd', models.CharField(blank=True, max_length=3, null=True)),
('cnt_telefone', models.CharField(blank=True, max_length=10, null=True)),
('cnt_tel_ramal', models.CharField(blank=True, max_length=10, null=True)),
('cnt_cel_ddd', models.CharField(blank=True, max_length=3, null=True)),
('cnt_celular', models.CharField(blank=True, max_length=10, null=True)),
('cnt_email', models.CharField(blank=True, max_length=60, null=True)),
('cnt_setor', models.CharField(blank=True, max_length=100, null=True)),
('cnt_responsavel', models.CharField(blank=True, max_length=1, null=True)),
('cnt_idnextel', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'db_table': 'clientes_contatos',
'managed': False,
},
),
migrations.CreateModel(
name='ClientesFormatosChapa',
fields=[
('cli_codigo', models.PositiveIntegerField()),
('fcp_codigo', models.PositiveIntegerField(primary_key=True, serialize=False)),
('cfc_utiliza', models.CharField(blank=True, max_length=1, null=True)),
('cfc_pinca', models.CharField(blank=True, max_length=20, null=True)),
('cfc_coef', models.CharField(blank=True, max_length=20, null=True)),
('cfc_expessura', models.FloatField(blank=True, null=True)),
],
options={
'db_table': 'clientes_formatos_chapa',
'managed': False,
},
),
migrations.CreateModel(
name='ClientesFormatosProva',
fields=[
('cli_codigo', models.PositiveIntegerField(primary_key=True, serialize=False)),
('fpr_codigo', models.PositiveIntegerField()),
('cfp_valor', models.CharField(blank=True, max_length=20, null=True)),
],
options={
'db_table': 'clientes_formatos_prova',
'managed': False,
},
),
migrations.CreateModel(
name='ClientesServicosUtilizados',
fields=[
('cli_codigo', models.PositiveIntegerField(primary_key=True, serialize=False)),
('srv_codigo', models.PositiveIntegerField()),
],
options={
'db_table': 'clientes_servicos_utilizados',
'managed': False,
},
),
migrations.CreateModel(
name='Cores',
fields=[
('cor_valor', models.CharField(max_length=3, primary_key=True, serialize=False)),
],
options={
'db_table': 'cores',
'managed': False,
},
),
migrations.CreateModel(
name='Enderecos',
fields=[
('end_codigo', models.AutoField(primary_key=True, serialize=False)),
('cli_codigo', models.PositiveIntegerField()),
('end_descricao', models.CharField(blank=True, max_length=45, null=True)),
('end_tipo', models.CharField(blank=True, max_length=45, null=True)),
('end_logradouro', models.CharField(blank=True, max_length=60, null=True)),
('end_numero', models.CharField(blank=True, max_length=10, null=True)),
('end_complemento', models.CharField(blank=True, max_length=40, null=True)),
('end_bairro', models.CharField(blank=True, max_length=45, null=True)),
('end_zona', models.CharField(blank=True, max_length=45, null=True)),
('end_cep', models.CharField(blank=True, max_length=9, null=True)),
('end_cidade', models.CharField(blank=True, max_length=45, null=True)),
('end_estado', models.CharField(blank=True, max_length=40, null=True)),
('end_referencia', models.CharField(blank=True, max_length=250, null=True)),
],
options={
'db_table': 'enderecos',
'managed': False,
},
),
migrations.CreateModel(
name='FormatosChapa',
fields=[
('fcp_codigo', models.AutoField(primary_key=True, serialize=False)),
('fcp_formato', models.CharField(max_length=20)),
],
options={
'db_table': 'formatos_chapa',
'managed': False,
},
),
migrations.CreateModel(
name='FormatosProva',
fields=[
('fpr_codigo', models.AutoField(primary_key=True, serialize=False)),
('fpr_papel', models.CharField(blank=True, max_length=20, null=True)),
('fpr_valor', models.CharField(blank=True, max_length=20, null=True)),
],
options={
'db_table': 'formatos_prova',
'managed': False,
},
),
migrations.CreateModel(
name='HorariosEntrega',
fields=[
('hor_codigo', models.AutoField(primary_key=True, serialize=False)),
('hor_horario', models.CharField(max_length=50)),
],
options={
'db_table': 'horarios_entrega',
'managed': False,
},
),
migrations.CreateModel(
name='Lineatura',
fields=[
('lin_valor', models.PositiveIntegerField(primary_key=True, serialize=False)),
],
options={
'db_table': 'lineatura',
'managed': False,
},
),
migrations.CreateModel(
name='Mensagens',
fields=[
('msg_codigo', models.AutoField(primary_key=True, serialize=False)),
('msg_texto', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'db_table': 'mensagens',
'managed': False,
},
),
migrations.CreateModel(
name='MensagensOpcoes',
fields=[
('mop_codigo', models.AutoField(primary_key=True, serialize=False)),
('mop_texto', models.CharField(blank=True, max_length=255, null=True)),
('msg_codigo', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'mensagens_opcoes',
'managed': False,
},
),
migrations.CreateModel(
name='Negociacao',
fields=[
('neg_codigo', models.AutoField(primary_key=True, serialize=False)),
('neg_nome', models.CharField(max_length=50)),
],
options={
'db_table': 'negociacao',
'managed': False,
},
),
migrations.CreateModel(
name='Os',
fields=[
('os_codigo', models.AutoField(primary_key=True, serialize=False)),
('cli_codigo', models.PositiveIntegerField()),
('os_dataabertura', models.DateField()),
('os_dataprometida', models.DateField()),
('os_hora', models.TimeField()),
('up_codigo2', models.CharField(blank=True, max_length=15, null=True)),
('usu_login', models.CharField(blank=True, max_length=100, null=True)),
('hor_codigo', models.PositiveIntegerField(blank=True, null=True)),
('neg_codigo', models.CharField(blank=True, max_length=60, null=True)),
('os_status', models.CharField(blank=True, max_length=1, null=True)),
('os_correcao', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'os',
'managed': False,
},
),
migrations.CreateModel(
name='OsArquivos',
fields=[
('arq_codigo', models.AutoField(primary_key=True, serialize=False)),
('arq_nome', models.CharField(max_length=60)),
('arq_envio', models.CharField(blank=True, max_length=20, null=True)),
('arq_tipo', models.CharField(blank=True, max_length=60, null=True)),
('arq_programa', models.CharField(blank=True, max_length=50, null=True)),
('arq_plataforma', models.CharField(blank=True, max_length=40, null=True)),
('arq_print', models.CharField(blank=True, max_length=35, null=True)),
('os_codigo', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'os_arquivos',
'managed': False,
},
),
migrations.CreateModel(
name='OsConfirmacao',
fields=[
('com_codigo', models.AutoField(primary_key=True, serialize=False)),
('cli_codigo', models.PositiveIntegerField()),
('os_codigo', models.PositiveIntegerField()),
('msg_codigo', models.PositiveIntegerField(blank=True, null=True)),
('com_observacaoaprovacao', models.TextField(blank=True, null=True)),
('com_endereco', models.TextField(blank=True, null=True)),
('com_retirarestudio_entregar', models.CharField(blank=True, max_length=1, null=True)),
('com_entregaurgente', models.CharField(blank=True, max_length=1, null=True)),
('com_data', models.DateField(blank=True, null=True)),
('hor_codigo', models.IntegerField(blank=True, null=True)),
('com_observacaoentrega', models.TextField(blank=True, null=True)),
('up_codigo', models.CharField(blank=True, max_length=15, null=True)),
('com_negociacao', models.CharField(blank=True, max_length=60, null=True)),
('com_fotolito', models.FloatField(blank=True, null=True)),
('com_provas', models.FloatField(blank=True, null=True)),
('com_ctp', models.FloatField(blank=True, null=True)),
('com_plusservice', models.FloatField(blank=True, null=True)),
('com_total', models.FloatField(blank=True, null=True)),
('com_retirarestudio', models.CharField(blank=True, max_length=1, null=True)),
('end_codigo', models.PositiveIntegerField(blank=True, null=True)),
('mop_codigo', models.PositiveIntegerField(blank=True, null=True)),
('com_contato', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'db_table': 'os_confirmacao',
'managed': False,
},
),
migrations.CreateModel(
name='OsCorrecao',
fields=[
('osc_codigo', models.AutoField(primary_key=True, serialize=False)),
('os_codigo', models.IntegerField()),
('cli_codigo', models.PositiveIntegerField()),
('osc_tec_platesetter', models.CharField(blank=True, max_length=1, null=True)),
('osc_tec_apogeex', models.CharField(blank=True, max_length=1, null=True)),
('osc_tec_aplicacao', models.CharField(blank=True, max_length=1, null=True)),
('osc_tec_outro', models.CharField(blank=True, max_length=1, null=True)),
('osc_tec_processadora', models.CharField(blank=True, max_length=1, null=True)),
('osc_pro_fechamento', models.CharField(blank=True, max_length=1, null=True)),
('osc_pro_tracado', models.CharField(blank=True, max_length=1, null=True)),
('osc_pro_imagem', models.CharField(blank=True, max_length=1, null=True)),
('osc_pro_fonte', models.CharField(blank=True, max_length=1, null=True)),
('osc_pro_outro', models.CharField(blank=True, max_length=100, null=True)),
('osc_responsavel', models.CharField(blank=True, max_length=100, null=True)),
('osc_ocorrencia', models.TextField(blank=True, null=True)),
('osc_natureza', models.CharField(blank=True, max_length=1, null=True)),
],
options={
'db_table': 'os_correcao',
'managed': False,
},
),
migrations.CreateModel(
name='OsCtp',
fields=[
('ctp_codigo', models.AutoField(primary_key=True, serialize=False)),
('imp_codigo', models.PositiveIntegerField()),
('ctp_quantidade', models.PositiveIntegerField(blank=True, null=True)),
('ctp_formatoschapa', models.PositiveIntegerField(blank=True, null=True)),
('ctp_pinca', models.FloatField(blank=True, null=True)),
('ctp_valor', models.FloatField(blank=True, null=True)),
('ctp_forneada', models.CharField(blank=True, max_length=15, null=True)),
('ctp_lineatura', models.IntegerField(blank=True, null=True)),
('ctp_reticula', models.CharField(blank=True, max_length=45, null=True)),
('os_codigo', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'os_ctp',
'managed': False,
},
),
migrations.CreateModel(
name='OsFotolito',
fields=[
('fot_codigo', models.AutoField(primary_key=True, serialize=False)),
('imp_codigo', models.PositiveIntegerField()),
('fot_quantidade', models.PositiveIntegerField(blank=True, null=True)),
('fot_cor', models.CharField(blank=True, max_length=3, null=True)),
('fot_largura', models.CharField(blank=True, max_length=15, null=True)),
('fot_altura', models.FloatField(blank=True, null=True)),
('fot_valor', models.FloatField(blank=True, null=True)),
('os_codigo', models.PositiveIntegerField(blank=True, null=True)),
('fot_objetivo', models.CharField(blank=True, max_length=50, null=True)),
('fot_lineatura', models.IntegerField(blank=True, null=True)),
('fot_filme', models.CharField(blank=True, max_length=50, null=True)),
('fot_impressao', models.CharField(blank=True, max_length=1, null=True)),
],
options={
'db_table': 'os_fotolito',
'managed': False,
},
),
migrations.CreateModel(
name='OsImpressos',
fields=[
('imp_codigo', models.AutoField(primary_key=True, serialize=False)),
('os_codigo', models.PositiveIntegerField()),
('cli_codigo', models.PositiveIntegerField()),
('arq_codigo', models.PositiveIntegerField()),
('imp_titulo', models.CharField(blank=True, max_length=60, null=True)),
('imp_tipomontagem', models.CharField(blank=True, max_length=1, null=True)),
('imp_tipo_promo', models.CharField(blank=True, max_length=1, null=True)),
('imp_paginas_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_formato_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_cores_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_acabamento_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_montagemtipo_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_figurarfolha_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_posicaofolha_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_corte_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_aberturavertical_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_aberturahorizontal_promo', models.CharField(blank=True, max_length=50, null=True)),
('imp_observacao_promo', models.CharField(blank=True, max_length=150, null=True)),
('imp_tipo_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_montagem_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_paginas_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_formato_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_cores_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_orientacaopagina', models.CharField(blank=True, max_length=20, null=True)),
('imp_infomontagem', models.CharField(blank=True, max_length=50, null=True)),
('imp_figurasfolha_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_numeropaginas_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_posicaofolha_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_corte_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_aberturavertical_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_aberturahorizontal_edit', models.CharField(blank=True, max_length=50, null=True)),
('imp_observacao_edit', models.CharField(blank=True, max_length=150, null=True)),
],
options={
'db_table': 'os_impressos',
'managed': False,
},
),
migrations.CreateModel(
name='OsLog',
fields=[
('log_codigo', models.AutoField(primary_key=True, serialize=False)),
('os_codigo', models.PositiveIntegerField()),
('sts_status', models.CharField(blank=True, max_length=1, null=True)),
('log_datahora', models.DateTimeField(blank=True, null=True)),
('usu_login', models.CharField(blank=True, max_length=60, null=True)),
('log_observacao', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'os_log',
'managed': False,
},
),
migrations.CreateModel(
name='OsPlusService',
fields=[
('plus_codigo', models.AutoField(primary_key=True, serialize=False)),
('cli_codigo', models.PositiveIntegerField()),
('os_codigo', models.PositiveIntegerField()),
('plus_diagramacao', models.CharField(blank=True, max_length=1, null=True)),
('plus_fechamento', models.CharField(blank=True, max_length=1, null=True)),
('plus_scanner', models.CharField(blank=True, max_length=1, null=True)),
('plus_perfil', models.CharField(blank=True, max_length=1, null=True)),
('plus_calibracao', models.CharField(blank=True, max_length=1, null=True)),
('plus_outro', models.CharField(blank=True, max_length=100, null=True)),
('plus_valor', models.FloatField(blank=True, null=True)),
('plus_titulo', models.CharField(blank=True, max_length=100, null=True)),
('plus_observacao', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'db_table': 'os_plus_service',
'managed': False,
},
),
migrations.CreateModel(
name='OsProvasOutro',
fields=[
('fpo_codigo', models.AutoField(primary_key=True, serialize=False)),
('imp_codigo', models.PositiveIntegerField()),
('os_codigo', models.PositiveIntegerField(blank=True, null=True)),
('fpo_quantidade', models.PositiveIntegerField(blank=True, null=True)),
('fpo_altura', models.FloatField(blank=True, null=True)),
('fpo_largura', models.FloatField(blank=True, null=True)),
('fpo_valor', models.FloatField(blank=True, null=True)),
],
options={
'db_table': 'os_provas_outro',
'managed': False,
},
),
migrations.CreateModel(
name='OsProvasPadrao',
fields=[
('fpp_codigo', models.AutoField(primary_key=True, serialize=False)),
('imp_codigo', models.PositiveIntegerField()),
('fpp_quantidade', models.PositiveIntegerField(blank=True, null=True)),
('fpp_formato', models.PositiveIntegerField(blank=True, null=True)),
('fpp_valor', models.FloatField(blank=True, null=True)),
('os_codigo', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'os_provas_padrao',
'managed': False,
},
),
migrations.CreateModel(
name='OsStatus',
fields=[
('sts_status', models.CharField(max_length=1, primary_key=True, serialize=False)),
('sts_nome', models.CharField(max_length=20)),
],
options={
'db_table': 'os_status',
'managed': False,
},
),
migrations.CreateModel(
name='ServicosUtilizados',
fields=[
('srv_codigo', models.AutoField(primary_key=True, serialize=False)),
('srv_nome', models.CharField(max_length=60)),
],
options={
'db_table': 'servicos_utilizados',
'managed': False,
},
),
migrations.CreateModel(
name='Up',
fields=[
('up_codigo', models.CharField(max_length=10, primary_key=True, serialize=False)),
('up_nome', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'db_table': 'up',
'managed': False,
},
),
migrations.CreateModel(
name='Usuarios',
fields=[
('usu_login', models.CharField(max_length=60, primary_key=True, serialize=False)),
('usu_nome', models.CharField(blank=True, max_length=60, null=True)),
('usu_senha', models.CharField(max_length=32)),
('usu_status', models.CharField(blank=True, max_length=1, null=True)),
('usu_tipo', models.CharField(blank=True, max_length=50, null=True)),
('usu_lastlogin', models.DateTimeField(blank=True, null=True)),
('up_codigo', models.CharField(blank=True, max_length=15, null=True)),
('usu_excluido', models.CharField(blank=True, max_length=1, null=True)),
],
options={
'db_table': 'usuarios',
'managed': False,
},
),
migrations.CreateModel(
name='UsuariosPermissoes',
fields=[
('usu_login', models.CharField(max_length=60, primary_key=True, serialize=False)),
('per_cad_usuario', models.CharField(blank=True, max_length=1, null=True)),
('per_con_usuario', models.CharField(blank=True, max_length=1, null=True)),
('per_cad_cliente', models.CharField(blank=True, max_length=1, null=True)),
('per_con_cliente', models.CharField(blank=True, max_length=1, null=True)),
],
options={
'db_table': 'usuarios_permissoes',
'managed': False,
},
),
]
| 51.763602 | 108 | 0.555274 |
acf13521ebc375d77c914bd1717cbfdc019e6210 | 2,773 | py | Python | koans/about_scoring_project.py | shibamirai/python_koans | c1eba327dbe9534042e510dfce4e0c49c853255e | [
"MIT"
] | null | null | null | koans/about_scoring_project.py | shibamirai/python_koans | c1eba327dbe9534042e510dfce4e0c49c853255e | [
"MIT"
] | null | null | null | koans/about_scoring_project.py | shibamirai/python_koans | c1eba327dbe9534042e510dfce4e0c49c853255e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# グリードは、最大で5つまでのサイコロを振って得点を競うゲームです。
# 下記の score 関数は、サイコロ一振りの得点を計算します。
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used to calculate the
# score of a single roll of the dice.
#
# グリードは以下のように得点が決まります。
# A greed roll is scored as follows:
#
# * 1の目が3つ揃うと1000点です。
# * A set of three ones is 1000 points
#
# * 1以外の目が3つ揃うと、出た目の100倍の得点です。(例:5の目が3つで500点)
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * 3つの組以外の1の目は1個につき100点です。
# * A one (that is not part of a set of three) is worth 100 points.
#
# * 3つの組以外の5の目は1個につき50点です。
# * A five (that is not part of a set of three) is worth 50 points.
#
# * 上記以外は0点です。
# * Everything else is worth 0 points.
#
#
# 例:
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# 他の例は下記のテストコードにあります。
# More scoring examples are given in the tests below:
#
# この score 関数を完成させてください。
# Your goal is to write the score method.
def score(dice):
    """Score a single roll of the Greed dice game.

    Rules (from the description above):
      * three 1s                     -> 1000 points
      * three of any other face      -> face value * 100 points
      * each 1 not in a triple       -> 100 points
      * each 5 not in a triple       -> 50 points
      * everything else              -> 0 points

    :param dice: iterable of die faces (ints 1-6); may be empty
    :return: total score as an int (0 for an empty roll)
    """
    from collections import Counter

    total = 0
    for face, count in Counter(dice).items():
        if count >= 3:
            # Score the triple first, then fall through to score leftovers.
            total += 1000 if face == 1 else face * 100
            count -= 3
        if face == 1:
            total += count * 100
        elif face == 5:
            total += count * 50
    return total
class AboutScoringProject(Koan):
    "Scoring project: exercises for the Greed dice-scoring function"
    def test_score_of_an_empty_list_is_zero(self):
        "An empty roll scores 0 points"
        self.assertEqual(0, score([]))
    def test_score_of_a_single_roll_of_5_is_50(self):
        "A single 5 scores 50 points"
        self.assertEqual(50, score([5]))
    def test_score_of_a_single_roll_of_1_is_100(self):
        "A single 1 scores 100 points"
        self.assertEqual(100, score([1]))
    def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
        "Multiple 1s and 5s score the sum of their individual values"
        self.assertEqual(300, score([1,5,5,1]))
    def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
        "Single 2s, 3s, 4s and 6s score 0 points"
        self.assertEqual(0, score([2,3,4,6]))
    def test_score_of_a_triple_1_is_1000(self):
        "Three 1s score 1000 points"
        self.assertEqual(1000, score([1,1,1]))
    def test_score_of_other_triples_is_100x(self):
        "Three of any face other than 1 scores 100 times the face value"
        self.assertEqual(200, score([2,2,2]))
        self.assertEqual(300, score([3,3,3]))
        self.assertEqual(400, score([4,4,4]))
        self.assertEqual(500, score([5,5,5]))
        self.assertEqual(600, score([6,6,6]))
    def test_score_of_mixed_is_sum(self):
        "The total score is the sum of all the parts"
        self.assertEqual(250, score([2,5,2,2,3]))
        self.assertEqual(550, score([5,5,5,5]))
        self.assertEqual(1150, score([1,1,1,5,1]))
    def test_ones_not_left_out(self):
        "Don't forget the scores for single dice"
        self.assertEqual(300, score([1,2,2,2]))
        self.assertEqual(350, score([1,5,2,2,2]))
| 28.885417 | 79 | 0.666066 |
acf13541045885e245fd5444904b28a67ed1b492 | 9,630 | py | Python | salt/modules/postfix.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | 1 | 2015-08-20T21:55:17.000Z | 2015-08-20T21:55:17.000Z | salt/modules/postfix.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | null | null | null | salt/modules/postfix.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Support for Postfix
This module is currently little more than a config file viewer and editor. It
is able to read the master.cf file (which is one style) and files in the style
of main.cf (which is a different style, that is used in multiple postfix
configuration files).
The design of this module is such that when files are edited, a minimum of
changes are made to them. Each file should look as if it has been edited by
hand; order, comments and whitespace are all preserved.
'''
# Import python libs
import re
import logging
# Import salt libs
import salt.utils
# Matches a line beginning with whitespace, i.e. a main.cf continuation line.
SWWS = re.compile(r'^\s')
log = logging.getLogger(__name__)
# Default locations of the Postfix configuration files.
MAIN_CF = '/etc/postfix/main.cf'
MASTER_CF = '/etc/postfix/master.cf'
def __virtual__():
    '''
    Make the module available only when the postfix binary can be found.
    '''
    return bool(salt.utils.which('postfix'))
def _parse_master(path=MASTER_CF):
    '''
    Parse the master.cf file. Active lines are whitespace-delimited columns:
    service, type, private (yes), unpriv (yes), chroot (yes), wakeup (never),
    maxproc (100), command + args. Where a value does not detract from the
    default a dash (-) is used.

    Returns a (dict, list) pair: the dict maps 'service conn_type' keys to
    entry dicts for the active lines, while the list preserves the whole
    file in order (comments and blank lines as plain strings, active lines
    as entry dicts). The two structures complement each other.
    '''
    with salt.utils.fopen(path, 'r') as fh_:
        raw = fh_.read()

    ordered = []    # every line of the file, in original order
    entries = {}    # active entries only, keyed by 'service conn_type'
    for raw_line in raw.splitlines():
        stripped = raw_line.strip()
        if not stripped or stripped.startswith('#'):
            # Blank lines and comments are carried through verbatim.
            ordered.append(raw_line)
            continue
        fields = stripped.split()
        entry = {
            'service': fields[0],
            'conn_type': fields[1],
            'private': fields[2],
            'unpriv': fields[3],
            'chroot': fields[4],
            'wakeup': fields[5],
            'maxproc': fields[6],
            'command': ' '.join(fields[7:]),
        }
        ordered.append(entry)
        entries['{0} {1}'.format(fields[0], fields[1])] = entry
    return entries, ordered
def show_master(path=MASTER_CF):
    '''
    Return a dict of the active master.cf entries. This does not include
    comments, spacing or order, so the result must not be used to rewrite
    the file directly; other functions are available for that.

    CLI Examples:

        salt <minion> postfix.show_master
        salt <minion> postfix.show_master path=/path/to/master.cf
    '''
    active_entries, _ = _parse_master(path)
    return active_entries
def set_master(service,
               conn_type,
               private='y',
               unpriv='y',
               chroot='y',
               wakeup='n',
               maxproc='100',
               command='',
               write_conf=True,
               path=MASTER_CF):
    '''
    Set a single service entry in the master.cf file, appending a new entry
    if the (service, conn_type) pair does not already exist.

    Because of shell parsing issues, '-' cannot be passed on the command
    line; use 'y', 'n' or a number instead. Values that match the master.cf
    defaults are converted to '-' internally (see _format_master). Calling
    this function from the Python API is not affected by this limitation.

    The settings and their default values, in order, are: service (required),
    conn_type (required), private (y), unpriv (y), chroot (y), wakeup (n),
    maxproc (100), command (required).

    By default the updated contents are written back to master.cf and the
    full file contents are returned; pass ``write_conf=False`` to skip the
    write.

    CLI Example:

        salt <minion> postfix.set_master smtp inet n y n n 100 smtpd
    '''
    existing, parsed = _parse_master(path)
    replacement = _format_master(
        service,
        conn_type,
        private,
        unpriv,
        chroot,
        wakeup,
        maxproc,
        command,
    )

    updated = []
    for entry in parsed:
        if not isinstance(entry, dict):
            # Comment or blank line: carry through untouched.
            updated.append(entry)
        elif entry['service'] == service and entry['conn_type'] == conn_type:
            # This is the one entry being changed.
            updated.append(replacement)
        else:
            # Unchanged entry, but re-render it in the canonical layout.
            updated.append(_format_master(**entry))

    if '{0} {1}'.format(service, conn_type) not in existing:
        # The entry was not present anywhere in the file: append it.
        updated.append(replacement)

    if write_conf:
        _write_conf(updated, path)
    return '\n'.join(updated)
def _format_master(service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command):
'''
Format the given values into the style of line normally used in the
master.cf file.
'''
#==========================================================================
#service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
#==========================================================================
#smtp inet n - n - - smtpd
if private == 'y':
private = '-'
if unpriv == 'y':
unpriv = '-'
if chroot == 'y':
chroot = '-'
if wakeup == 'n':
wakeup = '-'
maxproc = str(maxproc)
if maxproc == '100':
maxproc = '-'
conf_line = '{0:9s} {1:5s} {2:7s} {3:7s} {4:7s} {5:7s} {6:7s} {7}'.format(
service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command,
)
#print(conf_line)
return conf_line
def _parse_main(path=MAIN_CF):
    '''
    Parse files in the style of main.cf. This is not just a "name = value" file;
    there are other rules:

    * Comments start with #
    * Any whitespace at the beginning of a line denotes that that line is a
        continuation from the previous line.
    * The whitespace rule applies to comments.
    * Keys defined in the file may be referred to as variables further down in
        the file.

    Returns a (pairs, conf_list) tuple: ``pairs`` maps key -> value for the
    active settings; ``conf_list`` holds every (continuation-joined) line of
    the file, in order, including comments and blanks.
    '''
    with salt.utils.fopen(path, 'r') as fh_:
        full_conf = fh_.read()
    # Condense the file based on line continuations, but keep order, comments
    # and whitespace
    conf_list = []
    for line in full_conf.splitlines():
        if not line.strip():
            # Blank line: kept as-is so the file can be reproduced.
            conf_list.append(line)
            continue
        if re.match(SWWS, line):
            if not conf_list:
                # This should only happen at the top of the file
                conf_list.append(line)
                continue
            if not isinstance(conf_list[-1], str):
                # NOTE(review): conf_list only ever holds str in this
                # function, so this guard looks vestigial (possibly copied
                # from the master.cf parser) -- confirm before removing.
                conf_list[-1] = ''
            # This line is a continuation of the previous line
            conf_list[-1] = '\n'.join([conf_list[-1], line])
        else:
            conf_list.append(line)
    # Extract just the actual key/value pairs
    pairs = {}
    for line in conf_list:
        if not line.strip():
            continue
        if line.startswith('#'):
            continue
        # Split on the first '='; re-join the rest so values may contain '='.
        comps = line.split('=')
        pairs[comps[0].strip()] = '='.join(comps[1:]).strip()
    # Return both sets of data, they compliment each other elsewhere
    return pairs, conf_list
def show_main(path=MAIN_CF):
    '''
    Return a dict of the active key/value pairs in a main.cf style file.
    Comments, spacing and ordering are not included. Bear in mind that
    ordering is functionally important in main.cf, since keys can be
    referred to as variables; therefore this result must not be used to
    rewrite the file directly -- other functions are available for that.

    CLI Examples:

        salt <minion> postfix.show_main
        salt <minion> postfix.show_main path=/path/to/main.cf
    '''
    active_pairs, _ = _parse_main(path)
    return active_pairs
def set_main(key, value, path=MAIN_CF):
    '''
    Set a single config value in the main.cf file. If the key is not already
    present it is appended to the end. Returns the new file contents as a
    list of lines.

    CLI Example:

        salt <minion> postfix.set_main mailq_path /usr/bin/mailq
    '''
    pairs, conf_list = _parse_main(path)
    new_conf = []
    if key in pairs:
        for line in conf_list:
            stripped = line.strip()
            if not stripped or stripped.startswith('#'):
                # Blank lines and comments pass through untouched.
                new_conf.append(line)
                continue
            # BUG FIX: the old ``line.startswith(key)`` test also clobbered
            # any key that merely shares this prefix (e.g. setting
            # 'mydestination' rewrote 'mydestination_backup' too). Compare
            # the parsed key name exactly instead.
            if line.split('=')[0].strip() == key:
                new_conf.append('{0} = {1}'.format(key, value))
            else:
                new_conf.append(line)
    else:
        conf_list.append('{0} = {1}'.format(key, value))
        new_conf = conf_list
    _write_conf(new_conf, path)
    return new_conf
def _write_conf(conf, path=MAIN_CF):
    '''
    Write out a configuration file.

    ``conf`` is a list whose items are either pre-formatted lines (str) or
    master.cf entry dicts as produced by _parse_master().
    '''
    with salt.utils.fopen(path, 'w') as fh_:
        for line in conf:
            if isinstance(line, dict):
                # BUG FIX: ``' '.join(line)`` iterated the dict's *keys*,
                # discarding every value. Render the entry properly in the
                # canonical master.cf column layout instead.
                fh_.write(_format_master(**line))
            else:
                fh_.write(line)
            fh_.write('\n')
| 30.571429 | 80 | 0.584839 |
acf1387b102917341e13594057c58634ee2311d6 | 2,020 | py | Python | tests/device/tf_cfar10_device.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | 1 | 2019-08-15T15:32:36.000Z | 2019-08-15T15:32:36.000Z | tests/device/tf_cfar10_device.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | null | null | null | tests/device/tf_cfar10_device.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | null | null | null | import os
import time
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# Expose only GPU index 1 to TensorFlow (``os`` is imported above).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Download/load CIFAR-10 (50k train / 10k test, 32x32 RGB images).
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Preview the first 25 training images with their class names.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
# NOTE: this blocks until the preview window is closed.
plt.show()
# Small ConvNet: three conv layers (with pooling after the first two),
# then a dense classifier producing one logit per CIFAR class.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary()
# The final Dense layer emits raw logits, hence from_logits=True.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Time the training run (10 epochs, validating on the test split).
tstart = time.time()
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
tend = time.time()
print("Training time: ", tend - tstart)
# Plot train vs. validation accuracy per epoch.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
# Time the evaluation pass and report the final test accuracy.
tstart = time.time()
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
tend = time.time()
print("Testing time: ", tend - tstart)
print(test_acc)
| 28.857143 | 87 | 0.694059 |
acf138855a1f09b025a46882ca15b4ec87024f87 | 636 | py | Python | python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textsinkstreamop.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textsinkstreamop.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textsinkstreamop.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestTextSinkStreamOp(unittest.TestCase):
    """Doc example: stream the iris CSV and sink one column to a text file."""

    def test_textsinkstreamop(self):
        source_url = "https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv"
        schema = "sepal_length double, sepal_width double, petal_length double, petal_width double, category string"
        # Read the CSV stream and keep only the category column.
        source = CsvSourceStreamOp().setFilePath(source_url).setSchemaStr(schema)
        category_stream = source.select("category")
        # Sink the stream to a local text file, overwriting any previous run.
        sink = TextSinkStreamOp().setFilePath('/tmp/text.csv').setOverwriteSink(True)
        category_stream.link(sink)
        StreamOperator.execute()
acf138a693d851e36bfd3bf3fb0d92c4d9bec9eb | 525 | py | Python | batch/batch/worker/disk.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | batch/batch/worker/disk.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | 19 | 2022-03-03T20:11:41.000Z | 2022-03-30T20:31:57.000Z | batch/batch/worker/disk.py | pwc2/hail | edeb70bc789c881dffa0724ddd11fcb25e689b67 | [
"MIT"
] | null | null | null | import abc
import logging
log = logging.getLogger('disk')
class CloudDisk(abc.ABC):
name: str
async def __aenter__(self, labels=None):
await self.create(labels)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.delete()
await self.close()
@abc.abstractmethod
async def create(self, labels=None):
pass
@abc.abstractmethod
async def delete(self):
pass
@abc.abstractmethod
async def close(self):
pass
| 18.103448 | 57 | 0.632381 |
acf13922dcb4158670adad312791315b98e44211 | 206 | py | Python | proto_2/ddq/fol/natural_deduction/negation.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | proto_2/ddq/fol/natural_deduction/negation.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | 6 | 2021-03-19T12:06:56.000Z | 2022-03-12T00:23:09.000Z | proto_2/ddq/fol/natural_deduction/negation.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | from ddq.fol.connective import Connective, ConnectiveNode
class Negation(Connective):
def __init__(self):
super().__init__('¬', 1, NegationNode)
class NegationNode(ConnectiveNode):
pass
| 18.727273 | 57 | 0.723301 |
acf139254575b497c7ae1e9a9e21199383538e3f | 6,055 | py | Python | library/legacy/ws2812/ws2812.py | YA-androidapp/unicorn-hat | 319b334f80da6f617ef25fa75edbdf8924f4925f | [
"MIT"
] | 28 | 2017-04-20T06:21:26.000Z | 2021-12-10T15:22:51.000Z | library/legacy/ws2812/ws2812.py | YA-androidapp/unicorn-hat | 319b334f80da6f617ef25fa75edbdf8924f4925f | [
"MIT"
] | 3 | 2017-04-05T00:41:45.000Z | 2020-04-04T00:44:24.000Z | library/legacy/ws2812/ws2812.py | YA-androidapp/unicorn-hat | 319b334f80da6f617ef25fa75edbdf8924f4925f | [
"MIT"
] | 5 | 2016-11-26T14:44:55.000Z | 2021-07-29T04:25:53.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_ws2812', [dirname(__file__)])
except ImportError:
import _ws2812
return _ws2812
if fp is not None:
try:
_mod = imp.load_module('_ws2812', fp, pathname, description)
finally:
fp.close()
return _mod
_ws2812 = swig_import_helper()
del swig_import_helper
else:
import _ws2812
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class Color_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Color_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Color_t, name)
__repr__ = _swig_repr
__swig_setmethods__["r"] = _ws2812.Color_t_r_set
__swig_getmethods__["r"] = _ws2812.Color_t_r_get
if _newclass:r = _swig_property(_ws2812.Color_t_r_get, _ws2812.Color_t_r_set)
__swig_setmethods__["g"] = _ws2812.Color_t_g_set
__swig_getmethods__["g"] = _ws2812.Color_t_g_get
if _newclass:g = _swig_property(_ws2812.Color_t_g_get, _ws2812.Color_t_g_set)
__swig_setmethods__["b"] = _ws2812.Color_t_b_set
__swig_getmethods__["b"] = _ws2812.Color_t_b_get
if _newclass:b = _swig_property(_ws2812.Color_t_b_get, _ws2812.Color_t_b_set)
def __init__(self):
this = _ws2812.new_Color_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ws2812.delete_Color_t
__del__ = lambda self : None;
Color_t_swigregister = _ws2812.Color_t_swigregister
Color_t_swigregister(Color_t)
def init(*args):
return _ws2812.init(*args)
init = _ws2812.init
def clear():
return _ws2812.clear()
clear = _ws2812.clear
def show():
return _ws2812.show()
show = _ws2812.show
def Wheel(*args):
return _ws2812.Wheel(*args)
Wheel = _ws2812.Wheel
def colorWipe(*args):
return _ws2812.colorWipe(*args)
colorWipe = _ws2812.colorWipe
def rainbow(*args):
return _ws2812.rainbow(*args)
rainbow = _ws2812.rainbow
def rainbowCycle(*args):
return _ws2812.rainbowCycle(*args)
rainbowCycle = _ws2812.rainbowCycle
def theaterChase(*args):
return _ws2812.theaterChase(*args)
theaterChase = _ws2812.theaterChase
def theaterChaseRainbow(*args):
return _ws2812.theaterChaseRainbow(*args)
theaterChaseRainbow = _ws2812.theaterChaseRainbow
def setBrightness(*args):
return _ws2812.setBrightness(*args)
setBrightness = _ws2812.setBrightness
def getBrightness():
return _ws2812.getBrightness()
getBrightness = _ws2812.getBrightness
def RGB2Color(*args):
return _ws2812.RGB2Color(*args)
RGB2Color = _ws2812.RGB2Color
def Color(*args):
return _ws2812.Color(*args)
Color = _ws2812.Color
def setPixelColor(*args):
return _ws2812.setPixelColor(*args)
setPixelColor = _ws2812.setPixelColor
def setPixelColorT(*args):
return _ws2812.setPixelColorT(*args)
setPixelColorT = _ws2812.setPixelColorT
def getPixelColor(*args):
return _ws2812.getPixelColor(*args)
getPixelColor = _ws2812.getPixelColor
def numPixels():
return _ws2812.numPixels()
numPixels = _ws2812.numPixels
def getPixels():
return _ws2812.getPixels()
getPixels = _ws2812.getPixels
def setPWMBit(*args):
return _ws2812.setPWMBit(*args)
setPWMBit = _ws2812.setPWMBit
def getPWMBit(*args):
return _ws2812.getPWMBit(*args)
getPWMBit = _ws2812.getPWMBit
def dumpLEDBuffer():
return _ws2812.dumpLEDBuffer()
dumpLEDBuffer = _ws2812.dumpLEDBuffer
def dumpPWMBuffer():
return _ws2812.dumpPWMBuffer()
dumpPWMBuffer = _ws2812.dumpPWMBuffer
def dumpPWMStatus():
return _ws2812.dumpPWMStatus()
dumpPWMStatus = _ws2812.dumpPWMStatus
def dumpPWMControl(*args):
return _ws2812.dumpPWMControl(*args)
dumpPWMControl = _ws2812.dumpPWMControl
def dumpPWMDMAC():
return _ws2812.dumpPWMDMAC()
dumpPWMDMAC = _ws2812.dumpPWMDMAC
def dumpPWM():
return _ws2812.dumpPWM()
dumpPWM = _ws2812.dumpPWM
def dumpDMARegs():
return _ws2812.dumpDMARegs()
dumpDMARegs = _ws2812.dumpDMARegs
def dumpControlBlock(*args):
return _ws2812.dumpControlBlock(*args)
dumpControlBlock = _ws2812.dumpControlBlock
def dumpTransferInformation(*args):
return _ws2812.dumpTransferInformation(*args)
dumpTransferInformation = _ws2812.dumpTransferInformation
def dumpDMA():
return _ws2812.dumpDMA()
dumpDMA = _ws2812.dumpDMA
def terminate(*args):
return _ws2812.terminate(*args)
terminate = _ws2812.terminate
# This file is compatible with both classic and new-style classes.
| 27.39819 | 90 | 0.731131 |
acf139ec19df81e6f5d265d0e8611e9c385f419b | 2,023 | py | Python | code/face_choose.py | Octemull/JustSmile | 02b41fee838b1e02366c1483f4116c182c1a97a5 | [
"MIT"
] | 1 | 2019-05-12T11:35:16.000Z | 2019-05-12T11:35:16.000Z | code/face_choose.py | Octemull/JustSmile | 02b41fee838b1e02366c1483f4116c182c1a97a5 | [
"MIT"
] | null | null | null | code/face_choose.py | Octemull/JustSmile | 02b41fee838b1e02366c1483f4116c182c1a97a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import cv2
# NOTE(review): ``global`` is a no-op at module scope; this line only
# documents the names shared with the mouse-callback functions below.
global img, point1, point2, face_loc
def on_mouse(event, x, y, flags, param):
    """OpenCV mouse callback: let the user drag a rectangle over a face.

    Pressing the left button marks the first corner, dragging previews the
    rectangle, and releasing finalizes the selection. On release:
      * the cropped region is written to ./cut_1.jpeg, and
      * the module-level ``cut_img`` (cropped array) and ``face_loc``
        (top, bot, left, right) are set for choose_face() to return.
    """
    global img, point1, point2, face_loc, cut_img
    img2 = img.copy()
    # BUG FIX: removed the bare ``cv2.HoughLinesP`` statements that appeared
    # in every branch -- they were no-op attribute accesses, not calls.
    if event == cv2.EVENT_LBUTTONDOWN:  # left button pressed
        point1 = (x, y)
        cv2.circle(img2, point1, 10, (0, 255, 0), 5)
        cv2.namedWindow("image", 0)
        cv2.resizeWindow("image", 800, 800)
        cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging
        cv2.namedWindow("image", 0)
        cv2.resizeWindow("image", 800, 800)
        # Draw the rectangle before showing; the old pre-rectangle imshow
        # was immediately overwritten and has been dropped.
        cv2.rectangle(img2, point1, (x, y), (255, 0, 0), 5)
        cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # released: finalize the selection
        point2 = (x, y)
        cv2.namedWindow("image", 0)
        cv2.resizeWindow("image", 800, 800)
        cv2.rectangle(img2, point1, point2, (0, 0, 255), 5)
        cv2.imshow('image', img2)
        # Normalize the two corners into (top, left) + extents so a drag in
        # any direction produces a valid crop.
        min_x = min(point1[0], point2[0])
        min_y = min(point1[1], point2[1])
        width = abs(point1[0] - point2[0])
        height = abs(point1[1] - point2[1])
        cut_img = img[min_y:min_y + height, min_x:min_x + width]
        cv2.imwrite('./cut_1.jpeg', cut_img)
        face_loc = (min_y, min_y + height, min_x, min_x + width)
def choose_face(im1):
    """Open the image at path *im1* and let the user drag a face rectangle.

    Blocks until any key is pressed in the window, then returns
    ``(face_loc, cut_img)`` where ``face_loc`` is (top, bot, left, right)
    in image coordinates and ``cut_img`` is the cropped BGR array (both set
    by the on_mouse callback; also written to ./cut_1.jpeg).
    """
    global img, cut_img
    img = cv2.imread(im1)
    # BUG FIX: removed the dead bare ``cv2.HoughLinesP`` statement (a no-op
    # attribute access) and the commented-out window-creation leftovers.
    cv2.namedWindow("image", 0)
    cv2.resizeWindow("image", 800, 800)
    cv2.setMouseCallback('image', on_mouse)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return (face_loc, cut_img)
| 30.19403 | 87 | 0.561048 |
acf13a466fa71f7522857b3e93d5844fa4d9735e | 714 | py | Python | third-party/llvm/llvm-src/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 2,338 | 2018-06-19T17:34:51.000Z | 2022-03-31T11:00:37.000Z | third-party/llvm/llvm-src/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 3,740 | 2019-01-23T15:36:48.000Z | 2022-03-31T22:01:13.000Z | third-party/llvm/llvm-src/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 500 | 2019-01-23T07:49:22.000Z | 2022-03-30T02:59:37.000Z | #!/usr/bin/env python
import sys
import time
# Fake googletest executable used as a lit test fixture. It understands just
# enough of the googletest CLI:
#   --gtest_list_tests       list the two tests and exit 0
#   --gtest_filter=<name>    run one test: QuickSubTest passes immediately,
#                            InfiniteLoopSubTest hangs forever so that lit's
#                            per-test timeout handling can be exercised.
if len(sys.argv) != 2:
    raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
    # NOTE(review): googletest normally indents test names under the suite
    # line; the leading spaces appear to have been lost in this copy --
    # verify against the lit expectations before relying on this output.
    print("""\
T.
QuickSubTest
InfiniteLoopSubTest
""")
    sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
# Everything after '--gtest_filter=' is the fully qualified test name.
test_name = sys.argv[1].split('=',1)[1]
if test_name == 'T.QuickSubTest':
    print('I am QuickSubTest, I PASS')
    print('[ PASSED ] 1 test.')
    sys.exit(0)
elif test_name == 'T.InfiniteLoopSubTest':
    print('I am InfiniteLoopSubTest, I will hang')
    # Intentional busy-wait: lit is expected to kill this process when its
    # configured timeout expires.
    while True:
        pass
else:
    raise SystemExit("error: invalid test name: %r" % (test_name,))
| 23.8 | 67 | 0.647059 |
acf13bb3cf42c836ce903e86dae46e721baf1041 | 10,112 | py | Python | _build/jupyter_execute/ch04/ch4_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | _build/jupyter_execute/ch04/ch4_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | _build/jupyter_execute/ch04/ch4_1.py | liuzhengqi1996/math452 | 635b6ce53cb792e316abf4f47396f2e4f0686815 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # 4.1 Line search and gradient descent method
#
# ## 4.1.1 Gradient descent method
#
# For simplicity, let us just consider a general optimization problem
#
# $$
# \label{optmodel}
# \min_{x\in \mathbb{R}^n } f(x).
# $$ (problem)
#
# {height="5cm" width="7cm"}
#
# #### A general approach: line search method
#
# Given any initial guess $x_1$, the line search method uses the following
# algorithm
#
# $$
# \eta_t= argmin_{\eta\in \mathbb{R}^1} f(x_t - \eta p_t)\qquad \mbox{(1D minimization problem)}
# $$
#
# to produce $\{ x_{t}\}_{t=1}^{\infty}$
#
# $$
# \label{line-search}
# x_{t+1} = x_{t} - \eta_t p_t.
# $$
#
# Here $\eta_t$ is called the step size in
# optimization and also learning rate in machine learning, $p_t$ is called
# the descent direction, which is the critical component of this
# algorithm. And $x_t$ tends to
#
# $$
# x^*= argmin_{x\in \mathbb{R}^n} f(x) \iff f(x^*)=\min_{x\in \mathbb{R}^n} f(x)
# $$
#
# as $t$ tends to infinity. There is a series of optimization algorithms
# which follow the above form just using different choices of $p_t$.
#
# Then, the next natural question is what a good choice of $p_t$ is? We
# have the following theorem to show why gradient direction is a good
# choice for $p_t$.
#
# ```{admonition} lemma
# Given $x \in \mathbb{R}^n$, if $\nabla f(x)\neq 0$, the fast descent
# direction of $f$ at $x$ is the negative gradient direction, namely
#
# $$
# -\frac{\nabla f(x)}{\|\nabla f(x)\|} = \mathop{\arg\min}_{ p \in \mathbb{R}^n, \|p\|=1} \left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}.
# $$
#
# It means that $f(x)$ decreases most rapidly along the negative gradient
# direction.
# ```
#
# ```{admonition} proof
# *Proof.* Let $p$ be a direction in $\mathbb{R}^{n},\|p\|=1$. Consider
# the local decrease of the function $f(\cdot)$ along direction $p$
#
# $$
# \Delta(p)=\lim _{\eta \downarrow 0} \frac{1}{\eta}\left(f(x+\eta p)-f(x)\right)=\left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}.
# $$
#
# Note that
#
# $$
# \begin{split}
# \left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}=\sum_{i=1}^n\left. \frac{\partial f}{\partial x_i}(x + \eta p)p_i \right|_{\eta=0} =(\nabla f, p),
# \end{split}
# $$
#
# which means that
#
# $$
# f(x+\eta p)-f(x)=\eta(\nabla f(x), p)+o(\eta) .
# $$
#
# Therefore
#
# $$
# \Delta(p)=(\nabla f(x), p).
# $$
#
# Using the Cauchy-Schwarz inequality
# $-\|x\| \cdot\|y\| \leq( x, y) \leq\|x\| \cdot\|y\|,$ we obtain
#
# $$
# -\|\nabla f(x)\| \le (\nabla f(x), p)\le \|\nabla f(x)\| .
# $$
#
# Let us take
#
# $$
# \bar{p}=-\nabla f(x) /\|\nabla f(x)\|.
# $$
#
# Then
#
# $$
# \Delta(\bar{p})=-(\nabla f(x), \nabla f(x)) /\|\nabla f(x)\|=-\|\nabla f(x)\|.
# $$
#
# The direction $-\nabla f(x)$ (the antigradient) is the direction of the
# fastest local decrease of the function $f(\cdot)$ at point $x.$ ◻
# ```
#
# Here is a simple diagram for this property.
#
# Since at each point, $f(x)$ decreases most rapidly along the negative
# gradient direction, it is then natural to choose the search direction in
# [\[line-search\]](#line-search){reference-type="eqref"
# reference="line-search"} in the negative gradient direction and the
# resulting algorithm is the so-called gradient descent method.
#
# ```{prf:algorithm} Algrihthm
# :label: my-algorithm
# Given the initial guess $x_0$, learning rate $\eta_t>0$
#
# **For** t=1,2,$\cdots$,\
#
# $$
# x_{t+1} = x_{t} - \eta_{t} \nabla f({x}_{t}),
# $$
#
# ```
#
#
#
# In practice, we need a "stopping criterion" that determines when the
# above gradient descent method to stop. One possibility is
#
# > **While** $S(x_t; f) = \|\nabla f(x_t)\|\le \epsilon$ or $t \ge T$
#
# for some small tolerance $\epsilon>0$ or maximal number of iterations
# $T$. In general, a good stopping criterion is hard to come by and it is
# a subject that has called a lot of research in optimization for machine
# learning.
#
# In the gradient method, the scalar factors for the gradients,
# $\eta_{t},$ are called the step sizes. Of course, they must be positive.
# There are many variants of the gradient method, which differ one from
# another by the step-size strategy. Let us consider the most important
# examples.
#
# 1. The sequence $\left\{\eta_t\right\}_{t=0}^{\infty}$ is chosen in
# advance. For example, (constant step)
#
# $$
# \eta_t=\frac{\eta}{\sqrt{t+1}};
# $$
#
# 2. Full relaxation:
#
# $$
# \eta_t=\arg \min _{\eta \geq 0} f\left(x_t-\eta \nabla f\left(x_t\right)\right);
# $$
#
# 3. The Armijo rule: Find $x_{t+1}=x_t-\eta \nabla f\left(x_t\right)$
# with $\eta>0$ such that
#
# $$
# \alpha\left(\nabla f\left(x_t\right), x_t-x_{t+1}\right) \leq f\left(x_t\right)-f\left(x_{t+1}\right),
# $$
#
# $$
# \beta\left(\nabla f\left(x_t\right), x_t-x_{t+1}\right) \geq f\left(x_t\right)-f\left(x_{t+1}\right),
# $$
#
# where $0<\alpha<\beta<1$ are some fixed parameters.
#
# Comparing these strategies, we see that
#
# 1. The first strategy is the simplest one. It is often used in the
# context of convex optimization. In this framework, the behavior of
# functions is much more predictable than in the general nonlinear
# case.
#
# 2. The second strategy is completely theoretical. It is never used in
# practice since even in one-dimensional case we cannot find the exact
# minimum in finite time.
#
# 3. The third strategy is used in the majority of practical algorithms.
# It has the following geometric interpretation. Let us fix
# $x \in \mathbb{R}^{n}$ assuming that $\nabla f(x) \neq 0$. Consider
# the following function of one variable:
#
# $$
# \phi (\eta)=f(x-\eta \nabla f(x)),\quad \eta\ge0.
# $$
#
# Then the
# step-size values acceptable for this strategy belong to the part of
# the graph of $\phi$ which is located between two linear functions:
#
# $$
# \phi_{1}(\eta)=f(x)-\alpha \eta\|\nabla f(x)\|^{2}, \quad \phi_{2}(\eta)=f(x)-\beta \eta\|\nabla f(x)\|^{2}
# $$
#
# Note that $\phi(0)=\phi_{1}(0)=\phi_{2}(0)$ and
# $\phi^{\prime}(0)<\phi_{2}^{\prime}(0)<\phi_{1}^{\prime}(0)<0 .$
# Therefore, the acceptable values exist unless $\phi(\cdot)$ is not
# bounded below. There are several very fast one-dimensional
# procedures for finding a point satisfying the Armijo conditions.
# However, their detailed description is not important for us now.
#
#
# ## 4.1.2 Convergence of Gradient Descent method
#
# Now we are ready to study the rate of convergence of unconstrained
# minimization schemes. For the optimization problem {eq}`problem`
#
#
# $$
# \min_{x\in \mathbb{R}^n} f(x).
# $$
#
# We assume that $f(x)$ is convex. Then we say that $x^*$ is a minimizer if
#
# $$
# f(x^*) = \min_{x \in \mathbb{R}^n} f(x).
# $$
#
# For minimizer $x^*$, we have
#
# $$
# \label{key}
# \nabla f(x^*) = 0.
# $$
#
# We have the next two properties of the minimizer
# for convex functions:
#
# 1. If $f(x) \ge c_0$, for some $c_0 \in \mathbb{R}$, then we have
#
# $$
# \mathop{\arg\min} f \neq \emptyset.
# $$
#
# 2. If $f(x)$ is $\lambda$-strongly convex, then $f(x)$ has a unique
# minimizer, namely, there exists a unique $x^*\in \mathbb{R}^n$ such
# that
#
# $$
# f(x^*) = \min_{x\in \mathbb{R}^n }f(x).
# $$
#
# To investigate the convergence of gradient descent method, let us recall
# the gradient descent method:
#
# ```{prf:algorithm} Algorithm
# :label: my-algorithm
#
# **For**: $t = 1, 2, \cdots$
#
# $$
# \label{equ:fgd-iteration}
# x_{t+1} = x_{t} - \eta_t \nabla f(x_t),
# $$
#
# where $\eta_t$ is the stepsize / learning rate.
# ```
#
# We have the next theorem about the convergence of gradient descent
# method under the Assumption.
#
# ```{admonition} Theorem
# For Gradient Descent Algorithm {prf:ref}`my-algorithm` , if
# $f(x)$ satisfies Assumption, then
#
# $$
# \|x_t - x^*\|^2 \le \alpha^t \|x_0 - x^*\|^2
# $$
#
# if $0<\eta_t <\frac{2\lambda}{L^2}$ and $\alpha < 1$.
#
# Particularly, if $\eta_t = \frac{\lambda}{L^2}$, then
#
# $$
# \|x_t - x^*\|^2 \le \left(1 - \frac{\lambda^2}{L^2}\right)^t \|x_0 - x^*\|^2.
# $$
# ```
#
# ```{admonition} Proof
# *Proof.* Note that
#
# $$
# x_{t+1} - x = x_{t} - \eta_t \nabla f(x_t) - x.
# $$
#
# By taking $L^2$ norm for both sides, we get
#
# $$
# \|x_{t+1} - x \|^2 = \|x_{t} - \eta_t \nabla f(x_t) - x \|^2.
# $$
#
# Let
# $x = x^*$. It holds that
#
# $$
# \begin{aligned}
# \|x_{t+1} - x^* \|^2 &= \| x_{t} - \eta_t \nabla f(x_t) - x^* \|^2 \\
# &= \|x_t-x^*\|^2 - 2\eta_t \nabla f(x_t)^\top (x_t - x^*) + \eta_t^2 \|\nabla f(x_t) - \nabla f(x^*)\|^2 \qquad \mbox{ (by $\nabla f(x^*)=0$)}\\
# &\le \|x_t - x^*\|^2 - 2\eta_t \lambda \|x_t - x^*\|^2 + \eta_t ^2 L^2 \|x_t - x^*\|^2 \quad
# \mbox{(by $\lambda$-strong convexity and Lipschitz continuity of $\nabla f$)}\\
# &\le (1 - 2\eta_t \lambda + \eta_t^2 L^2) \|x_t - x^*\|^2
# =\alpha \|x_t - x^*\|^2,
# \end{aligned}
# $$
#
# where
#
# $$
# \alpha = \left(L^2 (\eta_t -{\lambda\over L^2})^2 + 1-{\lambda^2\over L^2}\right)<1\ \mbox{if } 0< \eta_t<\frac{2\lambda}{L^2}.
# $$
#
# Particularly, if $\eta_t =\frac{\lambda}{L^2}$,
#
# $$
# \alpha=1-{\lambda^2\over L^2},
# $$
#
# which finishes the proof. ◻
# ```
#
# This means that if the learning rate is chosen appropriatly,
# $\{x_t\}_{t=1}^\infty$ from the gradient descent method will converge to
# the minimizer $x^*$ of the function.
#
# There are some issues on Gradient Descent method:
#
# - $\nabla f(x_{t})$ is very expensive to compute.
#
# - Gradient Descent method does not yield generalization accuracy.
#
# The stochastic gradient descent (SGD) method in the next section will
# focus on these two issues.
# In[ ]:
| 28.974212 | 169 | 0.586432 |
acf13da168d43eb55ea24fd31c671d01c0db573f | 1,284 | py | Python | deployment/metadata/parse_subscriptions.py | hyperglance/azure-rule-automations | ca89404b87600d683baadde8a587af54ace82d4b | [
"MIT"
] | null | null | null | deployment/metadata/parse_subscriptions.py | hyperglance/azure-rule-automations | ca89404b87600d683baadde8a587af54ace82d4b | [
"MIT"
] | null | null | null | deployment/metadata/parse_subscriptions.py | hyperglance/azure-rule-automations | ca89404b87600d683baadde8a587af54ace82d4b | [
"MIT"
] | null | null | null | import pathlib
import json
import subprocess
import os
def list_subscriptions(csv: pathlib.PurePath) -> dict:
    """Parse the CSV of subscription names into a map keyed by subscription id.

    Reads a comma-separated list of subscription names from ``csv``, asks the
    Azure CLI (``az account list``) for the subscriptions visible to the
    current login, and returns a dict whose keys are the ids of the named,
    non-default subscriptions (all values are ``None``).  Any failure —
    unreadable file or unparsable CLI output — yields an empty dict.

    Args:
        csv: path to the comma-separated file of subscription names.

    Returns:
        dict: subscription-id -> None for each matching subscription;
        empty dict on error or when nothing matches.
    """
    try:
        with open(csv) as file:
            # Drop embedded spaces and strip per-element whitespace so
            # "a, b\n" parses the same as "a,b".
            elements = [name.strip() for name in file.read().replace(' ', '').split(',')]
    except (OSError, UnicodeDecodeError):
        print('there was a problem parsing the list of subscriptions, returning an empty map')
        return {}

    # Shell out to the Azure CLI; the invocation differs between POSIX and Windows.
    result = subprocess.run(
        ['bash', '-c', 'az account list'] if os.name == 'posix' else ['cmd', '/C az account list'],
        stdout=subprocess.PIPE)
    try:
        az_response = json.loads(result.stdout)
    except (ValueError, TypeError):
        print('There was a problem parsing the api response from azure - returning an empty map')
        return {}

    # BUGFIX: the original wrapped this in `for item in az_response:` and
    # returned inside the loop — a pointless first-iteration return that also
    # produced None (not {}) for an empty response.  Build the map directly.
    subscription_ids = (
        subscription['id'] for subscription in az_response
        if subscription['name'] in elements and not subscription['isDefault']
    )
    return dict.fromkeys(subscription_ids)
if __name__ == '__main__':
    # Resolve ../terraform/automations/subscriptions.csv relative to this file
    # and print the subscription-id map as JSON for the caller (e.g. Terraform).
    subscriptions_csv = pathlib.Path(__file__).parents[1].joinpath('terraform', 'automations', 'subscriptions.csv')
    print(json.dumps(list_subscriptions(subscriptions_csv)))
acf13dc08e1a598e4434ba0ae2f1fd8cfc604572 | 15,943 | py | Python | code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20029_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20029_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20029_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.ETFProfileandPrices.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.ETFProfileandPrices.exceptions import ApiAttributeError
# NOTE(review): auto-generated by OpenAPI Generator — regenerate from the API
# spec rather than hand-editing; only comments have been added below.
class InlineResponse20029Data(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No attributes of this model are restricted to an enumerated value set.
    allowed_values = {
    }

    # No length/range/regex validations apply to this model's attributes.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'description': (str,),  # noqa: E501
            'group': (str,),  # noqa: E501
            'cluster': (str,),  # noqa: E501
            'select_criteria': (str,),  # noqa: E501
            'weighting': (str,),  # noqa: E501
            'segment': (str,),  # noqa: E501
            'is_transparent': (bool,),  # noqa: E501
            'fund_of_funds': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'description': 'description',  # noqa: E501
        'group': 'group',  # noqa: E501
        'cluster': 'cluster',  # noqa: E501
        'select_criteria': 'selectCriteria',  # noqa: E501
        'weighting': 'weighting',  # noqa: E501
        'segment': 'segment',  # noqa: E501
        'is_transparent': 'isTransparent',  # noqa: E501
        'fund_of_funds': 'fundOfFunds',  # noqa: E501
    }

    # No server-populated (read-only) attributes on this model.
    read_only_vars = {
    }

    _composed_schemas = {}

    # Deserialization constructor: used when building the model from an API
    # response (allows read-only attributes); __init__ is for client-side use.
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """InlineResponse20029Data - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            description (str): Descriptive Summary of the methods used by a ETP or its index in selection and weighting of its holdings, text and standardized value available. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            group (str): Factset's strategy group is the broadest way sort ETPs with a similar investment strategy. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            cluster (str): Factset's strategy cluster is a collection of ETPs with a similar investment strategy, yet smaller than strategy group. This data item is more granular and narrow than the strategy group yet bigger than strategy group. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            select_criteria (str): Description of the security selection criteria used by the ETP or its index (e.g., Market Cap, Earnings, Dividends), text and standardized value available. This data is available for all the regions.. [optional]  # noqa: E501
            weighting (str): Text that specifies the weighting selection criteria used by the ETP or its index (e.g., Market Cap, Equal, Momentum, Fundamental) , text and standardized value available. This data is available for all the regions.. [optional]  # noqa: E501
            segment (str): Determines the unique segment the ETP falls into, based on FactSet ETP Analytics rules-based classification system determined by geography, category, focus, and niche. Text and standardized value available for this data item. This data is available for the US regions. See endpoint /factset/etf/strategy/segment/list for possible values.. [optional]  # noqa: E501
            is_transparent (bool): Description that States whether or not the methodology behind the underlying index can be clearly understood from offering documents. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            fund_of_funds (str): Description of the ETP that holds a portfolio of other ETPs. This data is available for the Canada regions.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that bypass the model's __setattr__ handling.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """InlineResponse20029Data - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            description (str): Descriptive Summary of the methods used by a ETP or its index in selection and weighting of its holdings, text and standardized value available. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            group (str): Factset's strategy group is the broadest way sort ETPs with a similar investment strategy. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            cluster (str): Factset's strategy cluster is a collection of ETPs with a similar investment strategy, yet smaller than strategy group. This data item is more granular and narrow than the strategy group yet bigger than strategy group. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            select_criteria (str): Description of the security selection criteria used by the ETP or its index (e.g., Market Cap, Earnings, Dividends), text and standardized value available. This data is available for all the regions.. [optional]  # noqa: E501
            weighting (str): Text that specifies the weighting selection criteria used by the ETP or its index (e.g., Market Cap, Equal, Momentum, Fundamental) , text and standardized value available. This data is available for all the regions.. [optional]  # noqa: E501
            segment (str): Determines the unique segment the ETP falls into, based on FactSet ETP Analytics rules-based classification system determined by geography, category, focus, and niche. Text and standardized value available for this data item. This data is available for the US regions. See endpoint /factset/etf/strategy/segment/list for possible values.. [optional]  # noqa: E501
            is_transparent (bool): Description that States whether or not the methodology behind the underlying index can be clearly understood from offering documents. This data is available for the US and Canada regions.. [optional]  # noqa: E501
            fund_of_funds (str): Description of the ETP that holds a portfolio of other ETPs. This data is available for the Canada regions.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
acf13df94125433a96c221bbbce646282b2bbefb | 87 | py | Python | tccli/services/iecp/__init__.py | HS-Gray/tencentcloud-cli | 3822fcfdfed570fb526fe49abe6793e2f9127f4a | [
"Apache-2.0"
] | 47 | 2018-05-31T11:26:25.000Z | 2022-03-08T02:12:45.000Z | tccli/services/iecp/__init__.py | HS-Gray/tencentcloud-cli | 3822fcfdfed570fb526fe49abe6793e2f9127f4a | [
"Apache-2.0"
] | 23 | 2018-06-14T10:46:30.000Z | 2022-02-28T02:53:09.000Z | tccli/services/iecp/__init__.py | HS-Gray/tencentcloud-cli | 3822fcfdfed570fb526fe49abe6793e2f9127f4a | [
"Apache-2.0"
] | 22 | 2018-10-22T09:49:45.000Z | 2022-03-30T08:06:04.000Z | # -*- coding: utf-8 -*-
from tccli.services.iecp.iecp_client import action_caller
| 21.75 | 57 | 0.701149 |
acf13e4b048add418f3b5de3c28c4adc9f081b0f | 9,831 | py | Python | packages/augur-core/tests/trading/test_shareToken.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 885 | 2015-01-11T20:51:30.000Z | 2022-03-25T00:29:59.000Z | packages/augur-core/tests/trading/test_shareToken.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 7,347 | 2015-01-17T01:05:24.000Z | 2021-11-02T17:28:19.000Z | packages/augur-core/tests/trading/test_shareToken.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 283 | 2015-01-30T02:16:53.000Z | 2022-03-24T19:23:08.000Z | #!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from pytest import raises
from utils import fix, AssertLog, stringToBytes, BuyWithCash, nullAddress, longTo32Bytes
from constants import YES, NO
def test_init(contractsFixture, market):
    """The deployed ShareToken reports the expected name, symbol and type."""
    token = contractsFixture.contracts['ShareToken']
    assert token.name() == "Shares", "currency name"
    assert token.symbol() == "SHARE", "currency symbol"
    assert token.getTypeName() == stringToBytes("ShareToken")
def test_bad_input_to_trade_functions(contractsFixture, universe, market, cash):
    """The trade entry points reject out-of-range outcomes and bogus markets."""
    shareToken = contractsFixture.contracts["ShareToken"]
    trader = contractsFixture.accounts[1]
    setCost = 10 * market.getNumTicks()
    cash.faucet(setCost, sender=trader)

    # An outcome index of 257 is out of range and must revert...
    with raises(TransactionFailed):
        shareToken.buyCompleteSetsForTrade(market.address, 10, 257, trader, trader, sender=trader)
    # ...while a valid outcome succeeds.
    shareToken.buyCompleteSetsForTrade(market.address, 10, 1, trader, trader, sender=trader)

    # Approve the default sender (accounts[0]) as an operator, then confirm
    # selling against the null address reverts while a real market works.
    shareToken.setApprovalForAll(contractsFixture.accounts[0], True, sender=trader)
    with raises(TransactionFailed):
        shareToken.sellCompleteSetsForTrade(nullAddress, 1, 10, trader, trader, trader, trader, 4, trader, longTo32Bytes(11))
    shareToken.sellCompleteSetsForTrade(market.address, 1, 10, trader, trader, trader, trader, 4, trader, longTo32Bytes(11))
def test_safeTransferFrom(contractsFixture, universe, market, cash):
    """safeTransferFrom moves shares between accounts without minting/burning."""
    shareToken = contractsFixture.contracts['ShareToken']
    owner = contractsFixture.accounts[0]
    recipient = contractsFixture.accounts[1]

    with BuyWithCash(cash, 7 * market.getNumTicks(), owner, "complete set buy"):
        shareToken.buyCompleteSets(market.address, owner, 7)

    startingSupply = shareToken.totalSupplyForMarketOutcome(market.address, 0)
    ownerStart = shareToken.balanceOfMarketOutcome(market.address, 0, owner)
    recipientStart = shareToken.balanceOfMarketOutcome(market.address, 0, recipient)
    tokenId = shareToken.getTokenId(market.address, 0)

    # Transferring more shares than the owner holds must revert.
    with raises(TransactionFailed):
        shareToken.safeTransferFrom(owner, recipient, tokenId, 11, "", sender=owner)
    # A non-owner without operator approval cannot move the owner's shares.
    with raises(TransactionFailed):
        shareToken.safeTransferFrom(owner, recipient, tokenId, 5, "", sender=recipient)
    # Nor can an account transfer shares it does not hold.
    with raises(TransactionFailed):
        shareToken.safeTransferFrom(recipient, recipient, tokenId, 5, "", sender=recipient)

    shareTokenBalanceChangedLog = {
        "universe": universe.address,
        "account": recipient,
        "outcome": 0,
        "balance": 5,
        "market": market.address,
    }
    with AssertLog(contractsFixture, "ShareTokenBalanceChanged", shareTokenBalanceChangedLog, skip=1):
        shareToken.safeTransferFrom(owner, recipient, tokenId, 5, "", sender=owner)

    ownerEnd = shareToken.balanceOfMarketOutcome(market.address, 0, owner)
    recipientEnd = shareToken.balanceOfMarketOutcome(market.address, 0, recipient)

    assert(ownerStart - 5 == ownerEnd), "Decrease in address 1's balance should equal amount transferred"
    assert(recipientStart + 5 == recipientEnd), "Increase in address 2's balance should equal amount transferred"
    assert(shareToken.totalSupplyForMarketOutcome(market.address, 0) == startingSupply), "Total supply should be unchanged"
def test_approve(contractsFixture, market, cash):
    """setApprovalForAll is required before an operator may move shares."""
    shareToken = contractsFixture.contracts['ShareToken']
    owner = contractsFixture.accounts[0]
    operator = contractsFixture.accounts[1]
    tokenId = shareToken.getTokenId(market.address, 0)

    with BuyWithCash(cash, 7 * market.getNumTicks(), owner, "complete set buy"):
        shareToken.buyCompleteSets(market.address, owner, 7)

    # No approval yet: the operator's transfer attempt must revert.
    assert(shareToken.isApprovedForAll(owner, operator) == False), "Initialy Approved"
    with raises(TransactionFailed):
        shareToken.safeTransferFrom(owner, operator, tokenId, 5, "", sender=operator)

    # After blanket approval the same transfer goes through.
    shareToken.setApprovalForAll(operator, True, sender=owner)
    assert(shareToken.isApprovedForAll(owner, operator) == True), "Not Approved"
    shareToken.safeTransferFrom(owner, operator, tokenId, 5, "", sender=operator)
def test_publicBuyCompleteSets(contractsFixture, universe, cash, market):
    """Buying complete sets mints one share per outcome and raises open interest."""
    orders = contractsFixture.contracts['Orders']
    share_token = contractsFixture.contracts["ShareToken"]
    buyer = contractsFixture.accounts[1]

    # Precondition: the buyer holds no cash and the market only holds its validity bond.
    assert not cash.balanceOf(buyer)
    assert universe.marketBalance(market.address) == universe.getOrCacheValidityBond()
    assert universe.getOpenInterestInAttoCash() == 0

    cost = 10 * market.getNumTicks()
    assert cash.faucet(cost, sender=buyer)

    expected_purchase_log = {
        "universe": universe.address,
        "market": market.address,
        "account": buyer,
        "numCompleteSets": 10,
    }
    expected_oi_log = {
        "universe": universe.address,
        "market": market.address,
        "marketOI": cost,
    }
    with AssertLog(contractsFixture, "CompleteSetsPurchased", expected_purchase_log):
        with AssertLog(contractsFixture, "MarketOIChanged", expected_oi_log):
            assert share_token.publicBuyCompleteSets(market.address, 10, sender=buyer)

    assert share_token.balanceOfMarketOutcome(market.address, YES, buyer) == 10, "Should have 10 shares of outcome 1"
    assert share_token.balanceOfMarketOutcome(market.address, NO, buyer) == 10, "Should have 10 shares of outcome 2"
    assert cash.balanceOf(buyer) == 0, "Sender's cash balance should be 0"
    assert universe.marketBalance(market.address) == cost + universe.getOrCacheValidityBond(), "Increase in market's cash should equal the cost to purchase the complete set"
    assert share_token.totalSupplyForMarketOutcome(market.address, YES) == 10, "Increase in yes shares purchased for this market should be 10"
    assert share_token.totalSupplyForMarketOutcome(market.address, NO) == 10, "Increase in yes shares purchased for this market should be 10"
    assert universe.getOpenInterestInAttoCash() == cost, "Open interest in the universe increases by the cost in ETH of the sets purchased"
def test_publicSellCompleteSets(contractsFixture, universe, cash, market):
    """Selling complete sets burns shares, pays fees, and lowers open interest."""
    orders = contractsFixture.contracts['Orders']
    share_token = contractsFixture.contracts["ShareToken"]
    account = contractsFixture.accounts[0]

    assert not cash.balanceOf(account)
    assert universe.marketBalance(market.address) == universe.getOrCacheValidityBond()

    cost = 10 * market.getNumTicks()
    assert cash.faucet(cost)
    assert universe.getOpenInterestInAttoCash() == 0
    share_token.buyCompleteSets(market.address, account, 10)
    assert universe.getOpenInterestInAttoCash() == 10 * market.getNumTicks()

    expected_sold_log = {
        "universe": universe.address,
        "market": market.address,
        "account": account,
        "numCompleteSets": 9,
        "fees": 90,
    }
    expected_oi_log = {
        "universe": universe.address,
        "market": market.address,
        "marketOI": market.getNumTicks(),
    }
    # Sell 9 of the 10 sets; one complete set's worth of open interest remains.
    with AssertLog(contractsFixture, "CompleteSetsSold", expected_sold_log):
        with AssertLog(contractsFixture, "MarketOIChanged", expected_oi_log):
            result = share_token.publicSellCompleteSets(market.address, 9, longTo32Bytes(11))

    assert universe.getOpenInterestInAttoCash() == 1 * market.getNumTicks()
    assert share_token.balanceOfMarketOutcome(market.address, YES, account) == 1, "Should have 1 share of outcome yes"
    assert share_token.balanceOfMarketOutcome(market.address, NO, account) == 1, "Should have 1 share of outcome no"
    assert share_token.totalSupplyForMarketOutcome(market.address, YES) == 1
    assert share_token.totalSupplyForMarketOutcome(market.address, NO) == 1
    assert cash.balanceOf(account) == 8910
    assert universe.marketBalance(market.address) == universe.getOrCacheValidityBond() + 1000 + 90
    assert market.marketCreatorFeesAttoCash() == 90
def test_sellCompleteSets_failure(contractsFixture, universe, cash, market):
    """Selling more complete sets than the account owns must revert."""
    share_token = contractsFixture.contracts["ShareToken"]
    orders = contractsFixture.contracts['Orders']
    seller = contractsFixture.accounts[1]
    cost = 10 * market.getNumTicks()
    cash.faucet(cost, sender=seller)
    share_token.buyCompleteSets(market.address, seller, 10, sender=seller)
    # sellCompleteSets exceptions: 11 sets requested while only 10 are held.
    with raises(TransactionFailed):
        share_token.sellCompleteSets(market.address, seller, seller, 10 + 1, longTo32Bytes(11), seller)
def test_maliciousMarket(contractsFixture, universe, cash, market):
    """Buying complete sets through a contract impersonating a market must revert."""
    share_token = contractsFixture.contracts["ShareToken"]
    orders = contractsFixture.contracts['Orders']
    attacker = contractsFixture.accounts[1]
    malicious_market = contractsFixture.upload('solidity_test_helpers/MaliciousMarket.sol', 'maliciousMarket', constructorArgs=[market.address])
    with raises(TransactionFailed):
        share_token.buyCompleteSets(malicious_market.address, attacker, 10**18, sender=attacker)
| 51.203125 | 173 | 0.754043 |
acf13f11f4a52312b119033dc9a448532dda9879 | 2,235 | py | Python | meiduo_mall/script/regenerate_detail_html.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | meiduo_mall/script/regenerate_detail_html.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | meiduo_mall/script/regenerate_detail_html.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
sys.path.insert(0, '../')
import os
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'meiduo_mall.settings.dev'
import django
django.setup()
from django.template import loader
from django.conf import settings
from goods import models
from contents.utils import get_categories
from goods.utils import get_breadcrumb
def generate_static_sku_detail_html(sku_id):
    """
    Generate the static product-detail HTML page for one SKU.

    :param sku_id: primary key of the SKU to render
    """
    # The SKU being rendered.
    sku = models.SKU.objects.get(id=sku_id)
    # Channel/category menu for the page header.
    categories = get_categories()
    # Breadcrumb navigation for this SKU's category.
    breadcrumb = get_breadcrumb(sku.category)

    # Build the current SKU's spec key: its option ids ordered by spec id.
    sku_specs = sku.specs.order_by('spec_id')
    sku_key = [spec.option.id for spec in sku_specs]

    # Map every sibling SKU's option-id tuple to that SKU's id.
    skus = sku.spu.sku_set.all()
    spec_sku_map = {}
    for s in skus:
        s_specs = s.specs.order_by('spec_id')
        key = [spec.option.id for spec in s_specs]
        spec_sku_map[tuple(key)] = s.id

    # All specs defined for the SPU this SKU belongs to.
    goods_specs = sku.spu.specs.order_by('id')
    # Stop if the current SKU's spec information is incomplete.
    if len(sku_key) < len(goods_specs):
        return
    for index, spec in enumerate(goods_specs):
        # Copy the current SKU's key and vary only this spec's slot to find
        # which SKU (if any) each option of this spec would select.
        key = sku_key[:]
        spec_options = spec.options.all()
        for option in spec_options:
            key[index] = option.id
            option.sku_id = spec_sku_map.get(tuple(key))
        spec.spec_options = spec_options

    # Render the detail template and write it to the static files directory.
    context = {
        'categories': categories,
        'breadcrumb': breadcrumb,
        'sku': sku,
        'specs': goods_specs,
    }
    template = loader.get_template('detail.html')
    html_text = template.render(context)
    file_path = os.path.join(settings.STATICFILES_DIRS[0], 'detail/'+str(sku_id)+'.html')
    # BUG FIX: write with an explicit encoding; the rendered page contains
    # non-ASCII text and the locale-dependent default could raise
    # UnicodeEncodeError on some systems.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(html_text)
if __name__ == '__main__':
skus = models.SKU.objects.all()
for sku in skus:
print(sku.id)
generate_static_sku_detail_html(sku.id) | 25.988372 | 89 | 0.636689 |
acf13f3611d06ec3b97db1f582b1c888362e7ead | 4,861 | py | Python | kube_hunter/modules/discovery/apiserver.py | mormamn/kube-hunter | 14d73e201eda58eef6d873f023e39df13a9464fa | [
"Apache-2.0"
] | 2 | 2022-02-09T18:05:46.000Z | 2022-03-11T06:39:01.000Z | kube_hunter/modules/discovery/apiserver.py | mormamn/kube-hunter | 14d73e201eda58eef6d873f023e39df13a9464fa | [
"Apache-2.0"
] | null | null | null | kube_hunter/modules/discovery/apiserver.py | mormamn/kube-hunter | 14d73e201eda58eef6d873f023e39df13a9464fa | [
"Apache-2.0"
] | null | null | null | import requests
import logging
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase
from kube_hunter.conf import config
KNOWN_API_PORTS = [443, 6443, 8080]
logger = logging.getLogger(__name__)
class K8sApiService(Service, Event):
    """A Kubernetes API service whose exact role has not been determined yet."""

    def __init__(self, protocol="https"):
        # Reported under a generic name until ApiServiceClassify refines it.
        Service.__init__(self, name="Unrecognized K8s API")
        self.protocol = protocol
class ApiServer(Service, Event):
    """The API server is in charge of all operations on the cluster."""

    def __init__(self):
        Service.__init__(self, name="API Server")
        # The Kubernetes API server is served over TLS.
        self.protocol = "https"
class MetricsServer(Service, Event):
    """The Metrics server is in charge of providing resource usage metrics for pods and nodes to the API server"""

    def __init__(self):
        Service.__init__(self, name="Metrics Server")
        # Like the API server, the Metrics Server is served over TLS.
        self.protocol = "https"
# Other devices could have this port open, but we can check to see if it looks like a Kubernetes api
# A Kubernetes API service will respond with a JSON message that includes a "code" field for the HTTP status code
@handler.subscribe(OpenPortEvent, predicate=lambda x: x.port in KNOWN_API_PORTS)
class ApiServiceDiscovery(Discovery):
    """API Service Discovery

    Checks for the existence of K8s API Services
    """

    def __init__(self, event):
        self.event = event
        self.session = requests.Session()
        # API servers commonly use self-signed certificates; skip verification.
        self.session.verify = False

    def execute(self):
        logger.debug(f"Attempting to discover an API service on {self.event.host}:{self.event.port}")
        protocols = ["http", "https"]
        for protocol in protocols:
            if self.has_api_behaviour(protocol):
                self.publish_event(K8sApiService(protocol))

    def has_api_behaviour(self, protocol):
        """Return True if host:port answers like a Kubernetes API over ``protocol``, False otherwise."""
        try:
            r = self.session.get(f"{protocol}://{self.event.host}:{self.event.port}", timeout=config.network_timeout)
            if ("k8s" in r.text) or ('"code"' in r.text and r.status_code != 200):
                return True
        except requests.exceptions.SSLError:
            # BUG FIX: was f"{[protocol]} ..." which rendered as "['https'] ...".
            logger.debug(f"{protocol} protocol not accepted on {self.event.host}:{self.event.port}")
        except Exception:
            logger.debug(f"Failed probing {self.event.host}:{self.event.port}", exc_info=True)
        # Explicit False instead of an implicit None on all failure paths.
        return False
# Acts as a filter for services: when we can classify the API, we swap the
# filtered event with a new, more specific Service event to be published next.
# The classification depends on the execution context. Currently we classify
# two services: the Metrics Server and the API Server.
# If running as a pod:
#   we know the API server's IP, so we can classify directly.
# If not:
#   we determine the kind by querying /version on the service; the API
#   Server's response contains a major version field, while the Metrics
#   Server's does not.
@handler.subscribe(K8sApiService)
class ApiServiceClassify(EventFilterBase):
    """API Service Classifier

    Classifies an API service
    """

    def __init__(self, event):
        self.event = event
        self.classified = False
        self.session = requests.Session()
        # API servers commonly use self-signed certificates; skip verification.
        self.session.verify = False
        # Using the auth token if we can, for the case that authentication is needed for our checks
        if self.event.auth_token:
            self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})

    def classify_using_version_endpoint(self):
        """Tries to classify by accessing /version. If access does not succeed, returns silently."""
        try:
            endpoint = f"{self.event.protocol}://{self.event.host}:{self.event.port}/version"
            versions = self.session.get(endpoint, timeout=config.network_timeout).json()
            if "major" in versions:
                # Only the API server reports a non-empty major version.
                if versions.get("major") == "":
                    self.event = MetricsServer()
                else:
                    self.event = ApiServer()
        except Exception:
            # Consistency fix: use the module-level logger instead of the root logger.
            logger.warning("Could not access /version on API service", exc_info=True)

    def execute(self):
        discovered_protocol = self.event.protocol
        # if running as pod
        if self.event.kubeservicehost:
            # if the host is the api server's IP, we know it's the Api Server
            if self.event.kubeservicehost == str(self.event.host):
                self.event = ApiServer()
            else:
                self.event = MetricsServer()
        # if not running as pod.
        else:
            self.classify_using_version_endpoint()
        # in any case, making sure to link previously discovered protocol
        self.event.protocol = discovered_protocol
        # If some check classified the Service, the event will have been replaced.
        return self.event
| 38.888 | 117 | 0.668175 |
acf14149616d40558a80bdb288d8e36d760dc2e6 | 721 | py | Python | notes/2018-05-14-single-view-continuous-svd/calculations/test-mic.py | talonchandler/polharmonic | 2aa3ca984e11050f901579b8eaa45a3a61d07957 | [
"MIT"
] | null | null | null | notes/2018-05-14-single-view-continuous-svd/calculations/test-mic.py | talonchandler/polharmonic | 2aa3ca984e11050f901579b8eaa45a3a61d07957 | [
"MIT"
] | null | null | null | notes/2018-05-14-single-view-continuous-svd/calculations/test-mic.py | talonchandler/polharmonic | 2aa3ca984e11050f901579b8eaa45a3a61d07957 | [
"MIT"
] | null | null | null | from polharmonic import det, ill, micro
import numpy as np

n_px = 2**7 + 1
folder = 'out/'

# Detection-side PSF: unpolarized illumination, polarized detection.
i1 = ill.Illuminator(polarizer=False)
d1 = det.Detector(polarizer=True)  # True
m1 = micro.Microscope(ill=i1, det=d1)
m1.plot(m1.h, filename=folder+'hhdet.pdf', n_px=n_px, contours=False)
# m1.plot(m1.H, filename=folder+'Hdet.pdf', n_px=n_px, contours=True)
# m1.calc_SVD(n_px=n_px)
# m1.plot_SVS(filename=folder+'SVSdet.pdf')

# Illumination-side PSF: polarized illumination, unpolarized detection.
i2 = ill.Illuminator(polarizer=True)
d2 = det.Detector(polarizer=False)
m2 = micro.Microscope(ill=i2, det=d2)
# NOTE(review): m2.h is plotted through m1 — presumably plot() only renders
# its argument, so this is equivalent to m2.plot; confirm.
m1.plot(m2.h, filename=folder+'hhill.pdf', n_px=n_px, contours=False)
# m2.plot(m2.H, filename=folder+'Hill.pdf', n_px=n_px)
# m2.calc_SVD(n_px=n_px)
# m2.plot_SVS(filename=folder+'SVSill.pdf')
| 31.347826 | 69 | 0.730929 |
acf1419278d44b0b0c3974ef01e01e4ad229caca | 1,345 | py | Python | code/two-dimensional/plot_kpp.py | manuel-quezada/BP_Lim_for_imp_RK_Methods | 5d5f84122959c1bd779a7ff4eaa676f35db37757 | [
"MIT"
] | null | null | null | code/two-dimensional/plot_kpp.py | manuel-quezada/BP_Lim_for_imp_RK_Methods | 5d5f84122959c1bd779a7ff4eaa676f35db37757 | [
"MIT"
] | null | null | null | code/two-dimensional/plot_kpp.py | manuel-quezada/BP_Lim_for_imp_RK_Methods | 5d5f84122959c1bd779a7ff4eaa676f35db37757 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # <--- This is important for 3d plotting
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap

# Load the saved KPP solution (cell averages, interior cells only).
Q = np.genfromtxt('solution_time_1.0.csv', delimiter=',')
mx, my = Q.shape
xlower, xupper = -2.0, 2.0
ylower, yupper = -2.5, 1.5
nghost = 2
dx = (xupper - xlower) / mx
dy = (yupper - ylower) / my
# Cell-center coordinates, extended by the ghost-cell layers on each side.
x = np.linspace(xlower - (2*nghost - 1)*dx/2, xupper + (2*nghost - 1)*dx/2, mx + 2*nghost)
y = np.linspace(ylower - (2*nghost - 1)*dy/2, yupper + (2*nghost - 1)*dy/2, my + 2*nghost)
xx, yy = np.meshgrid(x, y)

# Build a white-to-pink colormap.
N = 256
rgba = np.ones((N, 4))
rgba[:, 0] = np.linspace(256/256, 256/256, N)
rgba[:, 1] = np.linspace(256/256, 20/256, N)
rgba[:, 2] = np.linspace(256/256, 147/256, N)
newcmp = ListedColormap(rgba)

# plot
plt.figure(figsize=(5, 5))
plt.pcolor(xx[2:-2, 2:-2], yy[2:-2, 2:-2], Q, cmap=newcmp)
#fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
#surf = ax.plot_surface(xx[2:-2,2:-2], yy[2:-2,2:-2], Q, cmap='jet',
#                       linewidth=1, antialiased=True)
#ax.view_init(elev=40., azim=45)
plt.contour(xx[2:-2, 2:-2], yy[2:-2, 2:-2], Q, 20, colors='black')
plt.clim(np.pi/4.0, 14*np.pi/4.0)
plt.xticks([-1.5, -0.5, 0.5, 1.5])
plt.yticks([-2, -1, 0, 1])
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig('kpp.png')
| 31.27907 | 81 | 0.66171 |
acf1425b4373ff544ee27e146b0d077fef33235d | 2,113 | py | Python | ckan/tests/model/test_resource.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckan/tests/model/test_resource.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckan/tests/model/test_resource.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | # encoding: utf-8
import pytest
import ckan.model as model
import ckan.tests.factories as factories
Resource = model.Resource
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestReousrce(object):
    """Unit tests for the ``Resource`` model."""

    def test_edit_url(self):
        # An edited, committed URL must persist across a fresh lookup.
        res_dict = factories.Resource(url="http://first")
        res = Resource.get(res_dict["id"])
        res.url = "http://second"
        model.repo.commit_and_remove()
        res = Resource.get(res_dict["id"])
        assert res.url == "http://second"

    def test_edit_extra(self):
        # An edited, committed extra must persist across a fresh lookup.
        res_dict = factories.Resource(newfield="first")
        res = Resource.get(res_dict["id"])
        res.extras = {"newfield": "second"}
        model.repo.commit_and_remove()
        res = Resource.get(res_dict["id"])
        assert res.extras["newfield"] == "second"

    def test_get_all_without_views_returns_all_resources_without_views(self):
        # Create resource with resource_view
        factories.ResourceView()
        expected_resources = [
            factories.Resource(format="format"),
            factories.Resource(format="other_format"),
        ]

        resources = Resource.get_all_without_views()

        expected_resources_ids = [r["id"] for r in expected_resources]
        resources_ids = [r.id for r in resources]
        # BUG FIX: list.sort() returns None, so the previous
        # ``assert ids.sort() == ids.sort()`` compared None == None and could
        # never fail. Compare the sorted lists instead.
        assert sorted(expected_resources_ids) == sorted(resources_ids)

    def test_get_all_without_views_accepts_list_of_formats_ignoring_case(self):
        factories.Resource(format="other_format")
        resource_id = factories.Resource(format="format")["id"]

        resources = Resource.get_all_without_views(["FORMAT"])

        length = len(resources)
        assert length == 1, "Expected 1 resource, but got %d" % length
        assert [resources[0].id] == [resource_id]

    def test_resource_count(self):
        """Resource.count() should return a count of instances of Resource
        class"""
        assert Resource.count() == 0
        factories.Resource()
        factories.Resource()
        factories.Resource()
        assert Resource.count() == 3
| 33.015625 | 79 | 0.657359 |
acf14261518769539ca63aecb27e989b5d3a78b8 | 1,807 | py | Python | igibson/scenes/stadium_scene.py | fxia22/gibson_demos | 5f8d253694b23b41c53959774203ba5787578b74 | [
"MIT"
] | 1 | 2021-08-03T23:59:21.000Z | 2021-08-03T23:59:21.000Z | igibson/scenes/stadium_scene.py | fxia22/gibson_demos | 5f8d253694b23b41c53959774203ba5787578b74 | [
"MIT"
] | null | null | null | igibson/scenes/stadium_scene.py | fxia22/gibson_demos | 5f8d253694b23b41c53959774203ba5787578b74 | [
"MIT"
] | 1 | 2021-12-01T16:09:01.000Z | 2021-12-01T16:09:01.000Z | import logging
import numpy as np
from igibson.utils.utils import l2_distance
import pybullet_data
import pybullet as p
import os
from igibson.scenes.scene_base import Scene
class StadiumScene(Scene):
    """
    A simple stadium scene for debugging
    """

    def __init__(self):
        super(StadiumScene, self).__init__()

    def load(self):
        """
        Load the scene into pybullet
        """
        data_dir = pybullet_data.getDataPath()
        self.stadium = p.loadSDF(os.path.join(data_dir, "stadium_no_collision.sdf"))
        floor_id = p.loadMJCF(os.path.join(data_dir, "mjcf/ground_plane.xml"))[0]
        self.floor_body_ids += [floor_id]
        # Sink the plane slightly below the stadium mesh and make it translucent.
        pos, orn = p.getBasePositionAndOrientation(floor_id)
        lowered = [pos[0], pos[1], pos[2] - 0.005]
        p.resetBasePositionAndOrientation(floor_id, lowered, orn)
        p.changeVisualShape(floor_id, -1, rgbaColor=[1, 1, 1, 0.5])
        return list(self.stadium) + self.floor_body_ids

    def get_random_point(self, floor=None):
        """
        Get a random point in the region of [-5, 5] x [-5, 5]
        """
        random_xy = [np.random.uniform(-5, 5), np.random.uniform(-5, 5)]
        return floor, np.array(random_xy + [0.0])

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False):
        """
        Get a trivial shortest path because the scene is empty
        """
        logging.warning(
            'WARNING: trying to compute the shortest path in StadiumScene (assuming empty space)')
        # Straight line between source and target; geodesic == Euclidean here.
        return np.stack((source_world, target_world)), l2_distance(source_world, target_world)
| 33.462963 | 98 | 0.62922 |
acf1430d783c9cd0aa9b6377cc4bcb55907805c1 | 2,393 | py | Python | ising/tests/test_thermo.py | cgbriggs99/ising | 656d570140f0e385a4fae906224acfffa98f9b23 | [
"BSD-3-Clause"
] | null | null | null | ising/tests/test_thermo.py | cgbriggs99/ising | 656d570140f0e385a4fae906224acfffa98f9b23 | [
"BSD-3-Clause"
] | 8 | 2022-03-03T18:14:41.000Z | 2022-03-23T04:56:32.000Z | ising/tests/test_thermo.py | cgbriggs99/ising | 656d570140f0e385a4fae906224acfffa98f9b23 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
"""
Test the thermodynamic calculations.
"""
import math
import numpy as np
import pytest
import ising
__LENGTH = 10
__J = -2
__M = 1.1
__K = 1
__TEMP = 298.15
def test_thermo():
    """
    Test the partition, average, and variance.
    """
    # Hamiltonian under test: periodic Ising chain with coupling __J, field __M.
    ham = ising.hamiltonian.PeriodicHamiltonian(__J, __M)
    # Use the exhaustive (full enumeration) strategy.
    method = ising.thermo.ThermoMethod.getsingleton()
    method.setstrat(ising.thermo.FullCalcStrategy.getsingleton())

    # A partition function must be finite and strictly positive.
    part = method.partition(ham, __LENGTH, __TEMP, __K)
    assert math.isfinite(part) and part > 0

    # The mean energy should be finite and of a sane magnitude.
    energy = method.average(ham.energy, ham, __LENGTH, temp=__TEMP, boltzmann=__K)
    assert math.isfinite(energy) and -12 < math.log10(abs(energy)) < 12

    # Likewise the energy variance.
    variance = method.variance(ham.energy, ham, __LENGTH, temp=__TEMP, boltzmann=__K)
    assert math.isfinite(variance) and math.log10(abs(variance)) < 12
def test_plots():
    """
    Test energy, heat capacity, and magnetic susceptibility.
    """
    plots = ising.thermo.PlotValsMethod.getsingleton()
    plots.setstrat(ising.thermo.SequentialStrategy.getsingleton())
    ham = ising.hamiltonian.PeriodicHamiltonian(__J, __M)
    points = list(np.linspace(0.1, 20))
    assert isinstance(plots.getstrat(), ising.thermo.SequentialStrategy)

    # Compute the same curves with all three strategies.
    seq = plots.calc_plot_vals(ham, __LENGTH, points, __K)
    plots.setstrat(ising.thermo.ThreadedStrategy.getsingleton())
    thr = plots.calc_plot_vals(ham, __LENGTH, points, __K)
    plots.setstrat(ising.fastcwrapper.CPlotStrategy.getsingleton())
    cpl = plots.calc_plot_vals(ham, __LENGTH, points, __K)

    # All strategies must agree within tolerance on each of the three
    # quantities (energy, heat capacity, magnetic susceptibility).
    for seq_vals, thr_vals, cpl_vals in zip(seq, thr, cpl):
        for s, t, c in zip(seq_vals, thr_vals, cpl_vals):
            assert abs(s - c) <= 1e-4 and abs(t - c) <= 1e-4
| 27.193182 | 74 | 0.615963 |
acf14415ce13c14705896774b424d0a2b9c4e655 | 29,403 | py | Python | pyplusplus/decl_wrappers/calldef_wrapper.py | asford/pyplusplus | 18485e9013e30b1f7776b6039eeaa2fbdb73f183 | [
"BSL-1.0"
] | 3 | 2016-10-25T11:24:46.000Z | 2020-12-14T09:07:28.000Z | pyplusplus/decl_wrappers/calldef_wrapper.py | asford/pyplusplus | 18485e9013e30b1f7776b6039eeaa2fbdb73f183 | [
"BSL-1.0"
] | null | null | null | pyplusplus/decl_wrappers/calldef_wrapper.py | asford/pyplusplus | 18485e9013e30b1f7776b6039eeaa2fbdb73f183 | [
"BSL-1.0"
] | 3 | 2016-04-06T15:16:49.000Z | 2019-01-15T07:08:29.000Z | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""contains classes that allow to configure code generation for free\\member functions, operators and etc."""
import os
from . import user_text
from . import algorithm
from . import decl_wrapper
from pyplusplus import messages
from pygccxml import declarations
from pyplusplus import function_transformers as ft
class calldef_t(decl_wrapper.decl_wrapper_t):
"""base class, for code generator configuration, for function declaration classes."""
BOOST_PYTHON_MAX_ARITY = 10
"""Boost.Python configuration macro value.
A function has more than BOOST_PYTHON_MAX_ARITY arguments, will not compile.
You should adjust BOOST_PYTHON_MAX_ARITY macro.
For more information see: http://mail.python.org/pipermail/c++-sig/2002-June/001554.html
"""
def __init__(self, *arguments, **keywords):
decl_wrapper.decl_wrapper_t.__init__( self, *arguments, **keywords )
self._call_policies = None
self._use_keywords = True
self._use_default_arguments = True
self._create_with_signature = None
self._overridable = None
self._non_overridable_reason = None
self._transformations = None
# call_policies accessors: None means the policy is decided at code-generation time.
def get_call_policies(self):
return self._call_policies
def set_call_policies(self, call_policies):
self._call_policies = call_policies
call_policies = property( get_call_policies, set_call_policies
, doc="reference to :class:`decl_wrappers.call_policy_t` class." \
+"Default value is calculated at runtime, based on return value.")
def _get_use_keywords(self):
return self._use_keywords and bool( self.arguments )
def _set_use_keywords(self, use_keywords):
self._use_keywords = use_keywords
use_keywords = property( _get_use_keywords, _set_use_keywords
, doc="boolean, if True, allows to call function from Python using keyword arguments." \
+"Default value is True.")
# Lazily computed: a full signature is required when the name is overloaded,
# is a template instantiation, or shadows/overloads a base-class member.
def _get_create_with_signature(self):
if None is self._create_with_signature:
self._create_with_signature = bool( self.overloads )
if not self._create_with_signature and declarations.templates.is_instantiation( self.name ):
self._create_with_signature = True
if not self._create_with_signature and isinstance( self.parent, declarations.class_t ):
for hi in self.parent.recursive_bases:
if hi.access_type == 'private':
continue
funcs = hi.related_class.calldefs( self.name, recursive=False, allow_empty=True )
for f in funcs:
if f.argument_types != self.argument_types:
self._create_with_signature = True
break
if self._create_with_signature:
break
if not self._create_with_signature:
self._create_with_signature \
= bool( self.parent.calldefs( self.name, recursive=False, allow_empty=True ) )
return self._create_with_signature
def _set_create_with_signature(self, create_with_signature):
self._create_with_signature = create_with_signature
create_with_signature = property( _get_create_with_signature, _set_create_with_signature
, doc="boolean, if True `Py++` will generate next code: def( ..., function type( function ref )"\
+"Thus, the generated code is safe, when a user creates function overloading." \
+"Default value is computed, based on information from the declarations tree" )
def _get_use_default_arguments(self):
return self._use_default_arguments
def _set_use_default_arguments(self, use_default_arguments):
self._use_default_arguments = use_default_arguments
use_default_arguments = property( _get_use_default_arguments, _set_use_default_arguments
, doc="boolean, if True `Py++` will generate code that will set default arguments" \
+"Default value is True.")
def has_wrapper( self ):
"""returns True, if function - wrapper is needed
The functionality by this function is incomplete. So please don't
use it in your code.
"""
if not isinstance( self, declarations.member_calldef_t ):
return False
elif self.virtuality == declarations.VIRTUALITY_TYPES.PURE_VIRTUAL:
return True
elif self.access_type == declarations.ACCESS_TYPES.PROTECTED:
return True
else:
return False
def get_overridable( self ):
"""Check if the method can be overridden."""
if None is self._overridable:
if isinstance( self, declarations.member_calldef_t ) \
and self.virtuality != declarations.VIRTUALITY_TYPES.NOT_VIRTUAL \
and declarations.is_reference( self.return_type ):
self._overridable = False
self._non_overridable_reason = messages.W1049
else:
self._overridable = True
self._non_overridable_reason = ""
return self._overridable
def set_overridable( self, overridable ):
self._overridable = overridable
overridable = property( get_overridable, set_overridable
, doc = get_overridable.__doc__ )
@property
def non_overridable_reason( self ):
"""returns the reason the function could not be overridden"""
return self._non_overridable_reason
def mark_as_non_overridable( self, reason ):
"""
mark this function as final - user will not be able to override it from Python
Not all functions could be overridden from Python, for example virtual function
that returns non const reference to a member variable. `Py++` allows you to
mark these functions and provide and explanation to the user.
"""
self.overridable = False
self._non_overridable_reason = messages.W0000 % reason
@property
def transformations(self):
"""return list of function transformations that should be applied on the function"""
if None is self._transformations:
#TODO: for trivial cases get_size( int&, int& ) `Py++` should guess
#function transformers
self._transformations = []
return self._transformations
def add_transformation(self, *transformer_creators, **keywd):
"""add new function transformation.
transformer_creators - list of transformer creators, which should be applied on the function
keywd - keyword arguments for :class:`function_transformers.function_transformation_t` class initialization
"""
self.transformations.append( ft.function_transformation_t( self, transformer_creators, **keywd ) )
def _exportable_impl_derived( self ):
return ''
# Returns '' when the function can be exposed, otherwise an explanatory message.
def _exportable_impl( self ):
if self.transformations:
#It is possible that the function asked for the user attention.
#The user paid attention and created a transformation.
#Py++ should be silent in this case.
return ''
if not self.parent.name:
return messages.W1057 % str( self )
all_types = [ arg.type for arg in self.arguments ]
all_types.append( self.return_type )
for some_type in all_types:
if isinstance( some_type, declarations.ellipsis_t ):
return messages.W1053 % str( self )
units = declarations.decompose_type( some_type )
ptr2functions = [unit for unit in units if isinstance( unit, declarations.calldef_type_t )]
if ptr2functions:
return messages.W1004
#Function that take as agrument some instance of non public class
#will not be exported. Same to the return variable
if isinstance( units[-1], declarations.declarated_t ):
dtype = units[-1]
if isinstance( dtype.declaration.parent, declarations.class_t ):
if dtype.declaration not in dtype.declaration.parent.public_members:
return messages.W1005
no_ref = declarations.remove_reference( some_type )
no_ptr = declarations.remove_pointer( no_ref )
no_const = declarations.remove_const( no_ptr )
if declarations.is_array( no_const ):
return messages.W1006
return self._exportable_impl_derived()
# Collects non-fatal warnings about the function for the user to review.
def _readme_impl( self ):
def is_double_ptr( type_ ):
#check for X**
if not declarations.is_pointer( type_ ):
return False
base = declarations.remove_pointer( type_ )
return declarations.is_pointer( base )
def suspicious_type( type_ ):
if not declarations.is_reference( type_ ):
return False
type_no_ref = declarations.remove_reference( type_ )
return not declarations.is_const( type_no_ref ) \
and ( declarations.is_fundamental( type_no_ref )
or declarations.is_enum( type_no_ref ) )
msgs = []
#TODO: functions that takes as argument pointer to pointer to smth, could not be exported
#see http://www.boost.org/libs/python/doc/v2/faq.html#funcptr
if len( self.arguments ) > calldef_t.BOOST_PYTHON_MAX_ARITY:
msgs.append( messages.W1007 % ( calldef_t.BOOST_PYTHON_MAX_ARITY, len( self.arguments ) ) )
if self.transformations:
#if user defined transformation, than I think it took care of the problems
ft = self.transformations[0]
if ft.alias == ft.unique_name:
msgs.append( messages.W1044 % ft.alias )
return msgs
if suspicious_type( self.return_type ) and None is self.call_policies:
msgs.append( messages.W1008 )
if ( declarations.is_pointer( self.return_type ) or is_double_ptr( self.return_type ) ) \
and None is self.call_policies:
msgs.append( messages.W1050 % str(self.return_type) )
for index, arg in enumerate( self.arguments ):
if suspicious_type( arg.type ):
msgs.append( messages.W1009 % ( arg.name, index ) )
if is_double_ptr( arg.type ):
msgs.append( messages.W1051 % ( arg.name, index, str(arg.type) ) )
if False == self.overridable:
msgs.append( self._non_overridable_reason)
problematics = algorithm.registration_order.select_problematics( self )
if problematics:
tmp = []
for f in problematics:
tmp.append( os.linesep + '\t' + str(f) )
msgs.append( messages.W1010 % os.linesep.join( tmp ) )
return msgs
class member_function_t( declarations.member_function_t, calldef_t ):
    """configuration class - describes how `Py++` should expose a member function"""
    def __init__(self, *arguments, **keywords):
        declarations.member_function_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._adaptor = None
        self._use_overload_macro = False
        #code snippets, executed before the different flavours of the function call
        self._override_precall_code = []
        self._overide_native_precall_code = []
        self._default_precall_code = []
    def _get_adaptor(self):
        return self._adaptor
    def _set_adaptor(self, adaptor):
        self._adaptor = adaptor
    adaptor = property( _get_adaptor, _set_adaptor
                        , doc="string; when set, `Py++` generates the code " \
                             "\".def(<name>, <adaptor>(<function reference>), <other args> )\". " \
                             "The property is relevant for public, non virtual member functions." )
    def add_override_precall_code(self, code):
        """register code, executed before the overridden member function call"""
        self._override_precall_code.append( code )
    @property
    def override_precall_code(self):
        """list of code snippets, executed before the overridden member function call"""
        return self._override_precall_code
    def add_override_native_precall_code(self, code):
        """register code, executed before the native member function call"""
        self._overide_native_precall_code.append( code )
    @property
    def override_native_precall_code(self):
        """list of code snippets, executed before the native member function call"""
        return self._overide_native_precall_code
    def add_default_precall_code(self, code):
        """register code, executed before this member function call"""
        self._default_precall_code.append( code )
    @property
    def default_precall_code(self):
        """list of code snippets, executed before this member function call"""
        return self._default_precall_code
    def get_use_overload_macro(self):
        return self._use_overload_macro
    def set_use_overload_macro(self, use_macro):
        self._use_overload_macro = use_macro
    use_overload_macro = property( get_use_overload_macro, set_use_overload_macro
                                   , doc="boolean; when True, the BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS macro " \
                                        "is used to expose the declaration. Default value is False." )
    def _exportable_impl_derived(self):
        #private, non virtual member functions can not be exposed at all
        is_private = self.access_type == declarations.ACCESS_TYPES.PRIVATE
        is_not_virtual = self.virtuality == declarations.VIRTUALITY_TYPES.NOT_VIRTUAL
        if is_private and is_not_virtual:
            return messages.W1011
        return ''
    def _readme_impl( self ):
        msgs = super( member_function_t, self )._readme_impl()
        #warn about virtual functions that are declared not to throw
        if self.virtuality != declarations.VIRTUALITY_TYPES.NOT_VIRTUAL \
           and self.does_throw == False:
            msgs.append( messages.W1046 )
        return msgs
class constructor_t( declarations.constructor_t, calldef_t ):
    """configuration class - describes how `Py++` should expose a constructor"""
    def __init__(self, *arguments, **keywords):
        declarations.constructor_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._allow_implicit_conversion = True
        self._body = ''
    def _get_body(self):
        return self._body
    def _set_body(self, body):
        self._body = body
    body = property( _get_body, _set_body
                     , doc="string; body of the class-wrapper constructor" )
    def _exportable_impl_derived( self ):
        if self.is_artificial:
            return messages.W1012
        if self.access_type == declarations.ACCESS_TYPES.PRIVATE:
            return messages.W1013
        return ''
    def does_define_implicit_conversion( self ):
        """returns True, if the constructor can take part in implicit conversions.

        For more information see:
        * http://boost.org/libs/python/doc/v2/implicit.html#implicitly_convertible-spec
        * http://msdn2.microsoft.com/en-us/library/h1y7x448.aspx
        * http://msdn.microsoft.com/en-us/library/s2ff0fz8%28VS.100%29.aspx
        """
        if self.parent.is_abstract:
            #user is not able to create an instance of the class
            return False
        if self.is_copy_constructor:
            return False
        #implicit conversion needs exactly one required argument
        takes_single_required_arg = len( self.arguments ) and len( self.required_args ) < 2
        if not takes_single_required_arg:
            return False
        access = self.parent.find_out_member_access_type( self )
        return access == declarations.ACCESS_TYPES.PUBLIC
    def _get_allow_implicit_conversion(self):
        return self._allow_implicit_conversion and self.does_define_implicit_conversion()
    def _set_allow_implicit_conversion(self, allow_implicit_conversion):
        self._allow_implicit_conversion = allow_implicit_conversion
    allow_implicit_conversion = property( _get_allow_implicit_conversion, _set_allow_implicit_conversion
                                          , doc="boolean; indicates whether `Py++` should generate implicitly_convertible " \
                                               "code. The default value is calculated from the constructor type." )
class destructor_t( declarations.destructor_t, calldef_t ):
    """destructor configuration placeholder.

    At the moment this class adds nothing on top of calldef_t. In the future
    it will grow a "body" property, which will allow the user to insert code
    into the class-wrapper destructor.
    """
    #TODO: add body property
    def __init__(self, *arguments, **keywords):
        declarations.destructor_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
class operators_helper:
    """helps `Py++` to deal with C++ operators

    Knows which operator symbols Boost.Python can expose, decides whether a
    concrete operator declaration is exportable and guesses in the scope of
    which class a free operator should be registered.
    """
    #operator symbols, grouped roughly the way Boost.Python treats them
    inplace = [ '+=', '-=', '*=', '/=', '%=', '>>=', '<<=', '&=', '^=', '|=' ]
    comparison = [ '==', '!=', '<', '>', '<=', '>=' ]
    non_member = [ '+', '-', '*', '/', '%', '&', '^', '|', ]
    unary = [ '!', '~', '+', '-' ]
    #NOTE: "all" deliberately shadows the built-in inside the class namespace
    all = inplace + comparison + non_member + unary
    @staticmethod
    def is_supported( oper ):
        """returns True if Boost.Python support the operator

        Every operator, except "operator<<", is simply checked against the
        "all" list. "operator<<" gets special treatment: it is only accepted
        as a free operator of the form "T& operator<<( T&, U const & )",
        where T is std::ostream or std::wostream.
        """
        if oper.symbol == '*' and len( oper.arguments ) == 0:
            #dereference does not make sense
            return False
        if oper.symbol != '<<':
            return oper.symbol in operators_helper.all
        #from here on we only deal with operator<<
        args_len = len( oper.arguments )
        if isinstance( oper, declarations.member_operator_t ):# and args_len != 1:
            return False #Boost.Python does not support member operator<< :-(
        if isinstance( oper, declarations.free_operator_t ) and args_len != 2:
            return False
        if not declarations.is_same( oper.return_type, oper.arguments[0].type ):
            #operator<< must return the very stream it received as first argument
            return False
        type_ = oper.return_type
        if not declarations.is_reference( type_ ):
            return False
        type_ = declarations.remove_reference( type_ )
        if declarations.is_const( type_ ):
            return False
        if args_len == 2:
            #second argument should have "T const &" type, otherwise the code will not compile
            tmp = oper.arguments[1].type
            if not declarations.is_reference( tmp ):
                return False
            tmp = declarations.remove_reference( tmp )
            if not declarations.is_const( tmp ):
                return False
        return declarations.is_std_ostream( type_ ) or declarations.is_std_wostream( type_ )
    @staticmethod
    def exportable( oper ):
        """returns an empty string if Boost.Python or `Py++` know how to export
        the operator, otherwise the relevant warning message"""
        if isinstance( oper, declarations.member_operator_t ) and oper.symbol in ( '()', '[]', '=' ):
            #these are handled specially by `Py++`, so always exportable
            return ''
        if not operators_helper.is_supported( oper ):
            return messages.W1014 % oper.name
        if isinstance( oper, declarations.free_operator_t ):
            #`Py++` should find out whether the relevant class is exposed to Python
            #and if not, than this operator should not be exposed too
            included = [decl for decl in oper.class_types if decl.ignore == False]
            if not included:
                return messages.W1052 % str(oper)
        return ''
    @staticmethod
    def target_class( oper ):
        """this functions returns reference to class/class declaration
        in scope of which, the operator should be exposed."""
        if isinstance( oper.parent, declarations.class_t ):
            #member operators are always exposed within their own class
            return oper.parent
        #now we deal with free operators
        def find_class( type_ ):
            #returns the class (declaration) behind type_, or None
            type_ = declarations.remove_reference( type_ )
            if declarations.is_class( type_ ):
                return declarations.class_traits.get_declaration( type_ )
            elif declarations.is_class_declaration( type_ ):
                return declarations.class_declaration_traits.get_declaration( type_ )
            else:
                return None
        arg_1_class = find_class( oper.arguments[0].type )
        arg_2_class = None
        if 2 == len( oper.arguments ):
            arg_2_class = find_class( oper.arguments[1].type )
        if arg_1_class:
            if declarations.is_std_ostream( arg_1_class ) or declarations.is_std_wostream( arg_1_class ):
                #in most cases users doesn't expose std::ostream class
                return arg_2_class
            else:
                return arg_1_class
        else:
            return arg_2_class
class member_operator_t( declarations.member_operator_t, calldef_t ):
    """configuration class - describes how `Py++` should expose a member operator"""
    def __init__(self, *arguments, **keywords):
        declarations.member_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        #code snippets, executed before the different flavours of the operator call
        self._override_precall_code = []
        self._default_precall_code = []
        self._overide_native_precall_code = []
    def add_override_precall_code(self, code):
        """register code, executed before the overridden operator call"""
        self._override_precall_code.append( code )
    @property
    def override_precall_code(self):
        """list of code snippets, executed before the overridden operator call"""
        return self._override_precall_code
    def add_default_precall_code(self, code):
        """register code, executed before this operator call"""
        self._default_precall_code.append( code )
    @property
    def default_precall_code(self):
        """list of code snippets, executed before this operator call"""
        return self._default_precall_code
    def add_override_native_precall_code(self, code):
        """register code, executed before the native operator call"""
        self._overide_native_precall_code.append( code )
    @property
    def override_native_precall_code(self):
        """list of code snippets, executed before the native operator call"""
        return self._overide_native_precall_code
    def _get_alias( self):
        alias = super( member_operator_t, self )._get_alias()
        if alias == self.name:
            #map the C++ operator symbol to a Python friendly name
            special_names = { '()' : '__call__', '[]' : '__getitem__', '=' : 'assign' }
            alias = special_names.get( self.symbol, alias )
        return alias
    alias = property( _get_alias, decl_wrapper.decl_wrapper_t._set_alias
                      , doc="Gives right alias for operator()( __call__ ) and operator[]( __getitem__ )" )
    def _exportable_impl_derived( self ):
        #private, non virtual member operators can not be exposed at all
        is_private = self.access_type == declarations.ACCESS_TYPES.PRIVATE
        is_not_virtual = self.virtuality == declarations.VIRTUALITY_TYPES.NOT_VIRTUAL
        if is_private and is_not_virtual:
            return messages.W1015
        return operators_helper.exportable( self )
    @property
    def target_class( self ):
        """class, in the scope of which the operator is registered"""
        return self.parent
class casting_operator_t( declarations.casting_operator_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the casting operator"""
    def prepare_special_cases():
        """
        Creates a map of special cases ( aliases ) for casting operator.

        The keys are either type objects ( fundamental types plus their const
        variations ) or raw decl strings; the values are Python special method
        names ( __int__, __long__, __float__, __str__ ).
        """
        special_cases = {}
        const_t = declarations.const_t
        pointer_t = declarations.pointer_t
        for type_ in list(declarations.FUNDAMENTAL_TYPES.values()):
            alias = None
            if declarations.is_same( type_, declarations.bool_t() ):
                alias = '__int__'
            elif declarations.is_integral( type_ ):
                if 'long' in type_.decl_string:
                    alias = '__long__'
                else:
                    alias = '__int__'
            elif declarations.is_floating_point( type_ ):
                alias = '__float__'
            else:
                continue #void
            if alias:
                #register the type and its const variation under the same alias
                special_cases[ type_ ] = alias
                special_cases[ const_t( type_ ) ] = alias
        special_cases[ pointer_t( const_t( declarations.char_t() ) ) ] = '__str__'
        #std::string/std::wstring decl strings differ between front-ends, so
        #every known spelling is registered
        std_string = '::std::basic_string<char,std::char_traits<char>,std::allocator<char> >'
        std_wstring1 = '::std::basic_string<wchar_t,std::char_traits<wchar_t>,std::allocator<wchar_t> >'
        std_wstring2 = '::std::basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >'
        special_cases[ std_string ] = '__str__'
        special_cases[ std_wstring1 ] = '__str__'
        special_cases[ std_wstring2 ] = '__str__'
        special_cases[ '::std::string' ] = '__str__'
        special_cases[ '::std::wstring' ] = '__str__'
        #TODO: add
        # std::complex<SomeType> some type should be converted to double
        return special_cases
    #evaluated once, at class definition time
    SPECIAL_CASES = prepare_special_cases()
    #casting_member_operator_t.prepare_special_cases()
    def __init__(self, *arguments, **keywords):
        declarations.casting_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
    def _get_alias( self):
        #compute the alias lazily: keep an explicitly set alias, otherwise
        #derive one from the return type of the casting operator
        if not self._alias or self.name == super( casting_operator_t, self )._get_alias():
            return_type = declarations.remove_alias( self.return_type )
            decl_string = return_type.decl_string
            for type_, alias in list(self.SPECIAL_CASES.items()):
                if isinstance( type_, declarations.type_t ):
                    if declarations.is_same( return_type, type_ ):
                        self._alias = alias
                        break
                else:
                    if decl_string == type_:
                        self._alias = alias
                        break
            else:
                #for..else: no special case matched - fall back to an "as_<type>" alias
                self._alias = 'as_' + self._generate_valid_name(self.return_type.decl_string)
        return self._alias
    alias = property( _get_alias, decl_wrapper.decl_wrapper_t._set_alias
                      , doc="Gives right alias for casting operators: __int__, __long__, __str__." \
                           +"If there is no built-in type, creates as_xxx alias" )
    def _exportable_impl_derived( self ):
        #warn on non-const casting operators to non-fundamental types and on
        #non-public casting operators
        if not declarations.is_fundamental( self.return_type ) and not self.has_const:
            return messages.W1016
        if self.access_type != declarations.ACCESS_TYPES.PUBLIC:
            return messages.W1017
        return ''
class free_function_t( declarations.free_function_t, calldef_t ):
    """configuration class - describes how `Py++` should expose a free function"""
    def __init__(self, *arguments, **keywords):
        declarations.free_function_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._adaptor = None
        self._declaration_code = []
        self._use_overload_macro = False
    def _get_adaptor(self):
        return self._adaptor
    def _set_adaptor(self, adaptor):
        self._adaptor = adaptor
    adaptor = property( _get_adaptor, _set_adaptor
                        , doc="string; when set, `Py++` generates the code " \
                             "\"def(<name>, <adaptor>(<function reference>), <other args> )\"" )
    def add_declaration_code( self, code ):
        """appends user code to the declaration section of the generated file"""
        self.declaration_code.append( user_text.user_text_t( code ) )
    @property
    def declaration_code( self ):
        """
        List of strings, that contains valid C++ code, that will be added to
        the same file in which the registration code for the function will be
        generated
        """
        return self._declaration_code
    def get_use_overload_macro(self):
        return self._use_overload_macro
    def set_use_overload_macro(self, use_macro):
        self._use_overload_macro = use_macro
    use_overload_macro = property( get_use_overload_macro, set_use_overload_macro
                                   , doc="boolean; when True, the BOOST_PYTHON_FUNCTION_OVERLOADS macro " \
                                        "is used to expose the declaration. Default value is False." )
class free_operator_t( declarations.free_operator_t, calldef_t ):
    """configuration class - describes how `Py++` should expose a free operator"""
    _target_class_doc_ = "reference to class_t or class_declaration_t object." \
        + " There are use cases, where `Py++` doesn't guess right, in what scope" \
        + " free operator should be registered( exposed ). If this is your use case " \
        + " than setting the class will allow you to quickly fix the situation. "
    def __init__(self, *arguments, **keywords):
        declarations.free_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._target_class = None
    def _exportable_impl_derived( self ):
        return operators_helper.exportable( self )
    def get_target_class( self ):
        #guessed lazily (via operators_helper) and cached afterwards
        if self._target_class is None:
            self._target_class = operators_helper.target_class( self )
        return self._target_class
    def set_target_class( self, class_ ):
        self._target_class = class_
    target_class = property( get_target_class, set_target_class, doc=_target_class_doc_ )
| 44.68541 | 135 | 0.634119 |
acf1444271da7bd35df7503cf6efb5bba3f1b115 | 5,756 | py | Python | selfdrive/manager/process_config.py | ajouatom/openpilot_circuit | e0c40ea0f8e8aa01124e23bc6685285d6ca49a89 | [
"MIT"
] | null | null | null | selfdrive/manager/process_config.py | ajouatom/openpilot_circuit | e0c40ea0f8e8aa01124e23bc6685285d6ca49a89 | [
"MIT"
] | null | null | null | selfdrive/manager/process_config.py | ajouatom/openpilot_circuit | e0c40ea0f8e8aa01124e23bc6685285d6ca49a89 | [
"MIT"
] | null | null | null | import os
from common.params import Params
from selfdrive.hardware import EON, TICI, PC
from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess
WEBCAM = os.getenv("USE_WEBCAM") is not None

# Whether the logging/upload related daemons should be managed at all.
LOGGING_ENABLED = Params().get_bool('LoggerEnabled')

# The process list used to be duplicated wholesale for the logging-enabled and
# logging-disabled configurations, differing only in a handful of entries.
# It is now built once, with the logging-only processes inserted conditionally
# at the positions they occupied in the original lists (startup order is kept).
procs = []

if LOGGING_ENABLED:
  procs.append(DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"))

# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
procs.append(NativeProcess("camerad", "selfdrive/camerad", ["./camerad"], unkillable=True, driverview=True))
procs.append(NativeProcess("clocksd", "selfdrive/clocksd", ["./clocksd"]))
procs.append(NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), driverview=True))

if LOGGING_ENABLED:
  procs.append(NativeProcess("logcatd", "selfdrive/logcatd", ["./logcatd"]))
  procs.append(NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]))

procs += [
  NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]),
  NativeProcess("navd", "selfdrive/ui/navd", ["./navd"], enabled=(PC or TICI), persistent=True),
  NativeProcess("proclogd", "selfdrive/proclogd", ["./proclogd"]),
  NativeProcess("sensord", "selfdrive/sensord", ["./sensord"], enabled=not PC, persistent=EON, sigkill=EON),
  NativeProcess("ubloxd", "selfdrive/locationd", ["./ubloxd"], enabled=(not PC or WEBCAM)),
  NativeProcess("ui", "selfdrive/ui", ["./ui"], persistent=True, watchdog_max_dt=(5 if TICI else None)),
  NativeProcess("soundd", "selfdrive/ui/soundd", ["./soundd"], persistent=True),
  NativeProcess("locationd", "selfdrive/locationd", ["./locationd"]),
  NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], enabled=False),
  PythonProcess("calibrationd", "selfdrive.locationd.calibrationd"),
  PythonProcess("controlsd", "selfdrive.controls.controlsd"),
  PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True),
  PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), driverview=True),
]

if LOGGING_ENABLED:
  procs.append(PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True))

procs += [
  PythonProcess("pandad", "selfdrive.pandad", persistent=True),
  PythonProcess("paramsd", "selfdrive.locationd.paramsd"),
  PythonProcess("plannerd", "selfdrive.controls.plannerd"),
  PythonProcess("radard", "selfdrive.controls.radard"),
  PythonProcess("thermald", "selfdrive.thermald.thermald", persistent=True),
  PythonProcess("timezoned", "selfdrive.timezoned", enabled=TICI, persistent=True),
]

if LOGGING_ENABLED:
  procs.append(PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True))

procs.append(PythonProcess("updated", "selfdrive.updated", enabled=not PC, persistent=True))

if LOGGING_ENABLED:
  procs.append(PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True))

procs.append(PythonProcess("statsd", "selfdrive.statsd", persistent=True))

# EON only
procs += [
  PythonProcess("rtshield", "selfdrive.rtshield", enabled=EON),
  PythonProcess("shutdownd", "selfdrive.hardware.eon.shutdownd", enabled=EON),
  PythonProcess("androidd", "selfdrive.hardware.eon.androidd", enabled=EON, persistent=True),
]

# Lookup table used by the manager: process name -> process object.
managed_processes = {p.name: p for p in procs}
| 65.409091 | 127 | 0.738013 |
acf1447700c0c542e379db243a4845a20c28101b | 2,345 | py | Python | inventory_registration.py | joao-lanzarini/sales-analytics-python | c9a38a1153d9d56405a85aac8726bcafbdecb001 | [
"MIT"
] | null | null | null | inventory_registration.py | joao-lanzarini/sales-analytics-python | c9a38a1153d9d56405a85aac8726bcafbdecb001 | [
"MIT"
] | null | null | null | inventory_registration.py | joao-lanzarini/sales-analytics-python | c9a38a1153d9d56405a85aac8726bcafbdecb001 | [
"MIT"
] | null | null | null | import pandas as pd
import time
from functions import inventory_check as ic
from functions import id_generator as ig
from functions import clear_screen as screen
def createInventory(): # FIRST GENERATION OF THE INVENTORY
    """Create ``inventory.csv`` (if it does not exist yet) and interactively
    register products in it.

    Bug fix: previously the product-registration ``while`` loop ran *after*
    the ``with`` block had already closed the file, so the very first
    ``file.write`` raised ``ValueError: I/O operation on closed file``.
    The loop now lives inside the ``with`` block, so every record is written
    to the open file.
    """
    check = ic.checkInventory()
    if check:
        print("Inventory already created.")
        print()
        return
    screen.clear()
    print('Creating your inventory...')
    time.sleep(1)
    with open('inventory.csv', 'w') as file:
        file.write('ID, PRODUCT, PRICE, AMOUNT\n')
        print('Your inventory has been created successfully!')
        while True:
            print()
            prod = input('Insert the product name [0 to exit]: ').capitalize()
            if prod == '0':
                break
            price = _ask_price(prod)
            if price is None:  # user typed the exit sentinel
                break
            amount = _ask_amount(prod)
            if amount is None:  # user typed the exit sentinel
                break
            screen.clear()
            file.write(f'{ig.generate_id()},{prod},{price},{amount}\n')
    print()
    print('Inventory initialized successfully!')
    print()


def _ask_price(prod):
    """Prompt for the price of *prod* until a valid number is entered.

    Returns the validated price as the raw string the user typed (with ','
    replaced by '.'), or None when the user enters the exit sentinel '0'.
    """
    while True:
        price = input(f'Insert the {prod} price ($) [0 to exit]: ').replace(',', '.')
        try:
            float(price)
        except ValueError:
            print()
            print(f'Error: {price} is not a valid number.')
            print("Make sure you're not separating thousands.")
            print()
        else:
            return None if price == '0' else price


def _ask_amount(prod):
    """Prompt for the available amount of *prod* until a valid integer is entered.

    Returns the validated amount as the raw string the user typed, or None
    when the user enters the exit sentinel '-1'.
    """
    while True:
        amount = input(f'Insert the amount of {prod} available [-1 to exit]: ')
        try:
            int(amount)
        except ValueError:
            print()
            print(f'Error: {amount} is not a valid number.')
            print("Only integer number supported.")
            print()
        else:
            return None if amount == '-1' else amount
acf144be4d8c00b03a515ce003e38598af1ba6d1 | 9,113 | py | Python | scripts/data_generation/ne-cooling/volume-vel.py | cquammen/cinema | 9cc8aed8dfe9e9aa73323aaa7bd0b85053e1168e | [
"BSD-3-Clause"
] | 13 | 2015-03-27T17:37:57.000Z | 2021-07-31T16:43:42.000Z | scripts/data_generation/ne-cooling/volume-vel.py | cquammen/cinema | 9cc8aed8dfe9e9aa73323aaa7bd0b85053e1168e | [
"BSD-3-Clause"
] | 2 | 2015-02-18T20:47:27.000Z | 2015-03-12T17:01:34.000Z | scripts/data_generation/ne-cooling/volume-vel.py | cquammen/cinema | 9cc8aed8dfe9e9aa73323aaa7bd0b85053e1168e | [
"BSD-3-Clause"
] | 4 | 2015-03-18T01:00:25.000Z | 2021-07-31T16:43:42.000Z |
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Generate a static image dataset of volume rendering on the ne cooling data
###
### Example usages (assumes you are in directory with this script):
###
### 1) To run on the coarse mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vel/tent" --optype "tent"
###
### 2) To run on the coarse mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vel/linear" --optype "linear"
###
### 3) To run on the fine mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vel/tent" --optype "tent"
###
### 4) To run on the fine mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vel/linear" --optype "linear"
###
import sys, os, argparse
from paraview.simple import *
from paraview import data_exploration as wx
#import matplotlib.pyplot as plt
###############################################################################
# Helper function to generate the tent functions needed for scalar opacity
# function
###############################################################################
def createHatFunctions():
    """Build evenly spaced tent ("hat") opacity profiles covering [0, 1].

    Returns a list of [xPoints, yPoints] pairs. Each pair describes one
    triangular bump of base width 0.2, so five bumps tile the unit interval.
    """
    base_width = 0.20
    half_width = base_width / 2.0
    bump_count = int(1.0 / base_width)
    hats = []
    for i in range(bump_count):
        center = (base_width / 2.0) + (i * base_width)
        start = center - half_width
        x_points = [ 0.0, start, start + half_width, start + (2 * half_width), 1.0 ]
        y_points = [ 0.0, 0.0, 1.0, 0.0, 0.0 ]
        hats.append([x_points, y_points])
    return hats
###############################################################################
# This method does all the processing
###############################################################################
def doProcessing(inputDir, inputPattern, outputDir, opacityFnType):
    """Render a 360-degree image stack of the NE cooling data, for every
    timestep and for five scalar opacity functions over the 'vel' array.

    inputDir      -- directory containing the input .vtk files
    inputPattern  -- file name pattern with a %d placeholder for the timestep
    outputDir     -- directory the cinema image dataset is written to
    opacityFnType -- 'tent' for tent-shaped opacity functions, anything else
                     selects the linear-ramp variant
    """
    # -----------------------------------------------------------------------------
    # Path to input/output data/directories
    # -----------------------------------------------------------------------------
    files_pattern = os.path.join(inputDir, inputPattern)
    file_times = range(0, 101)
    #file_times = [ 80 ]
    filenames = [ (files_pattern % time) for time in file_times]
    # -----------------------------------------------------------------------------
    # Rendering configuration
    # -----------------------------------------------------------------------------
    resolution = 500
    view_size = [resolution, resolution]
    angle_steps = [15, 15]
    #angle_steps = [90, 90]
    distance = 24632.991324377483
    rotation_axis = [0.0, 1.0, 0.0]
    #center_of_rotation = [-1649.1046142578125, -752.328125, 1374.1217346191406]
    center_of_rotation = [0.0, 0.0, 0.0]
    view = GetRenderView()
    view.ViewSize = view_size
    view.Background = [0.0, 0.0, 0.0]
    view.OrientationAxesVisibility = 0
    view.CenterAxesVisibility = 0
    # -----------------------------------------------------------------------------
    # Output configuration
    # -----------------------------------------------------------------------------
    # one image per (timestep, opacity function, camera angle)
    fng = wx.FileNameGenerator(outputDir, '{time}/{volumeIdx}/{theta}_{phi}.jpg')
    exporter = wx.ThreeSixtyImageStackExporter(fng,
                                               view,
                                               center_of_rotation,
                                               distance,
                                               rotation_axis,
                                               angle_steps)
    # -----------------------------------------------------------------------------
    # Pipeline configuration
    # -----------------------------------------------------------------------------
    # create a new 'Legacy VTK Reader'
    readerProxy = LegacyVTKReader(FileNames=filenames)
    # This translation transform is a workaround for a bug in the camera orbiting
    # calculations made in ThreeSixtyImageStackExporter
    transform1 = Transform(Input=readerProxy)
    transform1.Transform = 'Transform'
    transform1.Transform.Translate = [1649.1046142578125, 752.328125, -1374.1217346191406]
    # create a new 'Cell Data to Point Data'
    cellDatatoPointData1 = CellDatatoPointData(Input=transform1)
    # get color transfer function/color map for 'vel'
    velLUT = GetColorTransferFunction('vel')
    velLUT.RGBPoints = [0.0, 0.0, 0.0, 1.0, 15000.0, 1.0, 0.0, 0.0]
    velLUT.LockScalarRange = 1
    velLUT.ColorSpace = 'HSV'
    velLUT.NanColor = [0.498039, 0.498039, 0.498039]
    velLUT.ScalarRangeInitialized = 1.0
    # get opacity transfer function/opacity map for 'vel'
    velPWF = GetOpacityTransferFunction('vel')
    velPWF.Points = [0.0, 0.0, 0.5, 0.0, 15000.0, 1.0, 0.5, 0.0]
    velPWF.Points = [0.0, 0.0, 0.5, 0.0, 15000.0, 1.0, 0.5, 0.0] if False else velPWF.Points
    velPWF.ScalarRangeInitialized = 1
    # show data from fine_results_
    readerDisplay = Show(transform1)
    readerDisplay.ColorArrayName = [None, '']
    readerDisplay.Opacity = 0.15
    readerDisplay.ScalarOpacityUnitDistance = 79.03822718592288
    # show data from cellDatatoPointData1
    cellDatatoPointData1Display = Show(cellDatatoPointData1)
    cellDatatoPointData1Display.Representation = 'Volume'
    cellDatatoPointData1Display.ColorArrayName = ['POINTS', 'vel']
    cellDatatoPointData1Display.LookupTable = velLUT
    cellDatatoPointData1Display.ScalarOpacityFunction = velPWF
    cellDatatoPointData1Display.ScalarOpacityUnitDistance = 79.03822718592288
    # -----------------------------------------------------------------------------
    # Batch processing
    # -----------------------------------------------------------------------------
    # hat functions are only needed in 'tent' mode
    if opacityFnType == 'tent':
        hatFunctions = createHatFunctions()
    Render()
    for t in range(0, len(file_times), 1):
        time = file_times[t]
        GetAnimationScene().TimeKeeper.Time = float(time)
        UpdatePipeline(time)
        dataRange = [0.0, 15000.0]
        print "Moving to timestep ",time,", new data range: ",dataRange
        for volumeIdx in range(5):
            curRange = dataRange[1] - dataRange[0]
            pwfPoints = []
            if opacityFnType == 'tent':
                # rescale the unit-interval hat function to the data range;
                # each piecewise function point is (x, opacity, midpoint, sharpness)
                xPoints = hatFunctions[volumeIdx][0]
                yPoints = hatFunctions[volumeIdx][1]
                for i in range(len(xPoints)):
                    pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
                    pwfPoints.append(yPoints[i])
                    pwfPoints.append(0.5)
                    pwfPoints.append(0.0)
            else:
                # linear ramp: opacity 0 up to a sliding threshold, then up to 1 at range max
                curStep = dataRange[0] + (float(volumeIdx) * (curRange / 5.0))
                pwfPoints = [ dataRange[0], 0.0, 0.5, 0.0,
                              curStep, 0.0, 0.5, 0.0,
                              dataRange[1], 1.0, 0.5, 0.0 ]
            newPwf = CreatePiecewiseFunction( Points=pwfPoints )
            cellDatatoPointData1Display.ScalarOpacityFunction = newPwf
            fng.update_active_arguments(volumeIdx=volumeIdx)
            fng.update_label_arguments(volumeIdx="Idx")
            exporter.UpdatePipeline(time)
###############################################################################
# Main script entry point
###############################################################################
if __name__ == "__main__":
    # Command line front-end: collect the four required options and hand
    # them over to doProcessing().
    parser = argparse.ArgumentParser(description="Python script to generate volume rendered NE cooling data")
    parser.add_argument("--inputdir", type=str, default="", help="Path to directory where input data files exist")
    parser.add_argument("--inputpattern", type=str, default="", help="String pattern containing %d where pattern should be replaced with numbers")
    parser.add_argument("--outputdir", type=str, default="", help="Path to directory where cinema dataset should be written")
    parser.add_argument("--optype", type=str, default="", help="Opacity function type, should be either 'tent' or 'linear'")
    options = parser.parse_args()
    doProcessing(options.inputdir, options.inputpattern, options.outputdir, options.optype)
| 44.237864 | 257 | 0.558104 |
acf144d5e43d7d78baca760634d5508ccd374e44 | 104 | py | Python | project/src/Model/global/state.py | sayakoftheleaf/Player-Me | 2cbaa4e7e1f40b81a8bf01566b12288ad0b9ffde | [
"BSD-3-Clause"
] | 1 | 2018-12-26T22:04:16.000Z | 2018-12-26T22:04:16.000Z | project/src/Model/global/state.py | sayakoftheleaf/Player-Me | 2cbaa4e7e1f40b81a8bf01566b12288ad0b9ffde | [
"BSD-3-Clause"
] | null | null | null | project/src/Model/global/state.py | sayakoftheleaf/Player-Me | 2cbaa4e7e1f40b81a8bf01566b12288ad0b9ffde | [
"BSD-3-Clause"
] | null | null | null | state = {
'habits': {},
'quests': {},
'rewards': {},
'tasks': {},
'metaData': {}
}
| 11.555556 | 18 | 0.355769 |
acf144f1ec0c77cac9cb710f845e5052161b884d | 5,853 | py | Python | server/openapi_server/models/person.py | mintproject/MINT-ModelCatalogIngestionAPI | 026d3495483a3e48ea3c1364d0dda09beeea69e4 | [
"Apache-2.0"
] | 2 | 2019-05-30T21:33:43.000Z | 2019-09-27T21:04:38.000Z | server/openapi_server/models/person.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | 82 | 2019-10-08T16:35:34.000Z | 2022-03-15T18:25:27.000Z | server/openapi_server/models/person.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class Person(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    Plain attribute container for a Person resource: each schema field is
    stored in a private attribute and exposed through a getter/setter
    property pair below.
    """
    def __init__(self, identifier=None, website=None, description=None, id=None, label=None, type=None, email=None):  # noqa: E501
        """Person - a model defined in OpenAPI
        :param identifier: The identifier of this Person.  # noqa: E501
        :type identifier: List[str]
        :param website: The website of this Person.  # noqa: E501
        :type website: List[str]
        :param description: The description of this Person.  # noqa: E501
        :type description: List[str]
        :param id: The id of this Person.  # noqa: E501
        :type id: str
        :param label: The label of this Person.  # noqa: E501
        :type label: List[str]
        :param type: The type of this Person.  # noqa: E501
        :type type: List[str]
        :param email: The email of this Person.  # noqa: E501
        :type email: List[str]
        """
        # Attribute name -> declared OpenAPI type (consumed by the serializer).
        self.openapi_types = {
            'identifier': List[str],
            'website': List[str],
            'description': List[str],
            'id': str,
            'label': List[str],
            'type': List[str],
            'email': List[str]
        }
        # Python attribute name -> JSON key on the wire.
        self.attribute_map = {
            'identifier': 'identifier',
            'website': 'website',
            'description': 'description',
            'id': 'id',
            'label': 'label',
            'type': 'type',
            'email': 'email'
        }
        self._identifier = identifier
        self._website = website
        self._description = description
        self._id = id
        self._label = label
        self._type = type
        self._email = email
    @classmethod
    def from_dict(cls, dikt) -> 'Person':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The Person of this Person.  # noqa: E501
        :rtype: Person
        """
        return util.deserialize_model(dikt, cls)
    @property
    def identifier(self):
        """Gets the identifier of this Person.
        Identifier of the resource being described  # noqa: E501
        :return: The identifier of this Person.
        :rtype: List[str]
        """
        return self._identifier
    @identifier.setter
    def identifier(self, identifier):
        """Sets the identifier of this Person.
        Identifier of the resource being described  # noqa: E501
        :param identifier: The identifier of this Person.
        :type identifier: List[str]
        """
        self._identifier = identifier
    @property
    def website(self):
        """Gets the website of this Person.
        Website of the software  # noqa: E501
        :return: The website of this Person.
        :rtype: List[str]
        """
        return self._website
    @website.setter
    def website(self, website):
        """Sets the website of this Person.
        Website of the software  # noqa: E501
        :param website: The website of this Person.
        :type website: List[str]
        """
        self._website = website
    @property
    def description(self):
        """Gets the description of this Person.
        small description  # noqa: E501
        :return: The description of this Person.
        :rtype: List[str]
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this Person.
        small description  # noqa: E501
        :param description: The description of this Person.
        :type description: List[str]
        """
        self._description = description
    @property
    def id(self):
        """Gets the id of this Person.
        identifier  # noqa: E501
        :return: The id of this Person.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Person.
        identifier  # noqa: E501
        :param id: The id of this Person.
        :type id: str
        """
        self._id = id
    @property
    def label(self):
        """Gets the label of this Person.
        short description of the resource  # noqa: E501
        :return: The label of this Person.
        :rtype: List[str]
        """
        return self._label
    @label.setter
    def label(self, label):
        """Sets the label of this Person.
        short description of the resource  # noqa: E501
        :param label: The label of this Person.
        :type label: List[str]
        """
        self._label = label
    @property
    def type(self):
        """Gets the type of this Person.
        type of the resource  # noqa: E501
        :return: The type of this Person.
        :rtype: List[str]
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this Person.
        type of the resource  # noqa: E501
        :param type: The type of this Person.
        :type type: List[str]
        """
        self._type = type
    @property
    def email(self):
        """Gets the email of this Person.
        Email of a person  # noqa: E501
        :return: The email of this Person.
        :rtype: List[str]
        """
        return self._email
    @email.setter
    def email(self, email):
        """Sets the email of this Person.
        Email of a person  # noqa: E501
        :param email: The email of this Person.
        :type email: List[str]
        """
        self._email = email
| 24.696203 | 130 | 0.564326 |
acf1471451c7e3b0fbea951c17da3d1036196416 | 829 | py | Python | src/demos/branchandbound/tsp3.py | DavidLlorens/algoritmia | 40ca0a89ea6de9b633fa5f697f0a28cae70816a2 | [
"MIT"
] | 6 | 2018-09-15T15:09:10.000Z | 2022-02-27T01:23:11.000Z | src/demos/branchandbound/tsp3.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
] | null | null | null | src/demos/branchandbound/tsp3.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
] | 5 | 2018-07-10T20:19:55.000Z | 2021-03-31T03:32:22.000Z | #coding: latin1
#< full
from algoritmia.problems.tsp import TspAsBranchAndBoundProblem3
from algoritmia.schemes.branchandbound import BranchAndBoundSolver
from algoritmia.datastructures.prioritydicts import MinHeapMap
from algoritmia.datastructures.graphs import UndirectedGraph, WeightingFunction
# Symmetric edge-weight function over 10 vertices (0..9); each key is an
# undirected edge, each value its cost.
w = WeightingFunction({(0,1): 0, (0,2): 15, (0,3): 2, (1,3): 3, (1,4): 13, (2,3): 11,
                       (2,5): 4, (3,4): 5, (3,5): 8, (3,6): 12, (4,7): 9, (5,6): 16,
                       (5,8):10, (6,7): 17, (6,8): 1, (6,9): 6, (7,9): 14, (8,9): 7},
                      symmetrical=True)
# Build the graph from the weighted edges and solve the TSP instance with
# branch and bound, using a min-heap map as the priority queue.
G = UndirectedGraph(E=w.keys())
problem = TspAsBranchAndBoundProblem3(G, w)
x, weight = BranchAndBoundSolver(problem, lambda keyvalues: MinHeapMap(keyvalues)).solve()
print('Camino', x, 'con peso', weight)
#> full | 46.055556 | 91 | 0.626055 |
acf147558e0f4e4b9686c6abe0c572e2fac2be5d | 2,910 | py | Python | recipes/guetzli/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/guetzli/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/guetzli/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import AutoToolsBuildEnvironment, ConanFile, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
class GoogleGuetzliConan(ConanFile):
    """Conan recipe packaging Google's guetzli perceptual JPEG encoder.

    Builds with MSBuild on Visual Studio and with make (autotools env)
    elsewhere; packages only the resulting executable plus the license.
    """
    name = "guetzli"
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://opensource.google/projects/guetzli"
    description = "Perceptual JPEG encoder"
    topics = "jpeg", "compression"
    exports_sources = "patches/**"
    settings = "os", "compiler", "arch"
    generators = "pkg_config"
    requires = ["libpng/1.6.37"]
    @property
    def _source_subfolder(self):
        # Directory the upstream sources are extracted into.
        return "source_subfolder"
    @property
    def _is_msvc(self):
        return self.settings.compiler == "Visual Studio"
    def configure(self):
        """Reject configurations upstream cannot build (non Linux/Windows, libc++)."""
        if self.settings.os not in ["Linux", "Windows"]:
            raise ConanInvalidConfiguration("conan recipe for guetzli v{0} is not \
available in {1}.".format(self.version, self.settings.os))
        if self.settings.compiler.get_safe("libcxx") == "libc++":
            raise ConanInvalidConfiguration("conan recipe for guetzli v{0} cannot be\
 built with libc++".format(self.version))
    def source(self):
        # Fetch the release tarball and normalize the extracted directory name.
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "guetzli-" + self.version
        os.rename(extracted_dir, self._source_subfolder)
    def _patch_sources(self):
        # Apply every patch registered for this version in conandata.yml.
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)
    def build(self):
        self._patch_sources()
        if self._is_msvc:
            msbuild = MSBuild(self)
            with tools.chdir(self._source_subfolder):
                msbuild.build("guetzli.sln", build_type="Release")
        else:
            autotools = AutoToolsBuildEnvironment(self)
            with tools.chdir(self._source_subfolder):
                # Make pkg-config find the conan-generated .pc files.
                env_vars = {"PKG_CONFIG_PATH": self.build_folder}
                env_vars.update(autotools.vars)
                with tools.environment_append(env_vars):
                    make_args = [
                        "config=release",
                        # Fixed: was the corrupted literal "verbose=1'," which
                        # passed a stray quote and comma through to make.
                        "verbose=1",
                    ]
                    autotools.make(args=make_args)
    def package(self):
        # Only the executable is shipped; its build location differs per toolchain.
        if self._is_msvc:
            self.copy(os.path.join(self._source_subfolder, "bin", str(self.settings.arch), "Release", "guetzli.exe"), dst="bin", keep_path=False)
        else:
            self.copy(os.path.join(self._source_subfolder, "bin", "Release", "guetzli"), dst="bin", keep_path=False)
        self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
    def package_id(self):
        # The package is a standalone tool: the compiler used does not affect
        # consumers, so drop it from the package id.
        del self.info.settings.compiler
    def package_info(self):
        bindir = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable: {}".format(bindir))
| 37.792208 | 145 | 0.620619 |
acf147ac9cbf9259a68526672a840a5197bb59f3 | 322 | py | Python | meiduo_mall02/utils/models.py | hongyinwang/meiduo_project02 | 3f21773d2d98204400ea2c3738969ac2a593b242 | [
"MIT"
] | null | null | null | meiduo_mall02/utils/models.py | hongyinwang/meiduo_project02 | 3f21773d2d98204400ea2c3738969ac2a593b242 | [
"MIT"
] | null | null | null | meiduo_mall02/utils/models.py | hongyinwang/meiduo_project02 | 3f21773d2d98204400ea2c3738969ac2a593b242 | [
"MIT"
] | null | null | null | from django.db import models
class BaseModel(models.Model):
    """Abstract base model adding audit timestamp fields to concrete models."""
    # Set once when the row is inserted.
    create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Refreshed automatically on every save().
    update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间")
    class Meta:
        abstract = True # Abstract model, for inheritance only; migrations create no BaseModel table
| 21.466667 | 78 | 0.723602 |
acf148037e4daa9b4006e496a5439efe77b75671 | 1,438 | py | Python | airflow_server/dags/news_flash.py | ephraimberkovitch/anyway-etl | e5b4b8c18ef9899c8edbdbc8bfecc933b56c892b | [
"MIT"
] | null | null | null | airflow_server/dags/news_flash.py | ephraimberkovitch/anyway-etl | e5b4b8c18ef9899c8edbdbc8bfecc933b56c892b | [
"MIT"
] | 15 | 2021-08-22T11:53:47.000Z | 2022-02-21T17:09:53.000Z | airflow_server/dags/news_flash.py | ephraimberkovitch/anyway-etl | e5b4b8c18ef9899c8edbdbc8bfecc933b56c892b | [
"MIT"
] | 5 | 2021-07-20T22:19:35.000Z | 2022-02-22T09:44:25.000Z | from textwrap import dedent
from airflow import DAG
from airflow.utils.dates import days_ago
from anyway_etl_airflow.operators.cli_bash_operator import CliBashOperator
dag_kwargs = dict(
default_args={
'owner': 'airflow',
},
catchup=False,
start_date=days_ago(2),
)
with DAG('process-news-flash', **dag_kwargs, schedule_interval='*/5 * * * *') as process_news_flash_dag:
CliBashOperator(
'anyway-etl anyway-kubectl-exec python3 main.py process news-flash',
task_id='process-news-flash'
)
with DAG('update-news-flash', **dag_kwargs, schedule_interval=None,
description='Update a single news flash item based on id, must run manually with json, example:'
'{"news_flash_id": "65516"}') as update_news_flash_dag:
CliBashOperator(
'anyway-etl anyway-kubectl-exec python3 main.py '
'update-news-flash update --news_flash_id {{ dag_run.conf["news_flash_id"] }}',
task_id='update-news-flash'
)
with DAG('test-anyway-kubectl-exec', **dag_kwargs, schedule_interval=None) as test_anyway_kubectl_Exec:
CliBashOperator(
'''anyway-etl anyway-kubectl-exec -- python3 -c "{}"'''.format(dedent("""
import logging, time
logging.basicConfig(level=logging.DEBUG)
for i in range(20):
logging.info(str(i))
time.sleep(2)
""")),
task_id='test-anyway-kubectl-exec'
)
| 31.26087 | 105 | 0.662726 |
acf1484c247523724ad1fc7e7019f87af83f1ba2 | 520 | py | Python | tablemanager/migrations/0008_input_sld.py | dbca-asi/borgcollector | 4487fc1a37c057305852db49a05c704294a0e9e3 | [
"BSD-3-Clause"
] | 2 | 2016-01-20T02:26:06.000Z | 2016-02-16T02:47:24.000Z | tablemanager/migrations/0008_input_sld.py | dbca-asi/borgcollector | 4487fc1a37c057305852db49a05c704294a0e9e3 | [
"BSD-3-Clause"
] | 4 | 2020-02-11T23:40:10.000Z | 2021-09-22T04:27:50.000Z | tablemanager/migrations/0008_input_sld.py | dbca-wa/borgcollector | dab9464f2e58c7dbc039b4805bb894b168547938 | [
"BSD-3-Clause"
] | 4 | 2016-01-12T02:10:14.000Z | 2017-11-09T13:53:16.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tablemanager.models
class Migration(migrations.Migration):
    """Add an optional ``sld`` XML field (Styled Layer Descriptor) to Input."""
    dependencies = [
        ('tablemanager', '0007_auto_20150610_1156'),
    ]
    operations = [
        migrations.AddField(
            model_name='input',
            name='sld',
            field=tablemanager.models.XMLField(help_text='Styled Layer Descriptor', null=True, blank=True),
            preserve_default=True,
        ),
    ]
| 23.636364 | 107 | 0.638462 |
acf1491be3674a839537401791e095dcd115e68b | 6,815 | py | Python | Scripts/simulation/away_actions/away_action_tracker.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/away_actions/away_action_tracker.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/away_actions/away_action_tracker.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\away_actions\away_action_tracker.py
# Compiled at: 2019-08-01 19:20:45
# Size of source mod 2**32: 10606 bytes
from objects import ALL_HIDDEN_REASONS
from sims.sim_info_lod import SimInfoLODLevel
from sims.sim_info_tracker import SimInfoTracker
from sims4.callback_utils import CallableList
from sims4.utils import classproperty
import services, sims4
logger = sims4.log.Logger('AwayActionTracker')
class AwayActionTracker(SimInfoTracker):
    """Per-SimInfo tracker for the currently running away action.

    NOTE(review): this is decompiled source (see the uncompyle6 header);
    control flow, especially in start(), may not match the original exactly.
    """
    def __init__(self, sim_info):
        self._sim_info = sim_info
        self._current_away_action = None
        self._on_away_action_started = CallableList()
        self._on_away_action_ended = CallableList()
        # Resend the away action to clients whenever it starts or ends.
        self.add_on_away_action_started_callback(self._resend_away_action)
        self.add_on_away_action_ended_callback(self._resend_away_action)
    @classproperty
    def _tracker_lod_threshold(cls):
        # Tracker is only active at full level-of-detail.
        return SimInfoLODLevel.FULL
    @property
    def sim_info(self):
        return self._sim_info
    @property
    def current_away_action(self):
        return self._current_away_action
    def _resend_away_action(self, _):
        # Callback target: push the current away action state to clients.
        self.sim_info.resend_current_away_action()
    def is_sim_info_valid_to_run_away_actions(self):
        """Away actions only run for played (non-NPC), non-baby sims."""
        if self._sim_info.is_npc:
            return False
        if self._sim_info.is_baby:
            return False
        return True
    def _run_current_away_action(self):
        # Start the action and notify listeners.
        self._current_away_action.run(self._away_action_exit_condition_callback)
        self._on_away_action_started(self._current_away_action)
    def _find_away_action_from_load(self):
        """Return the first tuned away action class that wants to run on load, if any."""
        away_actions_manager = services.get_instance_manager(sims4.resources.Types.AWAY_ACTION)
        for away_action_cls in away_actions_manager.types.values():
            if away_action_cls.should_run_on_load(self.sim_info):
                return away_action_cls
    def start(self, on_travel_away=False):
        """Start the pending/loaded away action, or fall back to the default one."""
        if not self.is_sim_info_valid_to_run_away_actions():
            logger.error('Attempting to start away action tracker on invalid sim info {}.', (self._sim_info),
              owner='jjacobson')
            return
        if self._sim_info.is_instanced(allow_hidden_flags=ALL_HIDDEN_REASONS):
            # NOTE(review): decompiler artifact -- `(a or b) is not None` is
            # almost certainly not the original condition; it raises if
            # on_travel_away is truthy while _current_away_action is None.
            # Left as-is pending comparison with the original bytecode.
            if (on_travel_away or self._current_away_action) is not None:
                if not self._current_away_action.available_when_instanced:
                    self.stop()
                    return
                else:
                    return
        if self._current_away_action is not None:
            self._run_current_away_action()
            return
        away_action_cls = self._find_away_action_from_load()
        if away_action_cls is not None:
            self.create_and_apply_away_action(away_action_cls)
            return
        self.reset_to_default_away_action(on_travel_away=on_travel_away)
    def stop(self):
        """Stop the running action (if any), notify listeners, and clear it."""
        if self._current_away_action is not None:
            if self._current_away_action.is_running:
                self._current_away_action.stop()
            self._on_away_action_ended(self._current_away_action)
            self._current_away_action = None
    def clean_up(self):
        # Detach our own callbacks before stopping so listeners are not re-notified later.
        self.remove_on_away_action_started_callback(self._resend_away_action)
        self.remove_on_away_action_ended_callback(self._resend_away_action)
        self.stop()
    def refresh(self, on_travel_away=False):
        """Re-evaluate whether an away action should run for this sim's zone."""
        if not self.is_sim_info_valid_to_run_away_actions():
            return
        else:
            current_zone = services.current_zone()
            # Defer until the zone has finished spinning up.
            if not current_zone.is_zone_running:
                if not current_zone.are_sims_hitting_their_marks:
                    return
            if self._sim_info.zone_id == services.current_zone_id():
                # Sim is in the active zone: away actions do not apply.
                self.stop()
                self._current_away_action = None
            else:
                self.start(on_travel_away=on_travel_away)
    def create_and_apply_away_action(self, away_action_cls, target=None):
        """Replace the current away action with a new instance of away_action_cls."""
        if not self.is_sim_info_valid_to_run_away_actions():
            logger.warn('Attempting to apply away action on invalid sim info {}.', (self._sim_info),
              owner='jjacobson')
            return
        self.stop()
        self._current_away_action = away_action_cls(self, target=target)
        self._run_current_away_action()
    def _away_action_exit_condition_callback(self, _):
        # Exit condition met: fall back to the sim's default away action.
        self.reset_to_default_away_action()
    def reset_to_default_away_action(self, on_travel_away=False):
        """Apply the sim's default away action, or just stop if there is none."""
        default_away_action = self.sim_info.get_default_away_action(on_travel_away=on_travel_away)
        if default_away_action is None:
            self.stop()
            return
        self.create_and_apply_away_action(default_away_action)
    def save_away_action_info_to_proto(self, away_action_tracker_proto):
        """Persist the current away action (guid and optional target sim id)."""
        if self._current_away_action is not None:
            away_action_tracker_proto.away_action.away_action_id = self._current_away_action.guid64
            target = self._current_away_action.target
            if target is not None:
                away_action_tracker_proto.away_action.target_sim_id = target.id
    def load_away_action_info_from_proto(self, away_action_tracker_proto):
        """Restore the saved away action; does not start it (see start())."""
        if away_action_tracker_proto.HasField('away_action'):
            away_action_cls = services.get_instance_manager(sims4.resources.Types.AWAY_ACTION).get(away_action_tracker_proto.away_action.away_action_id)
            if away_action_cls is None:
                # Tuning was removed since the save was written.
                logger.error('Failed to load away action id {}', away_action_tracker_proto.away_action.away_action_id)
                return
            elif away_action_tracker_proto.away_action.HasField('target_sim_id'):
                target = services.sim_info_manager().get(away_action_tracker_proto.away_action.target_sim_id)
            else:
                target = None
            self._current_away_action = away_action_cls(self, target=target)
    def add_on_away_action_started_callback(self, callback):
        self._on_away_action_started.append(callback)
    def remove_on_away_action_started_callback(self, callback):
        self._on_away_action_started.remove(callback)
    def add_on_away_action_ended_callback(self, callback):
        self._on_away_action_ended.append(callback)
    def remove_on_away_action_ended_callback(self, callback):
        self._on_away_action_ended.remove(callback)
    def stop_current_away_action(self):
        # Stop without clearing/notifying; compare stop() above.
        if self._current_away_action is not None:
            self._current_away_action.stop()
if new_lod == SimInfoLODLevel.MINIMUM:
self.clean_up() | 42.59375 | 152 | 0.701981 |
acf14972dbbacd16844e8bb95e94d395308f337a | 730 | py | Python | convert-videos-to-frames.py | jmorenov/RA-practica2-opencv | 99c549aa2b2bf4a2222536e066168ccff5b892d0 | [
"MIT"
] | null | null | null | convert-videos-to-frames.py | jmorenov/RA-practica2-opencv | 99c549aa2b2bf4a2222536e066168ccff5b892d0 | [
"MIT"
] | null | null | null | convert-videos-to-frames.py | jmorenov/RA-practica2-opencv | 99c549aa2b2bf4a2222536e066168ccff5b892d0 | [
"MIT"
] | null | null | null | import cv2
import os
import videos
# Extract up to 115 grayscale frames (indices 0..114) from every matching
# video and write them as JPEGs under Frames/Nodo_<i>/.
# NOTE(review): cap is never released; consider adding cap.release() after
# the frame loop upstream (the loop tail lives outside this block).
VideosDirectory = "Videos/"
FilePattern = "Nodo_*.MOV"
ListOfVideos = videos.load_videos_filename(VideosDirectory, FilePattern)
for i in range(len(ListOfVideos)):
    cap = videos.load_video(ListOfVideos[i])
    nframes = 0
    # Loop-invariant: the output directory depends only on the video index.
    directory_to_save = 'Frames/Nodo_' + str(i + 1) + '/'
    while(cap.isOpened() and nframes <= 114):
        ret, frame = cap.read()
        if not ret:  # end of stream or decode failure
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame_name = 'imagen' + str(nframes+1) + '.jpg'
        if not os.path.exists(directory_to_save):
            os.makedirs(directory_to_save)
        cv2.imwrite(directory_to_save + frame_name, gray)
nframes += 1 | 26.071429 | 72 | 0.641096 |
acf14a0595be487bed530fa9f2d9e4184972114e | 318 | py | Python | scripts/train_test_split.py | Fumiya-Matsumoto/telecom_customer | 18d2109c327765155ea82e746a3791185f10f8fb | [
"RSA-MD"
] | null | null | null | scripts/train_test_split.py | Fumiya-Matsumoto/telecom_customer | 18d2109c327765155ea82e746a3791185f10f8fb | [
"RSA-MD"
] | null | null | null | scripts/train_test_split.py | Fumiya-Matsumoto/telecom_customer | 18d2109c327765155ea82e746a3791185f10f8fb | [
"RSA-MD"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv('./data/input/Telecom_customer churn.csv')
# トレーニングデータ,テストデータの分割
train, test = train_test_split(df, test_size=0.2, random_state=0)
train.to_csv('./data/input/train.csv')
test.to_csv('./data/input/test.csv') | 28.909091 | 65 | 0.77673 |
acf14a952aa66b33df3e7777d0dcdde90c08d48c | 5,533 | py | Python | src/shortsim/scripts/ngrcos.py | hsci-r/shortsim | 8a86f91a7d286887e6674d1f64bafed8900e470a | [
"MIT"
] | null | null | null | src/shortsim/scripts/ngrcos.py | hsci-r/shortsim | 8a86f91a7d286887e6674d1f64bafed8900e470a | [
"MIT"
] | 3 | 2021-07-07T10:34:00.000Z | 2021-07-07T10:40:05.000Z | src/shortsim/scripts/ngrcos.py | hsci-r/shortsim | 8a86f91a7d286887e6674d1f64bafed8900e470a | [
"MIT"
] | null | null | null | import argparse
from collections import defaultdict
from operator import itemgetter
import sys
import tqdm
import warnings
import faiss
import numpy as np
from numpy.linalg import norm
# TODO refactor some of the functionality into a library, so that it can
# also be called from Python code.
def ngrams(string, n):
    """Lazily yield every consecutive character n-gram of *string*."""
    last_start = len(string) - n
    return (string[start:start + n] for start in range(last_start + 1))
def determine_top_ngrams(verses, n, dim):
    """Map the *dim* most frequent n-grams in the verse texts to ids 0..dim-1.

    Ids are assigned in descending frequency order (ties keep first-seen
    order, since sorted() is stable).
    """
    counts = defaultdict(int)
    for _, text in verses:
        for gram in ngrams(text, n):
            counts[gram] += 1
    ranked = sorted(counts.items(), key=itemgetter(1), reverse=True)
    return {gram: idx for idx, (gram, _) in enumerate(ranked[:dim])}
def vectorize(verses, ngram_ids, n=2, dim=200, min_ngrams=10):
    """Turn verses into L2-normalized n-gram count vectors.

    Returns (ids, texts, matrix) for the verses that contain at least
    *min_ngrams* known n-grams; rows are float32 and unit-norm.
    FIXME: memory is wasted by keeping ids and texts alongside the input.
    TODO: make the progress printer optional.
    """
    kept_ids, kept_texts, rows = [], [], []
    for verse_id, text in tqdm.tqdm(verses):
        known = [ngram_ids[gram] for gram in ngrams(text, n) if gram in ngram_ids]
        if len(known) < min_ngrams:
            continue
        vec = np.zeros(dim, dtype=np.float32)
        for column in known:
            vec[column] += 1
        rows.append(vec)
        kept_ids.append(verse_id)
        kept_texts.append(text)
    m = np.vstack(rows)
    m = np.divide(m, norm(m, axis=1).reshape((m.shape[0], 1)))
    return kept_ids, kept_texts, m
def find_similarities(index, m, k, threshold, query_size, print_progress):
    """Yield (query_row, neighbor_row, similarity) triples above *threshold*.

    Queries the FAISS *index* in batches of *query_size* rows of *m*,
    keeping up to *k* neighbors per row and skipping self-matches.
    """
    progress = tqdm.tqdm(total=m.shape[0]) if print_progress else None
    for start in range(0, m.shape[0], query_size):
        batch = range(start, min(m.shape[0], start + query_size))
        D, I = index.search(m[batch,], k)
        for row, q in enumerate(batch):
            for col in range(k):
                if q != I[row, col] and D[row, col] >= threshold:
                    yield (q, I[row, col], D[row, col])
        if progress is not None:
            progress.update(D.shape[0])
def read_verses(fp):
    """Read (verse_id, text) pairs from tab-separated lines of *fp*.

    Lines with fewer than two tab-separated fields are skipped; extra
    fields beyond the second are ignored. Trailing whitespace (including
    a trailing tab) is stripped before splitting.
    """
    verses = []
    for line in fp:
        fields = line.rstrip().split('\t')
        if len(fields) >= 2:
            verses.append((fields[0], fields[1]))
    return verses
def parse_arguments(argv=None):
    """Parse command-line options for the n-gram similarity tool.

    :param argv: optional list of argument strings; None (the default)
        preserves the old behavior of reading sys.argv[1:]. Added so the
        parser can be exercised in tests without touching sys.argv.
    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description='Compute the n-gram similarities on short strings.')
    parser.add_argument(
        '-d', '--dim', type=int, default=200,
        help='The number of dimensions of n-gram vectors')
    parser.add_argument('-g', '--use-gpu', action='store_true')
    parser.add_argument(
        '-k', type=int, default=10,
        help='The number of nearest neighbors to find for each verse.')
    parser.add_argument(
        '-i', '--index-file', metavar='FILE',
        help='Read the verses to index from a separate file.')
    parser.add_argument(
        '-m', '--min-ngrams', type=int, default=10,
        help='Minimum number of known n-grams to consider a verse.')
    parser.add_argument(
        '-n', type=int, default=2,
        help='The size (`n`) of the n-grams (default: 2, i.e. ngrams).')
    parser.add_argument(
        '-q', '--query-size', type=int, default=100,
        help='The number of verses to pass in a single query '
             '(doesn\'t affect the results, only performance)')
    parser.add_argument(
        '-t', '--threshold', type=float, default=0.7,
        help='Minimum similarity to output.')
    parser.add_argument(
        '-T', '--text', action='store_true',
        help='Print the strings additionally to IDs.')
    parser.add_argument(
        '-p', '--print-progress', action='store_true',
        help='Print a progress bar.')
    return parser.parse_args(argv)
def main():
    """Read query verses from stdin, index them (or a separate index file)
    with FAISS, and print tab-separated similarity pairs above threshold."""
    args = parse_arguments()
    res = None
    if args.use_gpu:
        try:
            res = faiss.StandardGpuResources()
        except Exception:
            warnings.warn('GPU not available!')
    query_verses = read_verses(sys.stdin)
    index_verses = []
    if args.index_file is not None:
        with open(args.index_file) as fp:
            index_verses = read_verses(fp)
    sys.stderr.write('Counting n-gram frequencies\n')
    # The vocabulary is built over both corpora so query and index vectors
    # share one coordinate space.
    ngram_ids = determine_top_ngrams(index_verses+query_verses, args.n, args.dim)
    sys.stderr.write(' '.join(ngram_ids.keys()) + '\n')
    sys.stderr.write('Creating a dense matrix\n')
    query_v_ids, query_v_texts, query_m = \
        vectorize(query_verses, ngram_ids,
                  n=args.n, dim=args.dim, min_ngrams=args.min_ngrams)
    # Without a separate index file, the query corpus is searched against itself.
    index_v_ids, index_v_texts, index_m = query_v_ids, query_v_texts, query_m
    if index_verses:
        index_v_ids, index_v_texts, index_m = \
            vectorize(index_verses, ngram_ids,
                      n=args.n, dim=args.dim, min_ngrams=args.min_ngrams)
    sys.stderr.write('Creating a FAISS index\n')
    # Inner-product index; rows are unit-norm, so scores are cosine similarities.
    index = faiss.IndexFlatIP(args.dim)
    if res is not None:
        index = faiss.index_cpu_to_gpu(res, 0, index)
    index.add(index_m)
    sys.stderr.write('Searching for nearest neighbors\n')
    # (removed: unused leftover `progressbar = None`; progress handling
    # lives inside find_similarities)
    sims = find_similarities(index, query_m, args.k, args.threshold,
                             args.query_size, args.print_progress)
    for i, j, sim in sims:
        v1_id = query_v_ids[i]
        v2_id = index_v_ids[j]
        if args.text:
            v1_text = query_v_texts[i]
            v2_text = index_v_texts[j]
            print(v1_id, v1_text, v2_id, v2_text, sim, sep='\t')
        else:
            print(v1_id, v2_id, sim, sep='\t')
| 34.154321 | 81 | 0.61088 |
acf14ba3f210766f562c5d6a70d93f755be3c0a7 | 12,396 | py | Python | metric/torchMoji/torchmoji/lstm.py | andreamad8/PPCM | ac3a06502f9e0126b6b644e1f68c38ca744b3862 | [
"MIT"
] | 25 | 2020-09-24T11:16:42.000Z | 2022-03-31T11:01:59.000Z | metric/torchMoji/torchmoji/lstm.py | andreamad8/PPCM | ac3a06502f9e0126b6b644e1f68c38ca744b3862 | [
"MIT"
] | 9 | 2020-12-19T06:08:09.000Z | 2021-07-20T06:15:44.000Z | metric/torchMoji/torchmoji/lstm.py | andreamad8/PPCM | ac3a06502f9e0126b6b644e1f68c38ca744b3862 | [
"MIT"
] | 2 | 2020-11-04T11:17:44.000Z | 2021-09-16T03:04:03.000Z | # -*- coding: utf-8 -*-
""" Implement a pyTorch LSTM with hard sigmoid reccurent activation functions.
Adapted from the non-cuda variant of pyTorch LSTM at
https://github.com/pytorch/pytorch/blob/master/torch/nn/_functions/rnn.py
"""
from __future__ import print_function, division
import math
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import PackedSequence
import torch.nn.functional as F
class LSTMHardSigmoid(Module):
    """LSTM module mirroring torch.nn.LSTM's constructor and forward contract
    (including PackedSequence input), dispatching to the pure-Python
    AutogradRNN path below instead of cuDNN.
    NOTE(review): adapted from the non-CUDA variant of torch's rnn.py; some
    CUDA-era flat-weight plumbing survives here as dead code (see forward()).
    The hard-sigmoid activation itself is presumably applied in LSTMCell,
    which is defined elsewhere in this module -- confirm there.
    """
    def __init__(self, input_size, hidden_size,
                 num_layers=1, bias=True, batch_first=False,
                 dropout=0, bidirectional=False):
        super(LSTMHardSigmoid, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.dropout_state = {}
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1
        # The 4 LSTM gates are packed into a single parameter block per weight.
        gate_size = 4 * hidden_size
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # Layers above the first consume the concatenation of both directions.
                layer_input_size = input_size if layer == 0 else hidden_size * num_directions
                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
                w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
                b_ih = Parameter(torch.Tensor(gate_size))
                b_hh = Parameter(torch.Tensor(gate_size))
                layer_params = (w_ih, w_hh, b_ih, b_hh)
                suffix = '_reverse' if direction == 1 else ''
                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                if bias:
                    param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
                param_names = [x.format(layer, suffix) for x in param_names]
                # Register parameters under torch.nn.LSTM-compatible names.
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._all_weights.append(param_names)
        self.flatten_parameters()
        self.reset_parameters()
    def flatten_parameters(self):
        """Resets parameter data pointer so that they can use faster code paths.
        Right now, this is a no-op since we don't use CUDA acceleration.
        """
        self._data_ptrs = []
    def _apply(self, fn):
        # Keep the (empty) flat-weight bookkeeping consistent after .to()/.cpu()/etc.
        ret = super(LSTMHardSigmoid, self)._apply(fn)
        self.flatten_parameters()
        return ret
    def reset_parameters(self):
        # Uniform init in [-1/sqrt(H), 1/sqrt(H)], matching torch.nn.LSTM.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    def forward(self, input, hx=None):
        """Run the LSTM over ``input``; returns (output, hidden).
        ``input`` may be a PackedSequence, in which case the output is
        re-packed with the same batch sizes.
        """
        is_packed = isinstance(input, PackedSequence)
        if is_packed:
            batch_sizes = input[1]
            input = input[0]
            max_batch_size = batch_sizes[0]
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
        if hx is None:
            # Default to zero-filled hidden and cell states of matching dtype/device.
            num_directions = 2 if self.bidirectional else 1
            hx = torch.autograd.Variable(input.data.new(self.num_layers *
                                                        num_directions,
                                                        max_batch_size,
                                                        self.hidden_size).zero_(), requires_grad=False)
            hx = (hx, hx)
        # NOTE(review): _data_ptrs is always [] (see flatten_parameters) while
        # self.parameters() is non-empty, so has_flat_weights is always False
        # and _param_buf_size -- which is never assigned in this class -- is
        # never read. Dead code kept from the CUDA implementation.
        has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs
        if has_flat_weights:
            first_data = next(self.parameters()).data
            assert first_data.storage().size() == self._param_buf_size
            flat_weight = first_data.new().set_(first_data.storage(), 0, torch.Size([self._param_buf_size]))
        else:
            flat_weight = None
        func = AutogradRNN(
            self.input_size,
            self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            dropout=self.dropout,
            train=self.training,
            bidirectional=self.bidirectional,
            batch_sizes=batch_sizes,
            dropout_state=self.dropout_state,
            flat_weight=flat_weight
        )
        output, hidden = func(input, self.all_weights, hx)
        if is_packed:
            output = PackedSequence(output, batch_sizes)
        return output, hidden
    def __repr__(self):
        # Only mention arguments that differ from their defaults.
        s = '{name}({input_size}, {hidden_size}'
        if self.num_layers != 1:
            s += ', num_layers={num_layers}'
        if self.bias is not True:
            s += ', bias={bias}'
        if self.batch_first is not False:
            s += ', batch_first={batch_first}'
        if self.dropout != 0:
            s += ', dropout={dropout}'
        if self.bidirectional is not False:
            s += ', bidirectional={bidirectional}'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def __setstate__(self, d):
        # Unpickling support: older checkpoints may lack _data_ptrs and may
        # store parameter objects rather than name lists in all_weights; in
        # that case the name lists are rebuilt from the module configuration.
        super(LSTMHardSigmoid, self).__setstate__(d)
        self.__dict__.setdefault('_data_ptrs', [])
        if 'all_weights' in d:
            self._all_weights = d['all_weights']
        if isinstance(self._all_weights[0][0], str):
            return
        num_layers = self.num_layers
        num_directions = 2 if self.bidirectional else 1
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                suffix = '_reverse' if direction == 1 else ''
                weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
                weights = [x.format(layer, suffix) for x in weights]
                if self.bias:
                    self._all_weights += [weights]
                else:
                    self._all_weights += [weights[:2]]
    @property
    def all_weights(self):
        # Resolve the stored parameter names to the live Parameter objects.
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
def AutogradRNN(input_size, hidden_size, num_layers=1, batch_first=False,
                dropout=0, train=True, bidirectional=False, batch_sizes=None,
                dropout_state=None, flat_weight=None):
    # Build a forward(input, weight, hidden) closure running the RNN in pure
    # Python autograd. ``batch_sizes`` is None for padded input and a
    # PackedSequence batch-size list otherwise.
    # NOTE(review): LSTMCell and StackedRNN are defined elsewhere in this
    # module (outside this excerpt).
    cell = LSTMCell
    if batch_sizes is None:
        rec_factory = Recurrent
    else:
        rec_factory = variable_recurrent_factory(batch_sizes)
    # One direction, or a (forward, reverse) pair for bidirectional nets.
    if bidirectional:
        layer = (rec_factory(cell), rec_factory(cell, reverse=True))
    else:
        layer = (rec_factory(cell),)
    func = StackedRNN(layer,
                      num_layers,
                      True,
                      dropout=dropout,
                      train=train)
    def forward(input, weight, hidden):
        # The inner machinery is time-major; transpose padded batch-first
        # input on the way in and out. Packed input is already time-major.
        if batch_first and batch_sizes is None:
            input = input.transpose(0, 1)
        nexth, output = func(input, hidden, weight)
        if batch_first and batch_sizes is None:
            output = output.transpose(0, 1)
        return output, nexth
    return forward
def Recurrent(inner, reverse=False):
    """Wrap a step function *inner* into a whole-sequence forward pass.

    The returned forward(input, hidden, weight) applies *inner* at every
    time step (back to front when *reverse*), returning the final hidden
    state and the per-step outputs stacked along time (always in
    front-to-back order).
    """
    def forward(input, hidden, weight):
        seq_len = input.size(0)
        time_steps = reversed(range(seq_len)) if reverse else range(seq_len)
        outputs = []
        for t in time_steps:
            hidden = inner(input[t], hidden, *weight)
            # hack to handle LSTM: its hidden state is an (h, c) tuple.
            step_out = hidden[0] if isinstance(hidden, tuple) else hidden
            outputs.append(step_out)
        if reverse:
            outputs.reverse()
        stacked = torch.cat(outputs, 0).view(seq_len, *outputs[0].size())
        return hidden, stacked
    return forward
def variable_recurrent_factory(batch_sizes):
def fac(inner, reverse=False):
if reverse:
return VariableRecurrentReverse(batch_sizes, inner)
else:
return VariableRecurrent(batch_sizes, inner)
return fac
def VariableRecurrent(batch_sizes, inner):
def forward(input, hidden, weight):
output = []
input_offset = 0
last_batch_size = batch_sizes[0]
hiddens = []
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
for batch_size in batch_sizes:
step_input = input[input_offset:input_offset + batch_size]
input_offset += batch_size
dec = last_batch_size - batch_size
if dec > 0:
hiddens.append(tuple(h[-dec:] for h in hidden))
hidden = tuple(h[:-dec] for h in hidden)
last_batch_size = batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight),)
else:
hidden = inner(step_input, hidden, *weight)
output.append(hidden[0])
hiddens.append(hidden)
hiddens.reverse()
hidden = tuple(torch.cat(h, 0) for h in zip(*hiddens))
assert hidden[0].size(0) == batch_sizes[0]
if flat_hidden:
hidden = hidden[0]
output = torch.cat(output, 0)
return hidden, output
return forward
def VariableRecurrentReverse(batch_sizes, inner):
def forward(input, hidden, weight):
output = []
input_offset = input.size(0)
last_batch_size = batch_sizes[-1]
initial_hidden = hidden
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
initial_hidden = (initial_hidden,)
hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
for batch_size in reversed(batch_sizes):
inc = batch_size - last_batch_size
if inc > 0:
hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
for h, ih in zip(hidden, initial_hidden))
last_batch_size = batch_size
step_input = input[input_offset - batch_size:input_offset]
input_offset -= batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight),)
else:
hidden = inner(step_input, hidden, *weight)
output.append(hidden[0])
output.reverse()
output = torch.cat(output, 0)
if flat_hidden:
hidden = hidden[0]
return hidden, output
return forward
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
num_directions = len(inners)
total_layers = num_layers * num_directions
def forward(input, hidden, weight):
assert(len(weight) == total_layers)
next_hidden = []
if lstm:
hidden = list(zip(*hidden))
for i in range(num_layers):
all_output = []
for j, inner in enumerate(inners):
l = i * num_directions + j
hy, output = inner(input, hidden[l], weight[l])
next_hidden.append(hy)
all_output.append(output)
input = torch.cat(all_output, input.dim() - 1)
if dropout != 0 and i < num_layers - 1:
input = F.dropout(input, p=dropout, training=train, inplace=False)
if lstm:
next_h, next_c = zip(*next_hidden)
next_hidden = (
torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
)
else:
next_hidden = torch.cat(next_hidden, 0).view(
total_layers, *next_hidden[0].size())
return next_hidden, input
return forward
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
"""
A modified LSTM cell with hard sigmoid activation on the input, forget and output gates.
"""
hx, cx = hidden
gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = hard_sigmoid(ingate)
forgetgate = hard_sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = hard_sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def hard_sigmoid(x):
"""
Computes element-wise hard sigmoid of x.
See e.g. https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py#L279
"""
x = (0.2 * x) + 0.5
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
| 34.625698 | 108 | 0.580994 |
acf14de4dba14fcb4137c9f5f8ba0def8b7a3045 | 12,162 | py | Python | aiohttp/cookiejar.py | rhdxmr/aiohttp | cce5f2df51d93c57ff2d77c82b1e3dba95311727 | [
"Apache-2.0"
] | null | null | null | aiohttp/cookiejar.py | rhdxmr/aiohttp | cce5f2df51d93c57ff2d77c82b1e3dba95311727 | [
"Apache-2.0"
] | 93 | 2020-10-15T19:48:26.000Z | 2022-03-28T01:20:21.000Z | aiohttp/cookiejar.py | rhdxmr/aiohttp | cce5f2df51d93c57ff2d77c82b1e3dba95311727 | [
"Apache-2.0"
] | null | null | null | import asyncio
import datetime
import os # noqa
import pathlib
import pickle
import re
from collections import defaultdict
from http.cookies import BaseCookie, Morsel, SimpleCookie # noqa
from typing import ( # noqa
DefaultDict,
Dict,
Iterable,
Iterator,
Mapping,
Optional,
Set,
Tuple,
Union,
cast,
)
from yarl import URL
from .abc import AbstractCookieJar
from .helpers import is_ip_address, next_whole_second
from .typedefs import LooseCookies, PathLike
__all__ = ('CookieJar', 'DummyCookieJar')
CookieItem = Union[str, 'Morsel[str]']
class CookieJar(AbstractCookieJar):
"""Implements cookie storage adhering to RFC 6265."""
DATE_TOKENS_RE = re.compile(
r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*"
r"(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)")
DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})")
DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})")
DATE_MONTH_RE = re.compile("(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|"
"(aug)|(sep)|(oct)|(nov)|(dec)", re.I)
DATE_YEAR_RE = re.compile(r"(\d{2,4})")
MAX_TIME = datetime.datetime.max.replace(
tzinfo=datetime.timezone.utc)
def __init__(self, *, unsafe: bool=False, quote_cookie: bool=True,
loop: Optional[asyncio.AbstractEventLoop]=None) -> None:
super().__init__(loop=loop)
self._cookies = defaultdict(SimpleCookie) #type: DefaultDict[str, SimpleCookie[str]] # noqa
self._host_only_cookies = set() # type: Set[Tuple[str, str]]
self._unsafe = unsafe
self._quote_cookie = quote_cookie
self._next_expiration = next_whole_second()
self._expirations = {} # type: Dict[Tuple[str, str], datetime.datetime] # noqa: E501
def save(self, file_path: PathLike) -> None:
file_path = pathlib.Path(file_path)
with file_path.open(mode='wb') as f:
pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)
def load(self, file_path: PathLike) -> None:
file_path = pathlib.Path(file_path)
with file_path.open(mode='rb') as f:
self._cookies = pickle.load(f)
def clear(self) -> None:
self._cookies.clear()
self._host_only_cookies.clear()
self._next_expiration = next_whole_second()
self._expirations.clear()
def __iter__(self) -> 'Iterator[Morsel[str]]':
self._do_expiration()
for val in self._cookies.values():
yield from val.values()
def __len__(self) -> int:
return sum(1 for i in self)
def _do_expiration(self) -> None:
now = datetime.datetime.now(datetime.timezone.utc)
if self._next_expiration > now:
return
if not self._expirations:
return
next_expiration = self.MAX_TIME
to_del = []
cookies = self._cookies
expirations = self._expirations
for (domain, name), when in expirations.items():
if when <= now:
cookies[domain].pop(name, None)
to_del.append((domain, name))
self._host_only_cookies.discard((domain, name))
else:
next_expiration = min(next_expiration, when)
for key in to_del:
del expirations[key]
try:
self._next_expiration = (next_expiration.replace(microsecond=0) +
datetime.timedelta(seconds=1))
except OverflowError:
self._next_expiration = self.MAX_TIME
def _expire_cookie(self, when: datetime.datetime, domain: str, name: str
) -> None:
self._next_expiration = min(self._next_expiration, when)
self._expirations[(domain, name)] = when
def update_cookies(self,
cookies: LooseCookies,
response_url: URL=URL()) -> None:
"""Update cookies."""
hostname = response_url.raw_host
if not self._unsafe and is_ip_address(hostname):
# Don't accept cookies from IPs
return
if isinstance(cookies, Mapping):
cookies = cookies.items()
for name, cookie in cookies:
if not isinstance(cookie, Morsel):
tmp = SimpleCookie() # type: SimpleCookie[str]
tmp[name] = cookie # type: ignore
cookie = tmp[name]
domain = cookie["domain"]
# ignore domains with trailing dots
if domain.endswith('.'):
domain = ""
del cookie["domain"]
if not domain and hostname is not None:
# Set the cookie's domain to the response hostname
# and set its host-only-flag
self._host_only_cookies.add((hostname, name))
domain = cookie["domain"] = hostname
if domain.startswith("."):
# Remove leading dot
domain = domain[1:]
cookie["domain"] = domain
if hostname and not self._is_domain_match(domain, hostname):
# Setting cookies for different domains is not allowed
continue
path = cookie["path"]
if not path or not path.startswith("/"):
# Set the cookie's path to the response path
path = response_url.path
if not path.startswith("/"):
path = "/"
else:
# Cut everything from the last slash to the end
path = "/" + path[1:path.rfind("/")]
cookie["path"] = path
max_age = cookie["max-age"]
if max_age:
try:
delta_seconds = int(max_age)
try:
max_age_expiration = (
datetime.datetime.now(datetime.timezone.utc) +
datetime.timedelta(seconds=delta_seconds))
except OverflowError:
max_age_expiration = self.MAX_TIME
self._expire_cookie(max_age_expiration,
domain, name)
except ValueError:
cookie["max-age"] = ""
else:
expires = cookie["expires"]
if expires:
expire_time = self._parse_date(expires)
if expire_time:
self._expire_cookie(expire_time,
domain, name)
else:
cookie["expires"] = ""
self._cookies[domain][name] = cookie
self._do_expiration()
def filter_cookies(self,
request_url: URL=URL()
) -> Union['BaseCookie[str]', 'SimpleCookie[str]']:
"""Returns this jar's cookies filtered by their attributes."""
self._do_expiration()
request_url = URL(request_url)
filtered: Union['SimpleCookie[str]', 'BaseCookie[str]'] = (
SimpleCookie() if self._quote_cookie
else BaseCookie()
)
hostname = request_url.raw_host or ""
is_not_secure = request_url.scheme not in ("https", "wss")
for cookie in self:
name = cookie.key
domain = cookie["domain"]
# Send shared cookies
if not domain:
filtered[name] = cookie.value
continue
if not self._unsafe and is_ip_address(hostname):
continue
if (domain, name) in self._host_only_cookies:
if domain != hostname:
continue
elif not self._is_domain_match(domain, hostname):
continue
if not self._is_path_match(request_url.path, cookie["path"]):
continue
if is_not_secure and cookie["secure"]:
continue
# It's critical we use the Morsel so the coded_value
# (based on cookie version) is preserved
mrsl_val = cast('Morsel[str]', cookie.get(cookie.key, Morsel()))
mrsl_val.set(cookie.key, cookie.value, cookie.coded_value)
filtered[name] = mrsl_val
return filtered
@staticmethod
def _is_domain_match(domain: str, hostname: str) -> bool:
"""Implements domain matching adhering to RFC 6265."""
if hostname == domain:
return True
if not hostname.endswith(domain):
return False
non_matching = hostname[:-len(domain)]
if not non_matching.endswith("."):
return False
return not is_ip_address(hostname)
@staticmethod
def _is_path_match(req_path: str, cookie_path: str) -> bool:
"""Implements path matching adhering to RFC 6265."""
if not req_path.startswith("/"):
req_path = "/"
if req_path == cookie_path:
return True
if not req_path.startswith(cookie_path):
return False
if cookie_path.endswith("/"):
return True
non_matching = req_path[len(cookie_path):]
return non_matching.startswith("/")
@classmethod
def _parse_date(cls, date_str: str) -> Optional[datetime.datetime]:
"""Implements date string parsing adhering to RFC 6265."""
if not date_str:
return None
found_time = False
found_day = False
found_month = False
found_year = False
hour = minute = second = 0
day = 0
month = 0
year = 0
for token_match in cls.DATE_TOKENS_RE.finditer(date_str):
token = token_match.group("token")
if not found_time:
time_match = cls.DATE_HMS_TIME_RE.match(token)
if time_match:
found_time = True
hour, minute, second = [
int(s) for s in time_match.groups()]
continue
if not found_day:
day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)
if day_match:
found_day = True
day = int(day_match.group())
continue
if not found_month:
month_match = cls.DATE_MONTH_RE.match(token)
if month_match:
found_month = True
assert month_match.lastindex is not None
month = month_match.lastindex
continue
if not found_year:
year_match = cls.DATE_YEAR_RE.match(token)
if year_match:
found_year = True
year = int(year_match.group())
if 70 <= year <= 99:
year += 1900
elif 0 <= year <= 69:
year += 2000
if False in (found_day, found_month, found_year, found_time):
return None
if not 1 <= day <= 31:
return None
if year < 1601 or hour > 23 or minute > 59 or second > 59:
return None
return datetime.datetime(year, month, day,
hour, minute, second,
tzinfo=datetime.timezone.utc)
class DummyCookieJar(AbstractCookieJar):
"""Implements a dummy cookie storage.
It can be used with the ClientSession when no cookie processing is needed.
"""
def __init__(self, *,
loop: Optional[asyncio.AbstractEventLoop]=None) -> None:
super().__init__(loop=loop)
def __iter__(self) -> 'Iterator[Morsel[str]]':
while False:
yield None
def __len__(self) -> int:
return 0
def clear(self) -> None:
pass
def update_cookies(self,
cookies: LooseCookies,
response_url: URL=URL()) -> None:
pass
def filter_cookies(self, request_url: URL) -> 'BaseCookie[str]':
return SimpleCookie()
| 32.432 | 101 | 0.542921 |
acf14f411e9cee089175dcc5bc8a4925bd6f7299 | 2,512 | py | Python | kawaldpr/kawaldpr/urls.py | edwinlunando/kawaldpr | 998be9739bb5787b12e398bd3da35d70bfd24c36 | [
"MIT"
] | 2 | 2018-04-18T18:52:22.000Z | 2018-11-12T04:17:05.000Z | kawaldpr/kawaldpr/urls.py | edwinlunando/kawaldpr | 998be9739bb5787b12e398bd3da35d70bfd24c36 | [
"MIT"
] | null | null | null | kawaldpr/kawaldpr/urls.py | edwinlunando/kawaldpr | 998be9739bb5787b12e398bd3da35d70bfd24c36 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.auth.views import logout
from django.views.generic import TemplateView
from core import views as core_views
from legislature import views as legislature_views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', core_views.HomePage.as_view(), name='home'),
url(r'^sign-in/$', core_views.SignInPage.as_view(), name='sign-in'),
url(r'^sign-out/$', logout, {'next_page': '/'}, name='sign-out'),
url(r'^contact/$', core_views.ContactPage.as_view(), name='contact'),
url(r'^forgot-password/$', core_views.ForgotPasswordPage.as_view(), name='forgot-password'),
url(r'^reset-password/(?P<guid>[-_\w]+)$',
core_views.ResetPasswordPage.as_view(), name='reset-password'),
url(r'^media/(?P<slug>[-_\w]+)/$', legislature_views.MediumDetail.as_view(),
name='medium-detail'),
url(r'^dpr/(?P<slug>[-_\w]+)/$', legislature_views.LegislatureDetail.as_view(),
name='legislature-detail'),
url(r'^dpr/$', legislature_views.LegislatureList.as_view(), name='legislatures'),
# Examples:
# url(r'^$', 'kawaldpr.views.home', name='home'),
# url(r'^kawaldpr/', include('kawaldpr.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^grappelli/', include('grappelli.urls')), # grappelli URLS
(r'^ckeditor/', include('ckeditor.urls')),
# url(r'^admin/', include('admin_honeypot.urls')), # The fake admin URI
url(r'^backend/', include(admin.site.urls)), # The real admin URI
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| 49.254902 | 115 | 0.565685 |
acf14fc4735698717ec15cd16c7466e254b01a69 | 1,717 | py | Python | time_person_label.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | null | null | null | time_person_label.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | 2 | 2019-03-01T00:02:54.000Z | 2019-07-24T22:54:06.000Z | time_person_label.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | 2 | 2018-08-16T20:57:03.000Z | 2019-03-05T22:07:28.000Z | from rdflib import URIRef
import pickle
def run(sparql, graph, file_path, namespaces):
pickled = pickle.load(open(file_path, 'rb'))
open_clause = close_clause = ''
if graph:
open_clause = 'GRAPH <%s> {' % graph
close_clause = '}'
def query_justification_lbl(uri):
query = """
SELECT ?lbl
WHERE {
%s
?ms aida:cluster ?cluster ;
aida:clusterMember/aida:justifiedBy/skos:prefLabel ?lbl .
%s
}
GROUP BY ?lbl
ORDER BY DESC(COUNT(?lbl))
LIMIT 1
""" % (open_clause, close_clause)
for lbl, in sparql.query(query, namespaces, {'cluster': URIRef(uri)}):
return lbl
def query_justification_label_for_cluster_by_type(typ, prefix=''):
for uri, cluster in pickled.items():
if cluster['label'] == typ and cluster['type'] == 'https://tac.nist.gov/tracks/SM-KBP/2019/ontologies/SeedlingOntology#' + typ:
label = query_justification_lbl(uri)
if label:
cluster['label'] = prefix + label
query_justification_label_for_cluster_by_type('Person', '[P]')
query_justification_label_for_cluster_by_type('Time', '[T]')
query_justification_label_for_cluster_by_type('Facility', '[F]')
query_justification_label_for_cluster_by_type('Money', '[M]')
query_justification_label_for_cluster_by_type('Location', '[L]')
query_justification_label_for_cluster_by_type('Weapon', '[W]')
query_justification_label_for_cluster_by_type('Organization', '[O]')
query_justification_label_for_cluster_by_type('Vehicle', '[V]')
pickle.dump(pickled, open(file_path, 'wb'))
| 33.666667 | 139 | 0.635993 |
acf14ffbd7b16efc2d0eea96e3e086e828bfd415 | 459 | py | Python | tests/test_ease.py | lematt1991/RecLab | 7ba212ac2ae346fb6dfeec232eef652d7f26e193 | [
"MIT"
] | 51 | 2020-09-17T08:51:42.000Z | 2022-03-26T20:44:48.000Z | tests/test_ease.py | kiminh/RecLab | 7fd29d1c780e91910008a322b04e1b1149a203c8 | [
"MIT"
] | 25 | 2020-09-04T00:12:44.000Z | 2021-10-05T02:21:58.000Z | tests/test_ease.py | kiminh/RecLab | 7fd29d1c780e91910008a322b04e1b1149a203c8 | [
"MIT"
] | 6 | 2020-11-30T03:34:25.000Z | 2022-02-08T18:27:48.000Z | """Tests for the EASE recommender."""
from reclab.recommenders import EASE
from . import utils
def test_predict():
"""Test that EASE predicts well and that it gets better with more data."""
recommender = EASE(lam=100, binarize=True)
utils.test_binary_recommend_ml100k(recommender, 0.1)
def test_recommend():
"""Test that EASE will recommend reasonable items."""
recommender = EASE(lam=100)
utils.test_recommend_simple(recommender)
| 28.6875 | 78 | 0.734205 |
acf150a909e7fc3102c33e467076bec88dd1c70d | 9,231 | py | Python | qopen/source.py | trichter/qopen | 998fe27ec2d98d46c76093bb70f477ab7bede1a6 | [
"MIT"
] | 25 | 2015-03-13T13:06:30.000Z | 2022-03-27T17:56:45.000Z | qopen/source.py | trichter/qopen | 998fe27ec2d98d46c76093bb70f477ab7bede1a6 | [
"MIT"
] | 5 | 2016-08-10T12:53:17.000Z | 2021-11-10T16:00:07.000Z | qopen/source.py | trichter/qopen | 998fe27ec2d98d46c76093bb70f477ab7bede1a6 | [
"MIT"
] | 11 | 2015-11-26T20:54:59.000Z | 2020-05-09T05:45:52.000Z | # -*- coding: utf-8 -*-
# Copyright 2015-2017 Tom Eulenfeld, MIT license
"""
Fit source displacement spectrum with source model
(and some other functions dealing with the source)
If you want to fit source displacement spectra on the command line again
use the ``qopen --calc-source-params`` option.
"""
import logging
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.robust.robust_linear_model import RLM
import scipy.optimize
from qopen.util import gstat
def sds(W, f, v, rho):
"""
Calculate source displacement spectrum ωM from spectral source energy W
according to Sato & Fehler (2012, p.188)
:param W: spectral source energy (J/Hz)
:param f,v,rho: frequency, mean velocity, mean density
:return: source displacement spectrum (in Nm)"""
return np.sqrt(W * 2.5 / np.pi * rho * v ** 5 / f ** 2)
def source_model(freq, M0, fc, n=2, gamma=1):
"""Model for source displacement spectrum (Abercrombie 1995)
:param freq: frequencies
:param M0: seismic moment (Nm)
:param fc: corner frequency
:param n: high frequency fall-of
:param gamma: corner sharpness
"""
return M0 * (1 + (freq / fc) ** (n * gamma)) ** (-1 / gamma)
def _source_model_ab(freq, M0, fc, a=2, b=1):
return M0 * (1 + (freq / fc) ** a) ** (-b)
def fit_sds(freq, omM, method='mean', fc=None, n=2, gamma=1,
fc_lim=None, n_lim=(0.5, 10), gamma_lim=(0.5, 10),
fc0=10, n0=2, gamma0=1, fall_back=5, num_points=None, **opt_kw):
"""Fit source displacement spectrum and calculate seismic moment
:param freq,omM: frequencies, source displacement spectrum (same length)
:param method: 'mean' - take mean of sds of frequencies below fc,
'fit', 'robust_fit' - fit source model to obtain M0.
If one or more of fc, n, gamma are None, M0 and these values are
simultaneously determined.
Robust version uses a robust linear model (which downweights outliers).
:param fc,n,gamma: corner frequency and coefficients for source model
:param fc_lim,gamma_lim: bounds for corner frequency and gamma
(used for optimization if respective variable is set to None)
:param fc0,gamma0: starting values of fc and gamma for optimization
(only used for optimization for fc and gamma)
:param fall_back: use robust fit only if number of data points >= fall_back
:param num_points: determine M0 only if number of data points >= num_points
All other kwargs are passed to scipy.optimization, e.g.
:param tol: tolerance for optimization
:return: dictionary with M0 and optimized variables fc, n, and gamma
if applicable.
If M0 is not determined the function will return None
"""
if method == 'mean':
if fc is None:
msg = ("Border frequency fc must be given for "
"seismic_moment_method 'mean'")
raise ValueError(msg)
M0 = [o for f, o in zip(freq, omM) if f < fc and o is not None]
if num_points is not None and len(M0) < num_points:
return
if len(M0) > 0:
mean, err = gstat(M0, unbiased=False)
return {'M0': np.exp(mean), 'fit_error': err}
elif method in ('fit', 'robust_fit'):
omM = np.array(omM, dtype=float)
freq = np.array(freq)[~np.isnan(omM)]
omM = omM[~np.isnan(omM)]
if len(freq) == 0 or num_points is not None and len(freq) < num_points:
return
if method == 'robust_fit' and len(freq) >= fall_back:
Model = RLM
else:
Model = OLS
def lstsq(fc, n, gamma, opt=False):
# Inversion for M0
model = source_model(freq, 1, fc, n, gamma)
y = np.log(omM) - np.log(model)
X = np.ones(len(y))
res = Model(y, X).fit()
err = np.mean(res.resid ** 2)
if opt:
return err
return {'M0': np.exp(res.params[0]), 'fit_error': err ** 0.5}
def lstsqab(fc, a, opt=False):
# Inversion for M0 and b
model = _source_model_ab(freq, 1, fc, a, 1)
y = np.log(omM)
X = np.empty((len(y), 2))
X[:, 0] = 1
X[:, 1] = np.log(model)
res = Model(y, X).fit()
err = np.mean(res.resid ** 2)
if opt:
return err
return {'M0': np.exp(res.params[0]), 'b': res.params[1],
'fit_error': err ** 0.5}
unknowns = ((fc is None) * ('fc',) +
(n is None) * ('n',) + (gamma is None) * ('gamma',))
if n is None and gamma is None:
unknowns = (fc is None) * ('fc',) + ('a',)
wrapper = {
'fc': lambda x, opt=False: lstsq(x, n, gamma, opt=opt),
'n': lambda x, opt=False: lstsq(fc, x, gamma, opt=opt),
'gamma': lambda x, opt=False: lstsq(fc, n, x, opt=opt),
'fcn': lambda x, opt=False: lstsq(x[0], x[1], gamma, opt=opt),
'fcgamma': lambda x, opt=False: lstsq(x[0], n, x[1], opt=opt),
'a': lambda x, opt=False: lstsqab(fc, x, opt=opt),
'fca': lambda x, opt=False: lstsqab(x[0], x[1], opt=opt),
}
a_lim = None
if n_lim and gamma_lim:
a_lim = [n_lim[0] * gamma_lim[0], n_lim[1] * gamma_lim[1]]
bounds = {'fc': fc_lim or (freq[0], freq[-1]), 'n': n_lim,
'gamma': gamma_lim, 'a': a_lim}
start = {'fc': fc0, 'n': n0, 'gamma': gamma0, 'a': gamma0 * n0}
result = {}
if len(unknowns) == 0:
return lstsq(fc, n, gamma)
elif len(unknowns) == 1 and len(freq) > 1:
optimize = scipy.optimize.minimize_scalar
x = unknowns[0]
lstsq2 = wrapper[x]
opt = optimize(lstsq2, args=(True,), bounds=bounds[x],
method='bounded', **opt_kw)
result = {x: opt.x}
result.update(lstsq2(opt.x))
elif len(freq) > len(unknowns) >= 2:
optimize = scipy.optimize.minimize
lstsq2 = wrapper[''.join(unknowns)]
bounds = [bounds[u] for u in unknowns]
x0 = [start[u] for u in unknowns]
opt = optimize(lstsq2, x0, args=(True,), bounds=bounds, **opt_kw)
result = {u: opt.x[i] for i, u in enumerate(unknowns)}
result.update(lstsq2(opt.x))
msg = 'Optimization for M0 and %s terminated because of %s'
log = logging.getLogger('qopen.source')
log.debug(msg, unknowns, opt.message.lower())
if 'a' in result:
a = result.pop('a')
b = result.pop('b')
result['gamma'] = 1 / b
result['n'] = a * b
return result
def moment_magnitude(M0, inverse=False):
"""
Moment magnitude Mw from seismic moment M0
Based on Kanamori (1997), an alternative definition is based on
Hanks and Kanamori (1999) with an offset of -6.03.
:param M0: seismic moment in Nm
:param inverse: return the inverse relation ship M0(Mw)
"""
if inverse:
Mw = M0
return 10 ** (1.5 * (Mw + 6.07))
return 2 / 3 * np.log10(M0) - 6.07
def calculate_source_properties(results, rh0=None, v0=None,
seismic_moment_method=None,
seismic_moment_options=None):
"""Calculate source porperties for results"""
conf = results.get('config', {})
rho0 = rh0 or conf.get('rho0')
v02 = v0 or conf.get('v0')
smm = seismic_moment_method or conf.get('seismic_moment_method')
smo = seismic_moment_options or conf.get('seismic_moment_options')
freq = results.get('freq')
if rho0:
for r in dict(results['events']).values(): # dict from future.builtins
v0 = r.get('v0') or v02
r.pop('sds', None)
r.pop('M0', None)
r.pop('fc', None)
r.pop('n', None)
r.pop('gamma', None)
r.pop('fit_error', None)
if v0:
insert_source_properties(freq, r, v0, rho0, smm, smo)
return results
def insert_source_properties(freq, evresult, v0, rho0, seismic_moment_method,
seismic_moment_options, catmag=None):
"""Insert sds, Mw and possibly Mcat in evresult dictionary"""
from qopen.core import sort_dict
if evresult['W'] and rho0 and v0:
evresult['sds'] = []
for i, f in enumerate(freq):
if evresult['W'][i] and rho0 and v0:
evresult['sds'].append(sds(evresult['W'][i], f, v0, rho0))
else:
evresult['sds'].append(None)
if seismic_moment_method:
omM = evresult['sds']
fitresult = fit_sds(freq, omM, method=seismic_moment_method,
**seismic_moment_options)
if fitresult is not None:
if np.isnan(fitresult.get('fit_error', 1)):
fitresult['fit_error'] = None
evresult.update(fitresult)
evresult['Mw'] = moment_magnitude(fitresult['M0'])
if catmag is not None:
evresult['Mcat'] = catmag
return sort_dict(evresult)
| 39.618026 | 79 | 0.567544 |
acf150dcf36241bd8d76c030466dc5337038bc25 | 10,168 | py | Python | Data/AnswerPrediction/script/polymath.py | tinoryj/ASC2018-UESTC | 289dfb942a9cb119d9c31770097fcfe6d1f818e7 | [
"MIT"
] | 1 | 2018-03-17T18:39:25.000Z | 2018-03-17T18:39:25.000Z | Data/AnswerPrediction/script/polymath.py | tinoryj/ASC2018 | 289dfb942a9cb119d9c31770097fcfe6d1f818e7 | [
"MIT"
] | null | null | null | Data/AnswerPrediction/script/polymath.py | tinoryj/ASC2018 | 289dfb942a9cb119d9c31770097fcfe6d1f818e7 | [
"MIT"
] | null | null | null | import cntk as C
import numpy as np
from helpers import *
import pickle
import importlib
import os
class PolyMath:
def __init__(self, config_file):
data_config = importlib.import_module(config_file).data_config
model_config = importlib.import_module(config_file).model_config
self.word_count_threshold = data_config['word_count_threshold']
self.char_count_threshold = data_config['char_count_threshold']
self.word_size = data_config['word_size']
self.abs_path = os.path.dirname(os.path.abspath(__file__))
pickle_file = os.path.join(self.abs_path, data_config['pickle_file'])
with open(pickle_file, 'rb') as vf:
known, self.vocab, self.chars = pickle.load(vf)
self.wg_dim = known
self.wn_dim = len(self.vocab) - known
self.c_dim = len(self.chars)
self.a_dim = 1
self.hidden_dim = model_config['hidden_dim']
self.convs = model_config['char_convs']
self.dropout = model_config['dropout']
self.char_emb_dim = model_config['char_emb_dim']
self.highway_layers = model_config['highway_layers']
self.two_step = model_config['two_step']
self.use_cudnn = model_config['use_cudnn']
self.use_sparse = True
print('dropout', self.dropout)
print('use_cudnn', self.use_cudnn)
print('use_sparse', self.use_sparse)
def charcnn(self, x):
conv_out = C.layers.Sequential([
C.layers.Embedding(self.char_emb_dim),
C.layers.Dropout(self.dropout),
C.layers.Convolution2D((5,self.char_emb_dim), self.convs, activation=C.relu, init=C.glorot_uniform(), bias=True, init_bias=0, name='charcnn_conv')])(x)
return C.reduce_max(conv_out, axis=1) # workaround cudnn failure in GlobalMaxPooling
def embed(self):
# load glove
npglove = np.zeros((self.wg_dim, self.hidden_dim), dtype=np.float32)
with open(os.path.join(self.abs_path, 'glove.6B.100d.txt'), encoding='utf-8') as f:
for line in f:
parts = line.split()
word = parts[0].lower()
if word in self.vocab:
npglove[self.vocab[word],:] = np.asarray([float(p) for p in parts[1:]])
glove = C.constant(npglove)
nonglove = C.parameter(shape=(len(self.vocab) - self.wg_dim, self.hidden_dim), init=C.glorot_uniform(), name='TrainableE')
def func(wg, wn):
return C.times(wg, glove) + C.times(wn, nonglove)
return func
def input_layer(self,cgw,cnw,cc,qgw,qnw,qc):
cgw_ph = C.placeholder()
cnw_ph = C.placeholder()
cc_ph = C.placeholder()
qgw_ph = C.placeholder()
qnw_ph = C.placeholder()
qc_ph = C.placeholder()
input_chars = C.placeholder(shape=(1,self.word_size,self.c_dim))
input_glove_words = C.placeholder(shape=(self.wg_dim,))
input_nonglove_words = C.placeholder(shape=(self.wn_dim,))
# we need to reshape because GlobalMaxPooling/reduce_max is retaining a trailing singleton dimension
# todo GlobalPooling/reduce_max should have a keepdims default to False
embedded = C.splice(
C.reshape(self.charcnn(input_chars), self.convs),
self.embed()(input_glove_words, input_nonglove_words), name='splice_embed')
highway = HighwayNetwork(dim=2*self.hidden_dim, highway_layers=self.highway_layers)(embedded)
highway_drop = C.layers.Dropout(self.dropout)(highway)
processed = OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='input_rnn')(highway_drop)
qce = C.one_hot(qc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
cce = C.one_hot(cc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
q_processed = processed.clone(C.CloneMethod.share, {input_chars:qce, input_glove_words:qgw_ph, input_nonglove_words:qnw_ph})
c_processed = processed.clone(C.CloneMethod.share, {input_chars:cce, input_glove_words:cgw_ph, input_nonglove_words:cnw_ph})
return C.as_block(
C.combine([c_processed, q_processed]),
[(cgw_ph, cgw),(cnw_ph, cnw),(cc_ph, cc),(qgw_ph, qgw),(qnw_ph, qnw),(qc_ph, qc)],
'input_layer',
'input_layer')
def attention_layer(self, context, query):
    """Bidirectional (context<->query) attention, wrapped in a CNTK block.

    Args:
        context: sequence of 2*hidden_dim context encodings.
        query: sequence of 2*hidden_dim query encodings.

    Returns:
        A CNTK as_block named 'attention_layer' producing the spliced
        8*hidden_dim attended context: [c; c2q; c*c2q; c*q2c].
    """
    # Placeholders are bound to (context, query) by as_block below.
    q_processed = C.placeholder(shape=(2*self.hidden_dim,))
    c_processed = C.placeholder(shape=(2*self.hidden_dim,))
    #convert query's sequence axis to static
    qvw, qvw_mask = C.sequence.unpack(q_processed, padding_value=0).outputs
    # This part deserves some explanation
    # It is the attention layer
    # In the paper they use a 6 * dim dimensional vector
    # here we split it in three parts because the different parts
    # participate in very different operations
    # so W * [h; u; h.* u] becomes w1 * h + w2 * u + w3 * (h.*u)
    ws1 = C.parameter(shape=(2 * self.hidden_dim, 1), init=C.glorot_uniform())
    ws2 = C.parameter(shape=(2 * self.hidden_dim, 1), init=C.glorot_uniform())
    ws3 = C.parameter(shape=(1, 2 * self.hidden_dim), init=C.glorot_uniform())
    att_bias = C.parameter(shape=(), init=0)
    # Similarity S(h, u) = w1*h + w2*u + w3*(h .* u) + bias, built term by term.
    wh = C.times (c_processed, ws1)
    wu = C.reshape(C.times (qvw, ws2), (-1,))
    whu = C.reshape(C.reduce_sum(c_processed * C.sequence.broadcast_as(qvw * ws3, c_processed), axis=1), (-1,))
    S = wh + whu + C.sequence.broadcast_as(wu, c_processed) + att_bias
    # mask out values outside of Query, and fill in gaps with -1e+30 as neutral value for both reduce_log_sum_exp and reduce_max
    qvw_mask_expanded = C.sequence.broadcast_as(qvw_mask, c_processed)
    S = C.element_select(qvw_mask_expanded, S, C.constant(-1e+30))
    # Context-to-query: attention-weighted sum of query vectors per context step.
    q_attn = C.reshape(C.softmax(S), (-1,1))
    #q_attn = print_node(q_attn)
    c2q = C.reshape(C.reduce_sum(C.sequence.broadcast_as(qvw, q_attn) * q_attn, axis=0),(-1))
    # Query-to-context: softmax over each step's max similarity picks the
    # context positions most relevant to any query word, then broadcasts
    # the resulting summary vector back over the context sequence.
    max_col = C.reduce_max(S)
    c_attn = C.sequence.softmax(max_col)
    htilde = C.sequence.reduce_sum(c_processed * c_attn)
    q2c = C.sequence.broadcast_as(htilde, c_processed)
    q2c_out = c_processed * q2c
    att_context = C.splice(c_processed, c2q, c_processed * c2q, q2c_out)
    return C.as_block(
        att_context,
        [(c_processed, context), (q_processed, query)],
        'attention_layer',
        'attention_layer')
def modeling_layer(self, attention_context):
    """Run the attended context through two dropout+BiRNN stages.

    Returns a CNTK as_block named 'modeling_layer' whose placeholder is
    bound to *attention_context* (shape 8*hidden_dim per step).
    """
    att_context = C.placeholder(shape=(8 * self.hidden_dim,))
    # todo: use dropout in optimized_rnn_stack from cudnn once API exposes it
    drop0 = C.layers.Dropout(self.dropout)
    rnn0 = OptimizedRnnStack(self.hidden_dim, bidirectional=True,
                             use_cudnn=self.use_cudnn, name='model_rnn0')
    drop1 = C.layers.Dropout(self.dropout)
    rnn1 = OptimizedRnnStack(self.hidden_dim, bidirectional=True,
                             use_cudnn=self.use_cudnn, name='model_rnn1')
    # Same pipeline as Sequential([drop0, rnn0, drop1, rnn1]).
    mod_context = rnn1(drop1(rnn0(drop0(att_context))))
    return C.as_block(
        mod_context,
        [(att_context, attention_context)],
        'modeling_layer',
        'modeling_layer')
def output_layer(self, attention_context, modeling_context):
    """Predict answer-span start/end logits over the context sequence.

    Args:
        attention_context: 8*hidden_dim attended context sequence.
        modeling_context: 2*hidden_dim modeled context sequence.

    Returns:
        A CNTK as_block named 'output_layer' combining
        (start_logits, end_logits).
    """
    att_context = C.placeholder(shape=(8*self.hidden_dim,))
    mod_context = C.placeholder(shape=(2*self.hidden_dim,))
    #output layer
    start_logits = C.layers.Dense(1, name='out_start')(C.dropout(C.splice(mod_context, att_context), self.dropout))
    if self.two_step:
        # Hard (argmax) start selection: the modeling vector at the chosen
        # start position conditions the end prediction.
        start_hardmax = seq_hardmax(start_logits)
        att_mod_ctx = C.sequence.last(C.sequence.gather(mod_context, start_hardmax))
    else:
        # Soft start: probability-weighted sum of mod_context instead.
        start_prob = C.softmax(start_logits)
        att_mod_ctx = C.sequence.reduce_sum(mod_context * start_prob)
    att_mod_ctx_expanded = C.sequence.broadcast_as(att_mod_ctx, att_context)
    end_input = C.splice(att_context, mod_context, att_mod_ctx_expanded, mod_context * att_mod_ctx_expanded)
    m2 = OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='output_rnn')(end_input)
    end_logits = C.layers.Dense(1, name='out_end')(C.dropout(C.splice(m2, att_context), self.dropout))
    return C.as_block(
        C.combine([start_logits, end_logits]),
        [(att_context, attention_context), (mod_context, modeling_context)],
        'output_layer',
        'output_layer')
def model(self):
    """Assemble the whole network: inputs -> input/attention/modeling/output
    layers, plus the training loss.

    Returns:
        Tuple of (combined (start_logits, end_logits) function, loss).
    """
    # Separate dynamic axes for context ('c') and query ('q') sequences.
    c = C.Axis.new_unique_dynamic_axis('c')
    q = C.Axis.new_unique_dynamic_axis('q')
    b = C.Axis.default_batch_axis()
    # Word inputs over glove (wg_dim) / non-glove (wn_dim) vocabularies,
    # for the context (c*) and query (q*) sequences respectively.
    cgw = C.input_variable(self.wg_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cgw')
    cnw = C.input_variable(self.wn_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cnw')
    qgw = C.input_variable(self.wg_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qgw')
    qnw = C.input_variable(self.wn_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qnw')
    # Character inputs: one row of word_size character ids per word.
    cc = C.input_variable((1,self.word_size), dynamic_axes=[b,c], name='cc')
    qc = C.input_variable((1,self.word_size), dynamic_axes=[b,q], name='qc')
    # Answer-begin / answer-end targets over the context sequence.
    ab = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ab')
    ae = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ae')
    #input layer
    c_processed, q_processed = self.input_layer(cgw,cnw,cc,qgw,qnw,qc).outputs
    # attention layer
    att_context = self.attention_layer(c_processed, q_processed)
    # modeling layer
    mod_context = self.modeling_layer(att_context)
    # output layer
    start_logits, end_logits = self.output_layer(att_context, mod_context).outputs
    # loss
    start_loss = seq_loss(start_logits, ab)
    end_loss = seq_loss(end_logits, ae)
    #paper_loss = start_loss + end_loss
    new_loss = all_spans_loss(start_logits, ab, end_logits, ae)
    return C.combine([start_logits, end_logits]), new_loss
acf150e28f3b3169731beb201c49ca6f919978f3 | 3,234 | py | Python | obsei/sink/jira_sink.py | akar5h/obsei | 0cca33e5b0c3fa159e08d52fd4e6f323ea565cd4 | [
"Apache-2.0"
] | 1 | 2021-08-24T17:45:57.000Z | 2021-08-24T17:45:57.000Z | obsei/sink/jira_sink.py | akar5h/obsei | 0cca33e5b0c3fa159e08d52fd4e6f323ea565cd4 | [
"Apache-2.0"
] | null | null | null | obsei/sink/jira_sink.py | akar5h/obsei | 0cca33e5b0c3fa159e08d52fd4e6f323ea565cd4 | [
"Apache-2.0"
] | null | null | null | import logging
import textwrap
from typing import Any, Dict, List, Optional
from atlassian import Jira
from pydantic import PrivateAttr, SecretStr
from obsei.sink.base_sink import BaseSink, BaseSinkConfig, Convertor
from obsei.payload import TextPayload
from obsei.misc.utils import obj_to_markdown
logger = logging.getLogger(__name__)
class JiraPayloadConvertor(Convertor):
    """Translate a TextPayload into the fields dict for a Jira issue."""

    def convert(
        self,
        analyzer_response: TextPayload,
        base_payload: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> dict:
        max_len = kwargs.get("summary_max_length", 50)
        payload = base_payload or dict()
        # Description: the full analyzer response rendered as Jira markup,
        # with string values wrapped in {quote} blocks.
        payload["description"] = obj_to_markdown(
            obj=analyzer_response,
            str_enclose_start="{quote}",
            str_enclose_end="{quote}",
        )
        # Summary: the processed text truncated to max_len characters.
        payload["summary"] = textwrap.shorten(
            text=analyzer_response.processed_text, width=max_len
        )
        # TODO: Find correct payload to update labels fields
        labels_count = kwargs.get("labels_count", 1)
        # labels = [v for k, v in sorted(analyzer_response.segmented_data.items(), key=lambda item: item[1])]
        # payload['labels'] = [{"name": label} for label in labels[:labels_count]]
        return payload
class JiraSinkConfig(BaseSinkConfig):
    """Connection settings and issue defaults for the Jira sink."""

    # This is done to avoid exposing member to API response
    _jira_client: Jira = PrivateAttr()
    TYPE: str = "Jira"
    url: str
    username: SecretStr
    password: SecretStr
    issue_type: Dict[str, str]
    project: Dict[str, str]
    update_history: bool = True
    verify_ssl: bool = False
    summary_max_length: int = 50
    labels_count: int = 2  # Number of labels to fetch
    def __init__(self, **data: Any):
        # Build the Jira client eagerly; secrets are unwrapped only here.
        super().__init__(**data)
        self._jira_client = Jira(
            url=self.url,
            username=self.username.get_secret_value(),
            password=self.password.get_secret_value(),
            verify_ssl=self.verify_ssl,
        )
    def get_jira_client(self) -> Jira:
        """Return the Jira client created in __init__."""
        return self._jira_client
class JiraSink(BaseSink):
    """Sink that files one Jira issue per analyzer response."""

    def __init__(self, convertor: Convertor = JiraPayloadConvertor(), **data: Any):
        super().__init__(convertor=convertor, **data)

    def send_data(  # type: ignore[override]
        self,
        analyzer_responses: List[TextPayload],
        config: JiraSinkConfig,
        **kwargs,
    ):
        # Phase 1: convert every response up front, so a conversion error
        # aborts before any issue has been created.
        payloads = [
            self.convertor.convert(
                analyzer_response=analyzer_response,
                base_payload={
                    "project": config.project,
                    "issuetype": config.issue_type,
                },
                summary_max_length=config.summary_max_length,
                labels_count=config.labels_count,
            )
            for analyzer_response in analyzer_responses
        ]
        # Phase 2: create the issues and collect Jira's responses.
        responses = []
        for fields in payloads:
            created = config.get_jira_client().create_issue(
                fields=fields, update_history=config.update_history
            )
            logger.info(f"response='{created}'")
            responses.append(created)
        return responses
| 31.705882 | 109 | 0.620284 |
acf15244280be63c6c3ba530507a75acbcfa08bf | 1,455 | py | Python | gcloud/iam_auth/view_interceptors/apigw/claim_functionalization_task.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | null | null | null | gcloud/iam_auth/view_interceptors/apigw/claim_functionalization_task.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | null | null | null | gcloud/iam_auth/view_interceptors/apigw/claim_functionalization_task.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from iam import Action, Subject
from iam.shortcuts import allow_or_raise_auth_failed
from gcloud.iam_auth import IAMMeta
from gcloud.iam_auth import get_iam_client
from gcloud.iam_auth import res_factory
from gcloud.iam_auth.intercept import ViewInterceptor
iam = get_iam_client()
class FunctionTaskInterceptor(ViewInterceptor):
    """Checks that the requesting user is allowed to claim the task."""

    def process(self, request, *args, **kwargs):
        # Trusted (internal) requests bypass the permission check.
        if request.is_trust:
            return
        task_id = kwargs["task_id"]
        allow_or_raise_auth_failed(
            iam,
            IAMMeta.SYSTEM_ID,
            Subject("user", request.user.username),
            Action(IAMMeta.TASK_CLAIM_ACTION),
            res_factory.resources_for_task(task_id),
        )
| 41.571429 | 115 | 0.771134 |
acf15330337e401fd09ef78699c55490fa8c11dd | 10,656 | py | Python | src/local/butler/create_config.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 5,023 | 2019-02-07T16:57:56.000Z | 2022-03-31T01:08:05.000Z | src/local/butler/create_config.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 2,303 | 2019-02-07T17:36:36.000Z | 2022-03-31T15:44:38.000Z | src/local/butler/create_config.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 564 | 2019-02-07T17:34:24.000Z | 2022-03-26T09:25:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for creating a new deployment config."""
import json
import os
import shutil
import subprocess
import sys
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
import google_auth_httplib2
import httplib2
from local.butler import appengine
from local.butler import common
_REQUIRED_SERVICES = (
'appengineflex.googleapis.com',
'bigquery-json.googleapis.com',
'cloudapis.googleapis.com',
'cloudbuild.googleapis.com',
'clouddebugger.googleapis.com',
'clouderrorreporting.googleapis.com',
'cloudprofiler.googleapis.com',
'cloudresourcemanager.googleapis.com',
'compute.googleapis.com',
'containerregistry.googleapis.com',
'datastore.googleapis.com',
'deploymentmanager.googleapis.com',
'file.googleapis.com',
'iam.googleapis.com',
'iamcredentials.googleapis.com',
'logging.googleapis.com',
'monitoring.googleapis.com',
'oslogin.googleapis.com',
'pubsub.googleapis.com',
'redis.googleapis.com',
'replicapool.googleapis.com',
'replicapoolupdater.googleapis.com',
'resourceviews.googleapis.com',
'siteverification.googleapis.com',
'sourcerepo.googleapis.com',
'stackdriver.googleapis.com',
'storage-api.googleapis.com',
'storage-component.googleapis.com',
'vpcaccess.googleapis.com',
)
_NUM_RETRIES = 2
_ENABLE_SERVICE_BATCH_SIZE = 19
class DomainVerifier(object):
  """Thin wrapper over the Google Site Verification API (FILE method)."""

  def __init__(self, oauth_client_secrets_path):
    # Run the installed-app OAuth flow on the console to obtain credentials
    # scoped for site verification, then build the API client.
    flow = InstalledAppFlow.from_client_secrets_file(
        oauth_client_secrets_path,
        scopes=['https://www.googleapis.com/auth/siteverification'])
    credentials = flow.run_console()
    http = google_auth_httplib2.AuthorizedHttp(
        credentials, http=httplib2.Http())
    self.api = discovery.build('siteVerification', 'v1', http=http)

  def get_domain_verification_tag(self, domain):
    """Get the domain verification token for |domain| (FILE method)."""
    response = self.api.webResource().getToken(
        body={
            'verificationMethod': 'FILE',
            'site': {
                'identifier': domain,
                'type': 'SITE',
            }
        }).execute(num_retries=_NUM_RETRIES)
    return response['token']

  def verify(self, domain):
    """Ask Google to verify ownership of |domain| via the FILE method."""
    self.api.webResource().insert(
        body={
            'site': {
                'identifier': domain,
                'type': 'SITE',
            }
        },
        verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)

  def add_owner(self, domain, email):
    """Add |email| as an owner of the verified |domain|.

    Skips the update call when the email is already listed as an owner.
    """
    response = self.api.webResource().get(id=domain).execute(
        num_retries=_NUM_RETRIES)
    if email not in response['owners']:
      response['owners'].append(email)
      self.api.webResource().update(
          id=domain, body=response).execute(num_retries=_NUM_RETRIES)
def get_numeric_project_id(gcloud, project_id):
  """Return the numeric project ID via `gcloud projects describe`."""
  raw = gcloud.run('projects', 'describe', project_id, '--format=json')
  return json.loads(raw)['projectNumber']
def app_engine_service_account(project_id):
  """Return the default App Engine service account for the project."""
  return '{}@appspot.gserviceaccount.com'.format(project_id)
def compute_engine_service_account(gcloud, project_id):
  """Return the default Compute Engine service account for the project."""
  numeric_id = get_numeric_project_id(gcloud, project_id)
  return numeric_id + '-compute@developer.gserviceaccount.com'
def enable_services(gcloud):
  """Enable the required GCP services, in batches.

  Batching keeps each `gcloud services enable` invocation small.

  Args:
    gcloud: a common.Gcloud wrapper used to run gcloud commands.
  """
  for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):
    # Bug fix: the slice upper bound was `i + end` with
    # `end = i + _ENABLE_SERVICE_BATCH_SIZE`, i.e. 2*i + batch size, which
    # made every batch after the first larger than intended.
    batch = _REQUIRED_SERVICES[i:i + _ENABLE_SERVICE_BATCH_SIZE]
    gcloud.run('services', 'enable', *batch)
def replace_file_contents(file_path, replacements):
  """Apply (find, replace) pairs to a file, rewriting it only if changed."""
  with open(file_path) as f:
    original = f.read()

  updated = original
  for needle, substitute in replacements:
    updated = updated.replace(needle, substitute)

  # Skip the write entirely when nothing changed.
  if updated == original:
    return

  with open(file_path, 'w') as f:
    f.write(updated)
def project_bucket(project_id, bucket_name):
  """Return a project-specific bucket name."""
  return '%s.%s.appspot.com' % (bucket_name, project_id)
def create_new_config(gcloud, project_id, new_config_dir,
                      domain_verification_tag, bucket_replacements,
                      gae_location, gce_zone, firebase_api_key):
  """Copy the test config template and substitute project-specific values."""
  if os.path.exists(new_config_dir):
    print('Overwriting existing directory.')
    shutil.rmtree(new_config_dir)

  gae_region = appengine.region_from_location(gae_location)
  # NOTE: the more specific 'test-clusterfuzz-service-account-email' key must
  # stay ahead of 'test-clusterfuzz', since replacements run in order.
  replacements = [
      ('test-clusterfuzz-service-account-email',
       compute_engine_service_account(gcloud, project_id)),
      ('test-clusterfuzz', project_id),
      ('test-project', project_id),
      ('domain-verification-tag', domain_verification_tag),
      ('gae-region', gae_region),
      ('gce-zone', gce_zone),
      ('firebase-api-key', firebase_api_key),
  ] + list(bucket_replacements)

  shutil.copytree(os.path.join('configs', 'test'), new_config_dir)
  for root_dir, _, filenames in os.walk(new_config_dir):
    for filename in filenames:
      replace_file_contents(os.path.join(root_dir, filename), replacements)
def deploy_appengine(gcloud, config_dir, appengine_location):
  """Deploy App Engine targets, creating the app first if it is missing."""
  try:
    # Probe for an existing App Engine app; `app describe` fails if absent.
    gcloud.run('app', 'describe')
  except common.GcloudError:
    gcloud.run('app', 'create', '--region=' + appengine_location)

  deploy_command = [
      'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',
      '--prod', '--config-dir', config_dir
  ]
  subprocess.check_call(deploy_command)
def deploy_zips(config_dir):
  """Deploy the source zips via butler."""
  command = [
      'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',
      '--config-dir', config_dir
  ]
  subprocess.check_call(command)
def create_buckets(project_id, buckets):
  """Create each GCS bucket that does not already exist."""
  gsutil = common.Gsutil()
  for bucket in buckets:
    bucket_url = 'gs://' + bucket
    try:
      # Probe: `defstorageclass get` fails when the bucket is missing.
      gsutil.run('defstorageclass', 'get', bucket_url)
    except common.GsutilError:
      gsutil.run('mb', '-p', project_id, bucket_url)
def set_cors(config_dir, buckets):
  """Apply the config's CORS settings (gae/cors.json) to each bucket."""
  gsutil = common.Gsutil()
  cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')
  for bucket in buckets:
    gsutil.run('cors', 'set', cors_file_path, 'gs://{}'.format(bucket))
def add_service_account_role(gcloud, project_id, service_account, role):
  """Grant |role| to |service_account| on the project's IAM policy."""
  member = 'serviceAccount:' + service_account
  gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',
             member, '--role', role)
def execute(args):
  """Create a new config directory and deployment.

  End-to-end flow: enable services, get a domain verification tag, write the
  new config, deploy App Engine, verify the domain, grant IAM roles, create
  buckets, set CORS, record the deployment bucket and deploy source zips.
  """
  # Check this early on, as the deployment at the end would fail otherwise.
  if common.is_git_dirty():
    print('Your checkout contains uncommitted changes. Cannot proceed.')
    sys.exit(1)
  verifier = DomainVerifier(args.oauth_client_secrets_path)

  gcloud = common.Gcloud(args.project_id)
  enable_services(gcloud)

  # Get tag for domain verification.
  appspot_domain = 'https://' + args.project_id + '.appspot.com/'
  domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)

  blobs_bucket = project_bucket(args.project_id, 'blobs')
  deployment_bucket = project_bucket(args.project_id, 'deployment')
  bucket_replacements = (
      ('test-blobs-bucket', blobs_bucket),
      ('test-deployment-bucket', deployment_bucket),
      ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),
      ('test-backup-bucket', project_bucket(args.project_id, 'backup')),
      ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),
      ('test-fuzzer-logs-bucket', project_bucket(args.project_id,
                                                 'fuzzer-logs')),
      ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),
      ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),
      ('test-shared-corpus-bucket',
       project_bucket(args.project_id, 'shared-corpus')),
      ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),
      ('test-mutator-plugins-bucket',
       project_bucket(args.project_id, 'mutator-plugins')),
  )

  # Write new configs.
  create_new_config(gcloud, args.project_id, args.new_config_dir,
                    domain_verification_tag, bucket_replacements,
                    args.appengine_location, args.gce_zone,
                    args.firebase_api_key)
  prev_dir = os.getcwd()
  os.chdir(args.new_config_dir)

  # Deploy App Engine and finish verification of domain.
  # NOTE(review): the chdir into the new config dir above is undone here with
  # no work in between - looks like a leftover; confirm before removing.
  os.chdir(prev_dir)
  deploy_appengine(
      gcloud, args.new_config_dir, appengine_location=args.appengine_location)
  verifier.verify(appspot_domain)

  # App Engine service account requires:
  # - Domain ownership to create domain namespaced GCS buckets
  # - Datastore export permission for periodic backups.
  # - Service account signing permission for GCS uploads.
  service_account = app_engine_service_account(args.project_id)
  verifier.add_owner(appspot_domain, service_account)
  add_service_account_role(gcloud, args.project_id, service_account,
                           'roles/datastore.importExportAdmin')
  add_service_account_role(gcloud, args.project_id, service_account,
                           'roles/iam.serviceAccountTokenCreator')

  # Create buckets now that domain is verified.
  create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])

  # Set CORS settings on the buckets.
  set_cors(args.new_config_dir, [blobs_bucket])

  # Set deployment bucket for the cloud project.
  gcloud.run('compute', 'project-info', 'add-metadata',
             '--metadata=deployment-bucket=' + deployment_bucket)

  # Deploy source zips.
  deploy_zips(args.new_config_dir)
| 34.485437 | 80 | 0.696321 |
acf15447a9d98c2c33e5009408044f5e2e06cbea | 4,998 | py | Python | torch2trt/tests/torchvision/classification.py | nuhpiskin/torch2trt | ce39d035d067cd6596dbed60aa05b37051915c74 | [
"MIT"
] | 3,363 | 2019-06-21T04:43:02.000Z | 2022-03-31T20:08:31.000Z | torch2trt/tests/torchvision/classification.py | maronuu/torch2trt | 311f328cd45799ad8d72f1bebcc818d71c301f62 | [
"MIT"
] | 592 | 2019-06-24T08:25:55.000Z | 2022-03-31T06:37:37.000Z | torch2trt/tests/torchvision/classification.py | maronuu/torch2trt | 311f328cd45799ad8d72f1bebcc818d71c301f62 | [
"MIT"
] | 606 | 2019-06-23T04:16:38.000Z | 2022-03-31T09:22:15.000Z | import torch
import torchvision
from torch2trt.module_test import add_module_test
# Each factory below is registered via @add_module_test as a torch2trt
# conversion test case (FP16, CUDA device, input shape (1, 3, 224, 224)).
# All models use pretrained=False, so no weight download is needed.
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def alexnet():
    return torchvision.models.alexnet(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def squeezenet1_0():
    return torchvision.models.squeezenet1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def squeezenet1_1():
    return torchvision.models.squeezenet1_1(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet18():
    return torchvision.models.resnet18(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet34():
    return torchvision.models.resnet34(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet50():
    return torchvision.models.resnet50(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet101():
    return torchvision.models.resnet101(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet152():
    return torchvision.models.resnet152(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet121():
    return torchvision.models.densenet121(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet169():
    return torchvision.models.densenet169(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet201():
    return torchvision.models.densenet201(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet161():
    return torchvision.models.densenet161(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg11():
    return torchvision.models.vgg11(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg13():
    return torchvision.models.vgg13(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg16():
    return torchvision.models.vgg16(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg19():
    return torchvision.models.vgg19(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg11_bn():
    return torchvision.models.vgg11_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg13_bn():
    return torchvision.models.vgg13_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg16_bn():
    return torchvision.models.vgg16_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg19_bn():
    return torchvision.models.vgg19_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mobilenet_v2():
    return torchvision.models.mobilenet_v2(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x0_5():
    return torchvision.models.shufflenet_v2_x0_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x1_0():
    return torchvision.models.shufflenet_v2_x1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x1_5():
    return torchvision.models.shufflenet_v2_x1_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x2_0():
    return torchvision.models.shufflenet_v2_x2_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet0_5():
    return torchvision.models.mnasnet0_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet0_75():
    return torchvision.models.mnasnet0_75(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet1_0():
    return torchvision.models.mnasnet1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet1_3():
return torchvision.models.mnasnet1_3(pretrained=False) | 33.77027 | 89 | 0.736495 |
acf154748ccd16b3e4bb444076481bd0be6843e7 | 723 | py | Python | app/models.py | Kadas36/NEWS-App | ee0504ed04f5e3d8c7e06ea478d163e8209ac425 | [
"MIT"
] | null | null | null | app/models.py | Kadas36/NEWS-App | ee0504ed04f5e3d8c7e06ea478d163e8209ac425 | [
"MIT"
] | null | null | null | app/models.py | Kadas36/NEWS-App | ee0504ed04f5e3d8c7e06ea478d163e8209ac425 | [
"MIT"
] | null | null | null | class Source:
'''
Class to define news source object
'''
def __init__(self, id, name, description, url, category, country):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.country = country
class Article:
    """Plain value object describing a news article."""

    def __init__(self, id, name, title, author, description, url,
                 urlToImage, publishedAt):
        # Store every field verbatim; instances carry no behavior.
        (self.id, self.name, self.title, self.author, self.description,
         self.url, self.urlToImage, self.publishedAt) = (
            id, name, title, author, description, url,
            urlToImage, publishedAt)
acf1568960e2d833620794779b1a9d4dd420c473 | 1,629 | py | Python | 01 from DL to NN/notmnist/dataset_reformat.py | dkorenci/udacity_deeplearn | 149273733a8075381baf33fb0aa1b17a4a681dde | [
"Apache-2.0"
] | null | null | null | 01 from DL to NN/notmnist/dataset_reformat.py | dkorenci/udacity_deeplearn | 149273733a8075381baf33fb0aa1b17a4a681dde | [
"Apache-2.0"
] | null | null | null | 01 from DL to NN/notmnist/dataset_reformat.py | dkorenci/udacity_deeplearn | 149273733a8075381baf33fb0aa1b17a4a681dde | [
"Apache-2.0"
] | null | null | null | from notmnist.dataset import loadDataset, saveDataset, printDsetShapes
from notmnist.settings import image_size, num_labels
import numpy as np
def reformat1d(dataset, labels):
    '''
    Flatten the trailing image_size x image_size pixel grid of each image
    into a single 1d vector, and turn integer labels into 1-hot rows.
    :return: (flattened dataset, one-hot labels), both float32
    '''
    flat = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
    one_hot = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return flat, one_hot
def reformatDataset(ds, reformatter):
    '''
    Apply *reformatter* to the (dataset, labels) pair of each split and
    return a new dict with the same train/valid/test keys.
    '''
    new = {}
    for split in ('train', 'valid', 'test'):
        data_key, label_key = split + '_dataset', split + '_labels'
        new[data_key], new[label_key] = reformatter(ds[data_key], ds[label_key])
    return new
def reformatConv(dataset, labels, image_size = 28, num_labels = 10, num_channels = 1):
    '''
    Reformat a dataset for convolutional networks: each image becomes a 3d
    array with a trailing 'channel' dimension, labels become 1-hot rows.
    '''
    images = dataset.reshape((-1, image_size, image_size, num_channels))
    one_hot = (np.arange(num_labels) == labels[:, None])
    return images.astype(np.float32), one_hot.astype(np.float32)
if __name__ == '__main__':
    # Ad-hoc driver: load the pickled notMNIST dataset; the commented-out
    # lines below are one-off reformat/save experiments kept for reference.
    ds = loadDataset()
    #ds = loadDataset('notMNIST_reformatted_1d_images.pickle')
    #printDsetShapes(ds)
    # nds = reformatDataset(ds, reformatConv)
    # printDsetShapes(nds)
    # saveDataset(nds, 'notMNIST_reformatted_conv.pickle')
acf158233dead7508b47503ad74aa459c2cb5882 | 3,371 | py | Python | app/view/attr.py | okriuchykhin/anfisa | cda08e649c5a313c7d52f9b4426558c7388a73b0 | [
"Apache-2.0"
] | null | null | null | app/view/attr.py | okriuchykhin/anfisa | cda08e649c5a313c7d52f9b4426558c7388a73b0 | [
"Apache-2.0"
] | null | null | null | app/view/attr.py | okriuchykhin/anfisa | cda08e649c5a313c7d52f9b4426558c7388a73b0 | [
"Apache-2.0"
] | null | null | null | from utils.log_err import logException
from .view_repr import jsonHtmlRepr, htmlEscape
#===============================================
class AttrH:
@classmethod
def normLink(cls, value):
return value
#===============================================
def __init__(self, name, kind = None, title = None,
is_seq = False, tooltip = None):
self.mAspect = None
self.mName = name
self.mTitle = (title if title is not None else name)
self.mKinds = kind.split() if kind else ["norm"]
self.mToolTip = tooltip
self.mIsSeq = is_seq
self.mResearchOnly = "research" in self.mKinds
def setAspect(self, asp):
self.mAspect = asp
def reset(self, kind, is_seq):
self.mKinds = kind.split() if kind else ["norm"]
self.mIsSeq = is_seq
def getName(self):
return self.mName
def getTitle(self):
return self.mTitle
def isSeq(self):
return self.mIsSeq
def getToolTip(self):
return self.mToolTip
def hasKind(self, kind):
return kind in self.mKinds
def getMainKind(self):
return self.mKinds[0]
def getKinds(self):
return iter(self.mKinds)
def getFullName(self):
return self.mAspect.getName() + '.' + self.mName
def checkResearchBlock(self, research_mode):
return (not research_mode) and self.mResearchOnly
#===============================================
def dump(self):
ret = {
"name": self.mName, "kind": " ".join(self.mKinds),
"title": self.mTitle, "is_seq": self.mIsSeq}
if self.mToolTip:
ret["tooltip"] = self.mToolTip
return ret
@classmethod
def load(cls, data):
return cls(data["name"], data["kind"], data["title"],
is_seq = data["is_seq"], tooltip = data.get("tooltip"))
#===============================================
def htmlRepr(self, obj, top_rec_obj):
try:
val_obj = obj.get(self.mName) if obj else None
repr_text = None
if val_obj is 0:
return ("0", self.getMainKind())
if val_obj:
if self.mIsSeq:
seq = []
for it_obj in val_obj:
it_repr = self._htmlRepr(it_obj)
if it_repr:
seq.append(it_repr)
repr_text = ', '.join(seq)
else:
repr_text = self._htmlRepr(val_obj)
if repr_text is None:
return ("-", "none")
if not repr_text:
return (" ", "none")
return (repr_text, self.getMainKind())
except Exception:
logException("Problem with attribute %s: obj = %r" %
(self.getFullName(), val_obj))
return ("???", "none")
def _htmlRepr(self, value):
if not value and value is not 0:
return None
if "json" in self.mKinds:
return jsonHtmlRepr(value)
if not value:
return None
if "link" in self.mKinds:
link = self.normLink(value)
return ('<span title="%s"><a href="%s" target="blank">'
'link</a></span>' % (link, link))
return htmlEscape(value)
| 31.212963 | 67 | 0.501928 |
acf1583b98f10b87da52f0bf19ecb05a7b477cb4 | 3,847 | py | Python | autumn/tools/runs/calibration/managed.py | jtrauer/AuTuMN | 2e1defd0104bbecfe667b8ea5ecaf4bc6741905c | [
"BSD-2-Clause-FreeBSD"
] | 14 | 2020-03-11T06:15:30.000Z | 2022-03-09T03:38:35.000Z | autumn/tools/runs/calibration/managed.py | jtrauer/AuTuMN | 2e1defd0104bbecfe667b8ea5ecaf4bc6741905c | [
"BSD-2-Clause-FreeBSD"
] | 96 | 2020-01-29T05:10:29.000Z | 2022-03-31T01:48:46.000Z | autumn/tools/runs/calibration/managed.py | monash-emu/AuTuMN | fa3b81ef54cf561e0e7364a48f4ff96585dc3310 | [
"BSD-2-Clause-FreeBSD"
] | 10 | 2020-04-24T00:38:00.000Z | 2021-08-19T16:19:03.000Z | import re
import yaml
from autumn.tools import db
from autumn.tools.runs.utils import collate_columns_to_urun
class ManagedCalibrationRun:
    """Access to the calibration outputs of a managed run: lazily collates
    the per-chain MCMC databases and downloads missing data on demand."""

    def __init__(self, manager):
        # manager supplies local_path, run_id and the remote storage handle.
        self._manager = manager
        self.data_path = self._manager.local_path / 'data/calibration_outputs'
        self._collated_db = None  # opened lazily by get_mcmc_runs/params
    def get_mcmc_runs(self, raw=False, auto_download=True):
        """Return the mcmc_run table; pivoted by 'urun' unless raw=True."""
        if self._collated_db is None:
            db_path = self.data_path / 'mcmc_collated.db'
            if not db_path.exists():
                self._collate(auto_download)
            self._collated_db = db.get_database(str(db_path))
        runs = self._collated_db.query('mcmc_run')
        if not raw:
            runs = collate_columns_to_urun(runs)
            runs = runs.pivot_table(index='urun')
        return runs
    def get_mcmc_params(self, raw=False, auto_download=True):
        """Return the mcmc_params table; pivoted urun x name unless raw=True."""
        if self._collated_db is None:
            db_path = self.data_path / 'mcmc_collated.db'
            if not db_path.exists():
                self._collate(auto_download)
            self._collated_db = db.get_database(str(db_path))
        params = self._collated_db.query('mcmc_params')
        if not raw:
            params = collate_columns_to_urun(params,drop=True)
            params = params.pivot_table(index='urun',columns='name')
            # Drop the outer ('value') level left by the pivot.
            params.columns = params.columns.droplevel()
        return params
    def get_mle_params(self, auto_download=True):
        return self._get_meta('mle-params.yml', auto_download)
    def get_priors(self, auto_download=True):
        return self._get_meta('priors-0.yml', auto_download)
    def get_params(self, auto_download=True):
        return self._get_meta('params-0.yml', auto_download)
    def get_targets(self, auto_download=True):
        return self._get_meta('targets-0.yml', auto_download)
    def _get_meta(self, path_ext, auto_download=True):
        """Load a metadata YAML file, downloading all .yml files if absent."""
        meta_path = self.data_path / path_ext
        if not meta_path.exists():
            if auto_download:
                self.download_meta()
            else:
                raise FileNotFoundError(meta_path)
        # NOTE(review): the file handle is never closed, and UnsafeLoader on
        # downloaded data is a deserialization risk - consider `with open`
        # and a safer loader.
        return yaml.load(open(meta_path, 'r'), Loader=yaml.UnsafeLoader)
    def _collate(self, auto_download=True):
        """Merge the per-chain databases into mcmc_collated.db."""
        try:
            database_paths = db.load.find_db_paths(str(self.data_path))
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # a narrower exception type would be safer.
            if auto_download:
                self.download_mcmc()
                database_paths = db.load.find_db_paths(str(self.data_path))
            else:
                raise FileNotFoundError(self.data_path, "Try downloading data")
        collated_db_path = str(self.data_path / 'mcmc_collated.db')
        db.process.collate_databases(
            database_paths, collated_db_path, tables=["mcmc_run", "mcmc_params"]
        )
    def download_mcmc(self):
        """Download every remote mcmc_*.parquet file for this run."""
        mcmc_mstr = f"{self._manager.run_id}/data/calibration_outputs/.*/mcmc_.*.parquet"
        for f in self._manager.remote.list_contents():
            m = re.match(mcmc_mstr, f)
            if m:
                print(f"Downloading {f}")
                self._manager.remote.download(f)
    def download_meta(self):
        """Download all metadata files - anything that's a .yml
        """
        for f in self._manager.remote.list_contents('.yml'):
            self._manager.remote.download(f)
    def download_outputs(self, include_full_outputs=False):
        """Download derived outputs (or all outputs when requested)."""
        if include_full_outputs:
            fmatchstr = ".*outputs.parquet"
        else:
            fmatchstr = "derived_outputs.parquet"
        output_str = f"{self._manager.run_id}/data/calibration_outputs/.*/{fmatchstr}"
        for f in self._manager.remote.list_contents():
            m = re.match(output_str, f)
            if m:
                print(f"Downloading {f}")
                self._manager.remote.download(f)
acf158f90bfcce5416e37aa1e1311346de66083e | 19,542 | py | Python | pyorient/messages/records.py | brucetony/pyorient | d106350dc49b17605beb33e2f29f3ad11c637b88 | [
"Apache-2.0"
] | 142 | 2015-01-12T06:34:59.000Z | 2022-01-19T10:34:30.000Z | pyorient/messages/records.py | wangjingzhou10/pyorient | d106350dc49b17605beb33e2f29f3ad11c637b88 | [
"Apache-2.0"
] | 40 | 2015-05-01T07:24:07.000Z | 2021-08-30T10:08:37.000Z | pyorient/messages/records.py | wangjingzhou10/pyorient | d106350dc49b17605beb33e2f29f3ad11c637b88 | [
"Apache-2.0"
] | 53 | 2015-02-02T18:50:30.000Z | 2021-09-18T12:52:31.000Z | # -*- coding: utf-8 -*-
__author__ = 'Ostico <ostico@gmail.com>'
from .database import BaseMessage
from ..exceptions import PyOrientBadMethodCallException, \
PyOrientConnectionException
from ..otypes import OrientRecord
from ..constants import FIELD_BOOLEAN, FIELD_BYTE, FIELD_BYTES, \
FIELD_INT, FIELD_LONG, FIELD_SHORT, FIELD_STRING, RECORD_CREATE_OP, \
RECORD_DELETE_OP, RECORD_LOAD_OP, RECORD_TYPE_DOCUMENT, RECORD_UPDATE_OP, \
RECORD_TYPES
from ..utils import need_db_opened, parse_cluster_id, \
parse_cluster_position
#
# RECORD CREATE
#
# Create a new record. Returns the position in the cluster
# of the new record. New records can have version > 0 (since v1.0)
# in case the RID has been recycled.
#
# Request: (cluster-id:short)(record-content:bytes)(record-type:byte)(mode:byte)
# Response:
# (cluster-position:long)(record-version:int)(count-of-collection-changes)
# [(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)
# (updated-page-index:long)(updated-page-offset:int)]*
#
# - datasegment-id the segment id to store the data (since version 10 - 1.0-SNAPSHOT).
# -1 Means default one. Removed since 2.0
# - record-type is:
# - 'b': raw bytes
# - 'f': flat data
# - 'd': document
#
# and mode is:
# - 0 = synchronous (default mode waits for the answer)
# - 1 = asynchronous (don't need an answer)
#
# The last part of response is referred to RidBag management.
# Take a look at the main page for more details.
#
class RecordCreateMessage(BaseMessage):
    """Builds and parses the REQUEST_RECORD_CREATE binary operation.

    Wire format is documented in the comment block above this class:
    the request carries cluster id, serialized record content, record type
    and sync/async mode; the response carries the new RID and version.
    """

    def __init__(self, _orient_socket):
        super(RecordCreateMessage, self).__init__(_orient_socket)

        self._data_segment_id = -1  # default
        self._cluster_id = b'0'
        # NOTE(review): initialised to the OrientRecord *class*; prepare()
        # replaces it with an instance before it is used.
        self._record_content = OrientRecord
        self._record_type = RECORD_TYPE_DOCUMENT
        self._mode_async = 0  # means synchronous mode

        # order matters
        self._append((FIELD_BYTE, RECORD_CREATE_OP))

    @need_db_opened
    def prepare(self, params=None):
        """Serialize the request fields; positional params override setters."""
        try:
            # mandatory if not passed by method
            self.set_cluster_id(params[0])
            # mandatory if not passed by method
            self._record_content = params[1]
            self.set_record_type(params[2])  # optional
        except IndexError:
            # Use default for non existent indexes
            pass

        # Ensure the payload is an OrientRecord before encoding.
        record = self._record_content
        if not isinstance(record, OrientRecord):
            record = self._record_content = OrientRecord(record)

        o_record_enc = self.get_serializer().encode(record)

        # Protocol < 24 still expects the (since removed) data-segment id.
        if self.get_protocol() < 24:
            self._append((FIELD_INT, int(self._data_segment_id)))

        self._append((FIELD_SHORT, int(self._cluster_id)))
        self._append((FIELD_STRING, o_record_enc))
        self._append((FIELD_BYTE, self._record_type))
        self._append((FIELD_BOOLEAN, self._mode_async))

        return super(RecordCreateMessage, self).prepare()

    def fetch_response(self):
        """Decode the response and return the created record with RID/version.

        Inside a transaction the server reply is deferred, so the message
        object itself is returned unparsed.
        """
        # skip execution in case of transaction
        if self._orientSocket.in_transaction is True:
            return self

        # Response layout changed at protocol 26: cluster-id became explicit.
        if self.get_protocol() > 25:
            self._append(FIELD_SHORT)  # cluster-id
        self._append(FIELD_LONG)  # cluster-position
        self._append(FIELD_INT)  # record-version

        result = super(RecordCreateMessage, self).fetch_response()

        # There are some strange behaviours with protocols between 19 and 23
        # the INT ( count-of-collection-changes ) in documentation
        # is present, but don't know why,
        #
        # Not every time this INT is present!!!!
        # On Protocol version between 21 and 23 record Upload/Create could
        # not work
        chng = 0
        _changes = []
        if self.get_protocol() > 21:
            try:
                chng = self._decode_field(FIELD_INT)
                """ count-of-collection-changes """
            except (PyOrientConnectionException, TypeError):
                pass

        try:
            if chng > 0 and self.get_protocol() > 23:
                # RidBag bookkeeping entries appended after the main payload.
                for x in range(0, chng):
                    change = [
                        self._decode_field(FIELD_LONG),  # (uuid-most-sig-bits:long)
                        self._decode_field(FIELD_LONG),  # (uuid-least-sig-bits:long)
                        self._decode_field(FIELD_LONG),  # (updated-file-id:long)
                        self._decode_field(FIELD_LONG),  # (updated-page-index:long)
                        self._decode_field(FIELD_INT)  # (updated-page-offset:int)
                    ]
                    _changes.append(change)
        except IndexError:
            # Should not happen because of protocol check
            pass

        if self.get_protocol() > 25:
            rid = "#" + str(result[0]) + ":" + str(result[1])
            version = result[2]
        else:
            rid = "#" + self._cluster_id + ":" + str(result[0])
            version = result[1]

        # Stamp the freshly assigned RID/version onto the record in place.
        self._record_content.update(
            __version=version,
            __rid=rid
        )

        return self._record_content  # [ self._record_content, _changes ]

    def set_data_segment_id(self, data_segment_id):
        self._data_segment_id = data_segment_id
        return self

    def set_cluster_id(self, cluster_id):
        self._cluster_id = parse_cluster_id(cluster_id)
        return self

    def set_record_content(self, record):
        self._record_content = record
        return self

    def set_record_type(self, record_type):
        """Set the record type; raises on anything outside RECORD_TYPES."""
        if record_type in RECORD_TYPES:
            # user choice storage if present
            self._record_type = record_type
        else:
            raise PyOrientBadMethodCallException(
                record_type + ' is not a valid record type', []
            )
        return self

    def set_mode_async(self):
        self._mode_async = 1
        return self
#
# RECORD DELETE
#
# Delete a record by its RecordID. During the optimistic transaction
# the record will be deleted only if the versions match. Returns true
# if has been deleted otherwise false.
#
# Request: (cluster-id:short)(cluster-position:long)(record-version:int)(mode:byte)
# Response: (payload-status:byte)
#
# mode is:
# 0 = synchronous (default mode waits for the answer)
# 1 = asynchronous (don't need an answer)
#
# payload-status returns 1 if the record has been deleted, otherwise 0.
# If the record didn't exist 0 is returned.
#
class RecordDeleteMessage(BaseMessage):
    """Builds and parses the REQUEST_RECORD_DELETE binary operation.

    Wire format is documented in the comment block above this class.
    Deletion succeeds only when record versions match (optimistic locking).
    """

    def __init__(self, _orient_socket):
        super(RecordDeleteMessage, self).__init__(_orient_socket)

        self._cluster_id = b'0'
        self._cluster_position = b'0'
        self._record_version = -1
        self._mode_async = 0  # means synchronous mode

        # only needed for transactions
        self._record_type = RECORD_TYPE_DOCUMENT

        # order matters
        self._append((FIELD_BYTE, RECORD_DELETE_OP))

    @need_db_opened
    def prepare(self, params=None):
        """Serialize the delete request; positional params override setters."""
        try:
            # mandatory if not passed by method
            self.set_cluster_id(params[0])
            # mandatory if not passed by method
            self.set_cluster_position(params[1])
            self._record_version = params[2]  # optional
            self._mode_async = params[3]  # optional
        except IndexError:
            # Use default for non existent indexes
            pass

        self._append((FIELD_SHORT, int(self._cluster_id)))
        self._append((FIELD_LONG, int(self._cluster_position)))
        self._append((FIELD_INT, int(self._record_version)))
        self._append((FIELD_BOOLEAN, self._mode_async))

        return super(RecordDeleteMessage, self).prepare()

    def fetch_response(self):
        """Return the payload-status: truthy when the record was deleted."""
        # skip execution in case of transaction
        if self._orientSocket.in_transaction is True:
            return self

        self._append(FIELD_BOOLEAN)  # payload-status
        return super(RecordDeleteMessage, self).fetch_response()[0]

    def set_record_version(self, _record_version):
        self._record_version = _record_version
        return self

    def set_cluster_id(self, cluster_id):
        self._cluster_id = parse_cluster_id(cluster_id)
        return self

    def set_cluster_position(self, _cluster_position):
        self._cluster_position = parse_cluster_position(_cluster_position)
        return self

    def set_record_type(self, _record_type):
        self._record_type = _record_type
        return self

    def set_mode_async(self):
        self._mode_async = 1
        return self
#
# RECORD LOAD
#
# Load a record by RecordID, according to a fetch plan
#
# Request: (cluster-id:short)(cluster-position:long)
# (fetch-plan:string)(ignore-cache:byte)(load-tombstones:byte)
# Response: [(payload-status:byte)[(record-content:bytes)
# (record-version:int)(record-type:byte)]*]+
#
# fetch-plan, the fetch plan to use or an empty string
# ignore-cache, tells if the cache must be ignored: 1 = ignore the cache,
# 0 = not ignore. since protocol v.9 (introduced in release 1.0rc9)
# load-tombstones, the flag which indicates whether information about
# deleted record should be loaded. The flag is applied only to autosharded
# storage and ignored otherwise.
#
# payload-status can be:
# 0: no records remain to be fetched
# 1: a record is returned as resultset
# 2: a record is returned as pre-fetched to be loaded in client's cache only.
# It's not part of the result set but the client knows that it's available for
# later access. This value is not currently used.
#
# record-type is
# 'b': raw bytes
# 'f': flat data
# 'd': document
#
class RecordLoadMessage(BaseMessage):
    """Builds and parses the REQUEST_RECORD_LOAD binary operation.

    Loads one record by RID according to a fetch plan; pre-fetched cache
    records are drained via an async-record callback. Wire format is in the
    comment block above this class.
    """

    def __init__(self, _orient_socket):
        super(RecordLoadMessage, self).__init__(_orient_socket)

        self._record_id = ''
        self._fetch_plan = '*:0'  # default: load everything, depth 0
        self.cached_records = []

        # order matters
        self._append((FIELD_BYTE, RECORD_LOAD_OP))

    @need_db_opened
    def prepare(self, params=None):
        """Serialize the load request; raises on a malformed RID."""
        try:
            self._record_id = params[0]  # mandatory if not passed with set
            self._fetch_plan = params[1]  # user choice if present

            # callback function use to operate
            # over the async fetched records
            self.set_callback(params[2])
        except IndexError:
            # Use default for non existent indexes
            pass

        try:
            _cluster = parse_cluster_id(self._record_id)
            _position = parse_cluster_position(self._record_id)
        except ValueError:
            raise PyOrientBadMethodCallException("Not valid Rid to load: " + self._record_id, [])

        self._append((FIELD_SHORT, int(_cluster)))
        self._append((FIELD_LONG, int(_position)))
        self._append((FIELD_STRING, self._fetch_plan))
        self._append((FIELD_BYTE, "0"))  # ignore-cache: 0 = use the cache
        self._append((FIELD_BYTE, "0"))  # load-tombstones: 0 = skip deleted

        return super(RecordLoadMessage, self).prepare()

    def fetch_response(self):
        """Decode the response and return an OrientRecord (empty if not found)."""
        self._append(FIELD_BYTE)  # payload-status
        _status = super(RecordLoadMessage, self).fetch_response()[0]

        _record = OrientRecord()
        if _status != 0:
            # Field order flipped at protocol 28; track where content lands.
            if self.get_protocol() > 27:
                self._append(FIELD_BYTE)  # record type
                self._append(FIELD_INT)  # record version
                self._append(FIELD_BYTES)  # record content
                rec_position = 2
            else:
                self._append(FIELD_BYTES)  # record content
                self._append(FIELD_INT)  # record version
                self._append(FIELD_BYTE)  # record type
                rec_position = 0

            __record = super(RecordLoadMessage, self).fetch_response(True)

            # bug in orientdb csv serialization in snapshot 2.0,
            # strip trailing spaces
            class_name, data = self.get_serializer().decode(__record[rec_position].rstrip())

            self._read_async_records()  # get cache

            _record = OrientRecord(
                dict(
                    __o_storage=data,
                    __o_class=class_name,
                    __version=__record[1],
                    __rid=self._record_id
                )
            )

        return _record

    def set_record_id(self, _record_id):
        self._record_id = _record_id
        return self

    def set_fetch_plan(self, _fetch_plan):
        self._fetch_plan = _fetch_plan
        return self

    def set_callback(self, func):
        """Register the async-records callback; must be callable."""
        if hasattr(func, '__call__'):
            self._callback = func
        else:
            raise PyOrientBadMethodCallException(func + " is not a callable "
                                                        "function", [])
        return self
#
# RECORD UPDATE
#
# Update a record. Returns the new record's version.
# Request: (cluster-id:short)(cluster-position:long)
# (update-content:boolean)(record-content:bytes)(record-version:int)
# (record-type:byte)(mode:byte)
# Response: (record-version:int)(count-of-collection-changes)
# [(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)
# (updated-page-index:long)(updated-page-offset:int)]*
#
# Where record-type is:
# 'b': raw bytes
# 'f': flat data
# 'd': document
#
# and record-version policy is:
# '-1': Document update, version increment, no version control.
# '-2': Document update, no version control nor increment.
# '-3': Used internal in transaction rollback (version decrement).
# '>-1': Standard document update (version control).
#
# and mode is:
# 0 = synchronous (default mode waits for the answer)
# 1 = asynchronous (don't need an answer)
#
# and update-content is:
# true - content of record has been changed and content should
# be updated in storage
# false - the record was modified but its own content has
# not been changed. So related collections (e.g. rig-bags) have to
# be updated, but record version and content should not be.
#
# The last part of response is referred to RidBag management.
# Take a look at the main page for more details.
#
class RecordUpdateMessage(BaseMessage):
    """Builds and parses the REQUEST_RECORD_UPDATE binary operation.

    Wire format, version-policy values and the update-content flag are
    documented in the comment block above this class.
    """

    def __init__(self, _orient_socket):
        super(RecordUpdateMessage, self).__init__(_orient_socket)

        self._data_segment_id = -1  # default
        self._cluster_id = b'0'
        self._cluster_position = 0
        self._record_content = ''

        # True: content of record has been changed
        # and content should be updated in storage
        # False: the record was modified but its own
        # content has not been changed.
        # So related collections (e.g. rid-bags) have to be updated, but
        # record version and content should not be.
        # NOT USED before protocol 23
        self._update_content = True

        # > -1 default Standard document update (version control)
        self._record_version_policy = -1

        # Used for transactions
        self._record_version = -1

        self._record_type = RECORD_TYPE_DOCUMENT
        self._mode_async = 0  # means synchronous mode

        # order matters
        self._append((FIELD_BYTE, RECORD_UPDATE_OP))

    @need_db_opened
    def prepare(self, params=None):
        """Serialize the update request; positional params override setters."""
        try:
            # mandatory if not passed by method
            self.set_cluster_id(params[0])
            # mandatory if not passed by method
            self.set_cluster_position(params[1])
            # mandatory if not passed by method
            self._record_content = params[2]

            self._record_version = params[3]  # Optional|Needed for transaction
            self.set_record_type(params[4])  # optional
            self._record_version_policy = params[5]  # optional
            self._mode_async = params[6]  # optional
            self._update_content = params[7]  # optional
        except IndexError:
            # Use default for non existent indexes
            pass

        # Ensure the payload is an OrientRecord before encoding.
        record = self._record_content
        if not isinstance(record, OrientRecord):
            record = self._record_content = OrientRecord(record)

        o_record_enc = self.get_serializer().encode(record)

        self._append((FIELD_SHORT, int(self._cluster_id)))
        self._append((FIELD_LONG, int(self._cluster_position)))
        # update-content flag only exists from protocol 23 onwards.
        if self.get_protocol() >= 23:
            self._append((FIELD_BOOLEAN, self._update_content))
        self._append((FIELD_STRING, o_record_enc))
        self._append((FIELD_INT, int(self._record_version_policy)))
        self._append((FIELD_BYTE, self._record_type))
        self._append((FIELD_BOOLEAN, self._mode_async))

        return super(RecordUpdateMessage, self).prepare()

    def fetch_response(self):
        """Decode the response; returns [record, change-count, changes].

        Inside a transaction the server reply is deferred, so the message
        object itself is returned unparsed.
        """
        # skip execution in case of transaction
        if self._orientSocket.in_transaction is True:
            return self

        self._append(FIELD_INT)  # record-version

        result = super(RecordUpdateMessage, self).fetch_response()

        # There are some strange behaviours with protocols between 19 and 23
        # the INT ( count-of-collection-changes ) in documentation
        # is present, but don't know why,
        #
        # Not every time this INT is present!!!!
        # On Protocol version between 21 and 23 record Upload/Create could
        # not work
        chng = 0
        _changes = []
        if self.get_protocol() > 21:
            try:
                chng = self._decode_field(FIELD_INT)
                """ count-of-collection-changes """
            except (PyOrientConnectionException, TypeError):
                pass

        try:
            if chng > 0 and self.get_protocol() > 23:
                # RidBag bookkeeping entries appended after the main payload.
                for x in range(0, chng):
                    change = [
                        self._decode_field(FIELD_LONG),  # (uuid-most-sig-bits:long)
                        self._decode_field(FIELD_LONG),  # (uuid-least-sig-bits:long)
                        self._decode_field(FIELD_LONG),  # (updated-file-id:long)
                        self._decode_field(FIELD_LONG),  # (updated-page-index:long)
                        self._decode_field(FIELD_INT)  # (updated-page-offset:int)
                    ]
                    _changes.append(change)
        except IndexError:
            # append an empty field
            result.append(None)

        # Stamp the new version onto the record in place.
        self._record_content.update(
            __version=result[0]
        )

        return [self._record_content, chng, _changes]

    def set_data_segment_id(self, data_segment_id):
        self._data_segment_id = data_segment_id
        return self

    def set_cluster_id(self, cluster_id):
        self._cluster_id = parse_cluster_id(cluster_id)
        return self

    def set_cluster_position(self, _cluster_position):
        self._cluster_position = parse_cluster_position(_cluster_position)
        return self

    def set_record_content(self, record):
        self._record_content = record
        return self

    def set_record_type(self, record_type):
        """Set the record type; raises on anything outside RECORD_TYPES."""
        if record_type in RECORD_TYPES:
            # user choice storage if present
            self._record_type = record_type
        else:
            raise PyOrientBadMethodCallException(
                record_type + ' is not a valid record type', []
            )
        return self

    def set_mode_async(self):
        self._mode_async = 1
        return self

    def set_record_version_policy(self, _policy):
        self._record_version_policy = _policy
        return self

    def set_no_update_content(self):
        self._update_content = False
        return self
| 33.635112 | 97 | 0.62793 |
acf159059e6376d97e71e99b901a92152fd16579 | 459 | py | Python | redis_yy/__about__.py | guaidashu/redis_yy | c1ed760bf835414b1c1a66a5a47ca81203610ed0 | [
"MIT"
] | null | null | null | redis_yy/__about__.py | guaidashu/redis_yy | c1ed760bf835414b1c1a66a5a47ca81203610ed0 | [
"MIT"
] | null | null | null | redis_yy/__about__.py | guaidashu/redis_yy | c1ed760bf835414b1c1a66a5a47ca81203610ed0 | [
"MIT"
] | null | null | null | """
Create by yy on 2019/9/27
"""
# Distribution metadata for the redis-yy package, consumed by setup.py.
__title__ = 'redis-yy'
__description__ = 'A tool which is used to connect redis, designed by yy'
__url__ = 'https://github.com/guaidashu/redis_yy'
__version_info__ = ('0', '0', '8')
# Dotted version string derived from the tuple above, e.g. "0.0.8".
__version__ = '.'.join(__version_info__)
__author__ = 'guaidashu'
__author_email__ = 'song42960@gmail.com'
__maintainer__ = 'YY blog'
__license__ = 'MIT'
__copyright__ = '(c) 2019 by guaidashu'
# Runtime dependencies (pinned upper bound for the redis client).
__install_requires__ = [
    "redis <= 3.3.8"
]
| 25.5 | 73 | 0.703704 |
acf15b01f283e6440793ef0cfd10791cb87b7e53 | 2,349 | py | Python | server/agents/openchat/utils/terminal_utils.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 53 | 2021-07-20T04:01:57.000Z | 2022-03-13T17:31:08.000Z | server/agents/openchat/utils/terminal_utils.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 58 | 2021-08-20T02:22:16.000Z | 2021-12-13T10:38:58.000Z | server/agents/openchat/utils/terminal_utils.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 13 | 2021-08-23T20:16:14.000Z | 2022-01-31T23:59:21.000Z | import os
class Colors:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
RESET = '\033[0m'
def center(text):
try:
return text.center(os.get_terminal_size().columns)
except:
return text
def cprint(text, color=Colors.RESET, **kwargs):
print(color + text + Colors.RESET, **kwargs)
def cinput(text, color=Colors.RESET, **kwargs):
return input(color + text + Colors.RESET, **kwargs)
def draw_openchat():
logos = [
""" """,
""" """,
""" """,
""" ____ ___ ____ ___ _____ _ _ ____ _____ ___ _ _ ____ """,
""" | _ \ |_ _| / ___| |_ _| |_ _| / \ | | | __ ) | ____| |_ _| | \ | | / ___| """,
""" | | | | | | | | _ | | | | / _ \ | | | _ \ | _| | | | \| | | | _ """,
""" | |_| | | | | |_| | | | | | / ___ \ | |___ | |_) | | |___ | | | |\ | | |_| | """,
""" |____/ |___| \____| |___| |_| /_/ \_\ |_____| |____/ |_____| |___| |_| \_| \____| """,
""" """,
""" """,
""" ... LOADING ... """,
""" """,
""" """,
""" """,
]
for line in logos:
cprint(
text=center(line),
color=Colors.CYAN,
)
| 43.5 | 116 | 0.222648 |
acf15ba1b514789d2e242b9ad1465cb560462527 | 3,664 | py | Python | python3/koans/about_with_statements.py | MichaelSEA/python_koans | a85db58d3b52d6076e15594d86da9a92a7b44a15 | [
"MIT"
] | null | null | null | python3/koans/about_with_statements.py | MichaelSEA/python_koans | a85db58d3b52d6076e15594d86da9a92a7b44a15 | [
"MIT"
] | null | null | null | python3/koans/about_with_statements.py | MichaelSEA/python_koans | a85db58d3b52d6076e15594d86da9a92a7b44a15 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutSandwichCode in the Ruby Koans
#
from runner.koan import *
import re # For regular expression string comparisons
class AboutWithStatements(Koan):
def count_lines(self, file_name):
try:
file = open(file_name)
try:
return len(file.readlines())
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_counting_lines(self):
self.assertEqual(4, self.count_lines("example_file.txt"))
# ------------------------------------------------------------------
def find_line(self, file_name):
try:
file = open(file_name)
try:
for line in file.readlines():
match = re.search('e', line)
if match:
return line
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_finding_lines(self):
self.assertEqual('test\n', self.find_line("example_file.txt"))
## ------------------------------------------------------------------
## THINK ABOUT IT:
##
## The count_lines and find_line are similar, and yet different.
## They both follow the pattern of "sandwich code".
##
## Sandwich code is code that comes in three parts: (1) the top slice
## of bread, (2) the meat, and (3) the bottom slice of bread.
## The bread part of the sandwich almost always goes together, but
## the meat part changes all the time.
##
## Because the changing part of the sandwich code is in the middle,
## abstracting the top and bottom bread slices to a library can be
## difficult in many languages.
##
## (Aside for C++ programmers: The idiom of capturing allocated
## pointers in a smart pointer constructor is an attempt to deal with
## the problem of sandwich code for resource allocation.)
##
## Python solves the problem using Context Managers. Consider the
## following code:
##
class FileContextManager():
def __init__(self, file_name):
self._file_name = file_name
self._file = None
def __enter__(self):
self._file = open(self._file_name)
return self._file
def __exit__(self, cls, value, tb):
self._file.close()
# Now we write:
def count_lines2(self, file_name):
with self.FileContextManager(file_name) as file:
return len(file.readlines())
def test_counting_lines2(self):
self.assertEqual(4, self.count_lines2("example_file.txt"))
# ------------------------------------------------------------------
def find_line2(self, file_name):
# Rewrite find_line using the Context Manager.
with self.FileContextManager(file_name) as file:
for line in file.readlines():
match = re.search('e', line)
if match:
return line
pass
def test_finding_lines2(self):
self.assertEqual("test\n", self.find_line2("example_file.txt"))
self.assertNotEqual("test", self.find_line2("example_file.txt"))
# ------------------------------------------------------------------
def count_lines3(self, file_name):
with open(file_name) as file:
return len(file.readlines())
def test_open_already_has_its_own_built_in_context_manager(self):
self.assertEqual(4, self.count_lines3("example_file.txt"))
| 32.424779 | 73 | 0.553493 |
acf15bd6887d386d41d669c53673eefc6be520c4 | 1,571 | py | Python | check_nash.py | ekrell/fujin | e798e5f5235a621d0cd9f67d8c1d955da6971a6a | [
"MIT"
] | 2 | 2021-04-23T04:11:12.000Z | 2021-10-19T11:11:12.000Z | check_nash.py | ekrell/fujin | e798e5f5235a621d0cd9f67d8c1d955da6971a6a | [
"MIT"
] | null | null | null | check_nash.py | ekrell/fujin | e798e5f5235a621d0cd9f67d8c1d955da6971a6a | [
"MIT"
] | 4 | 2019-02-18T17:05:57.000Z | 2021-10-17T06:48:52.000Z | import solver_tools
import numpy as np
import time
# Template record for a two-player game: payoff matrix, the known pure and
# mixed Nash equilibria (used as ground truth by checkGame) and a name.
gameTemplate = { "matrix" : None,
                 "pure_nash" : None,
                 "mixed_nash" : None,
                 "name" : None,
               }
def checkGame(game, method=0):
    """Solve `game` and print its known vs. computed Nash equilibria.

    Args:
        game: dict in the `gameTemplate` shape ("matrix", "pure_nash",
            "mixed_nash", "name").
        method: solver method index forwarded to solver_tools.solveGame.
    """
    start_time = time.time()
    y, z, i, j, v = solver_tools.solveGame(game["matrix"], method)
    duration = time.time() - start_time

    print("Game %s" % (game["name"]))
    # The original used Python-2 trailing-comma prints, which emit unwanted
    # newlines under Python 3; end="" keeps the equilibria on one line.
    print("  Known pure Nash: ", end="")
    for pure in game["pure_nash"]:
        print("(%d, %d), " % (pure[0], pure[1]), end="")
    print("")
    print("  Found pure Nash: (%d, %d)" % (i, j))
    print("  Known mixed Nash:")
    print("    y:", game["mixed_nash"][0])
    print("    z:", game["mixed_nash"][1])
    print("  Found mixed Nash:")
    print("    y:", list(y))
    print("    z:", list(z))
    print("Solver (method %d) took %s secs" % (method, duration))
    print("------")
def main():
    """Exercise the solver on two reference games with known equilibria."""
    game_one = dict(gameTemplate)
    game_one.update(
        name="A",
        matrix=np.array([[-1, -7, -3, -4], [-5, -6, -4, -5], [-7, -2, 0, -3]]),
        pure_nash=[(1, 2)],
        mixed_nash=[(0, 1, 0), (0, 0, 1, 0)],
    )

    # Matching pennies: no pure equilibrium, uniform mixed strategy.
    game_two = dict(gameTemplate)
    game_two.update(
        name="headsORtails",
        matrix=np.array([[1, -1], [-1, 1]]),
        pure_nash=[],
        mixed_nash=[(0.5, 0.5), (0.5, 0.5)],
    )

    checkGame(game_one, method=0)
    print("\n\n")
    checkGame(game_two, method=0)
    return None


if __name__ == "__main__":
    main()
| 27.561404 | 90 | 0.502864 |
acf15bdd07deecf04cbbb576b3d3643027a39b7f | 8,683 | py | Python | Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule_test.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 2 | 2021-12-06T21:38:24.000Z | 2022-01-13T08:23:36.000Z | Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule_test.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 87 | 2022-02-23T12:10:53.000Z | 2022-03-31T11:29:05.000Z | Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule_test.py | henry-sue-pa/content | 043c6badfb4f9c80673cad9242fdea72efe301f7 | [
"MIT"
] | 2 | 2022-01-05T15:27:01.000Z | 2022-02-01T19:27:43.000Z | from CommonServerPython import DemistoException
import pytest
import requests
import demistomock as demisto
from pathlib import Path
import os
from pytest import raises
from pytest_mock import MockerFixture
from time import sleep
import subprocess
from typing import Optional
# Self-signed key/certificate pair used only to exercise the HTTPS branch of
# the nginx config tests below. Not a real secret.
SSL_TEST_KEY = '''-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDd5FcvCKgtXjkY
aiDdqpFAYKw6WxNEpZIGjzD9KhEqr7OZjpPoLeyGh1U6faAcN6XpkQugFA/2Gq+Z
j/pe1abiTCbctdE978FYVjXxbEEAtEn4x28s/bKah/xjjw+RjUyQB9DsioFkV1eN
9iJh5eOOIOjTDMBt7SxY1HivC0HjUKjCaMjdH2WxGu4na9phPOa7zixlgLqZGC8g
E1Ati5j3nOEOlmrNIf1Z/4zdJzEaMprBCymfEvrgMC7ibG9AokDcAj6Sl4xgvTRp
tTczCbUxF1jsnNNbuLyq/RuQ85SWB3mrRKT4OgPtz/ga3sm4l7Uq/YN71Gr/Lxaq
bkwWVMd/AgMBAAECggEBAKnMfacBYejtzJVRSXs3dlWkZMd3QGRsqzUXyG5DDcXz
lGVyxN6Mng5Ia8EJt0dAklcM5q+GCrzSqQPDON3vcviDO83z2H4kBXm65yarJ4cJ
b/3PZ9UvAsjcPRhWtpw0W51wTcFlMCT/7YE2FBOEX0E5D9HJVUwJjcEgPoX9AFuY
xYVpFvr1AoORde/RoJGoe+Z81hIRvcbrzfLHEMCB0pY0wxBuD5tyhEunIwLxG+6v
T1OHtuXDATEGabZQJKuhBfuP00YFRKxHIBLWPtkplQGFAXmBEeD5NIYfo+RBQFUH
GuvDTHoEvecn9ZHF4eOjJ88TXaGuXrFHwa0g0KMDNaECgYEA+ld2bkC4RXNWIzYI
4bOH7UBFrd74nz4zqNd2UZqP9R1al8nLgcESiT6izBbR+6wnNANIon5fXYGFK+wu
NGvKwuL1Xf04Ro/Z/i03QrV5fTgL/F8NX9F0kc6znxli9SrpswSjb1ZUoJmQXCew
ZYkCVavy3Zpgk8uHeeaHOOAI6k8CgYEA4uhC2Jy9Ysq6Eg79hVq0xHtXLl0VWfrU
5mugItrH90LmsCvKK4Qzg33BjhIMbE9vq63yFxW08845weuxUV6LalPSLOclE7D2
6exG5grcdGpqyWKc2qCAXP2uLys68cOfWduJoVUYsdAGbyNdvkI69VcTsI8pV6kR
bjzP+l50c9ECgYA3CVN4GbJpUln1k8OQGzAe8Kpg90whdkNVM0lH13seoD1ycWLU
O+YfVi3kQIAZnFdiD/bAAphkrjzg0yO1Up1ZCxx2dV0R5j4+qyIjAFKdPN0ltp/y
GNJP2+mRaLtguvZ17OchaxFf3WLnX7JgICbrPso9/dqNo4k9O3ku/9H18QKBgQDZ
LaMlfsgJ8a2ssSpYZBwW31LvbmqMR/dUX/jSw4KXmDICtrb3db50gX4rw/yeAl4I
/SF0lPMwU9eWU0fRcOORro7BKa+kLEH4XYzyi7y7tEtnW3p0CyExYCFCxmbRlgJE
WEtf3noXXtt5rmkAPJX/0wtmd3ADli+3yn7pzVQ6sQKBgQDJJITERtov019Cwuux
fCRUIbRyUH/PCN/VvsuKFs+BWbFTnqBXRDQetzTyuUvNKiL7GmWQuR/QpgYjLd9W
jxAayhtcVKeL96dqimK9twmw/NC5DveOVoReXx7io4gicmQi7AGq5WRkm8NUZRVE
1dH1Hhp7kjnPlUOUBvKf8mfFxQ==
-----END PRIVATE KEY-----
'''

SSL_TEST_CRT = '''-----BEGIN CERTIFICATE-----
MIIDeTCCAmGgAwIBAgIUaam3vV40bjLs7mabludFi6dRsxkwDQYJKoZIhvcNAQEL
BQAwTDELMAkGA1UEBhMCSUwxEzARBgNVBAgMClNvbWUtU3RhdGUxEzARBgNVBAoM
ClhTT0FSIFRlc3QxEzARBgNVBAMMCnhzb2FyLXRlc3QwHhcNMjEwNTE2MTQzNDU0
WhcNMzAwODAyMTQzNDU0WjBMMQswCQYDVQQGEwJJTDETMBEGA1UECAwKU29tZS1T
dGF0ZTETMBEGA1UECgwKWFNPQVIgVGVzdDETMBEGA1UEAwwKeHNvYXItdGVzdDCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN3kVy8IqC1eORhqIN2qkUBg
rDpbE0SlkgaPMP0qESqvs5mOk+gt7IaHVTp9oBw3pemRC6AUD/Yar5mP+l7VpuJM
Jty10T3vwVhWNfFsQQC0SfjHbyz9spqH/GOPD5GNTJAH0OyKgWRXV432ImHl444g
6NMMwG3tLFjUeK8LQeNQqMJoyN0fZbEa7idr2mE85rvOLGWAupkYLyATUC2LmPec
4Q6Was0h/Vn/jN0nMRoymsELKZ8S+uAwLuJsb0CiQNwCPpKXjGC9NGm1NzMJtTEX
WOyc01u4vKr9G5DzlJYHeatEpPg6A+3P+BreybiXtSr9g3vUav8vFqpuTBZUx38C
AwEAAaNTMFEwHQYDVR0OBBYEFJLT/bq2cGAu6buAQSoeusx439YaMB8GA1UdIwQY
MBaAFJLT/bq2cGAu6buAQSoeusx439YaMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
hvcNAQELBQADggEBACmcsfDI382F64TWtJaEn4pKCTjiJloXfb3curr7qYVfeLUX
jbb6aRha88/PB+6/IC/lR0JjXRWMMQafFFR7rb1290p2YVPE9T5Wc5934M590LxZ
bwa5YsCF+qzBiWPMUs5s/el8AHTnUQdU/CKLMI7ZL2IpyTpfW4PERw2HiOBdgCbl
1DzjH9L1bmCzIhXBR6bUXUn4vjg8VBIQ29uHrLNN1fgDyRB1eAaOs4iuBAZm7IkC
k+cVw239GwbLsYkRg5BpkQF4IC6a4+Iz9fpvpUc/g6jpxtGU0kE2DVWOEAyPOOWC
C/t/GFcoOUze68WuI/BqMAiWhPJ1ioL7RI2ZPvI=
-----END CERTIFICATE-----
'''
def test_nginx_conf(tmp_path: Path, mocker):
    """Generated default server config should listen on the requested port."""
    from NGINXApiModule import create_nginx_server_conf
    conf_file = str(tmp_path / "nginx-test-server.conf")
    # NOTE(review): return_value= makes callingContext a MagicMock; the
    # TAXII2 test below patches it with a plain dict instead — confirm which
    # form the module under test actually reads.
    mocker.patch.object(demisto, 'callingContext', return_value={'context': {}})
    create_nginx_server_conf(conf_file, 12345, params={})
    with open(conf_file, 'rt') as f:
        conf = f.read()
    assert 'listen 12345 default_server' in conf
def test_nginx_conf_taxii2(tmp_path: Path, mocker):
    """TAXII2-brand config should proxy auth, accept and range headers."""
    from NGINXApiModule import create_nginx_server_conf
    # The TAXII2-specific config branch is selected via the integration
    # brand in the calling context.
    mocker.patch.object(demisto, 'callingContext', {'context': {'IntegrationBrand': 'TAXII2 Server'}})
    conf_file = str(tmp_path / "nginx-test-server.conf")
    create_nginx_server_conf(conf_file, 12345, params={'version': '2.0', 'credentials': {'identifier': 'identifier'}})
    with open(conf_file, 'rt') as f:
        conf = f.read()
    assert '$http_authorization' in conf
    assert '$http_accept' in conf
    assert 'proxy_set_header Range $http_range;' in conf
    assert '$http_range' in conf
# Handle to the nginx subprocess started by a test (if any), so the fixture
# below can terminate it during teardown.
NGINX_PROCESS: Optional[subprocess.Popen] = None


@pytest.fixture
def nginx_cleanup():
    """Teardown-only fixture: delete the generated conf and stop nginx."""
    yield
    from NGINXApiModule import NGINX_SERVER_CONF_FILE
    Path(NGINX_SERVER_CONF_FILE).unlink(missing_ok=True)
    global NGINX_PROCESS
    if NGINX_PROCESS:
        NGINX_PROCESS.terminate()
        # let the process terminate
        NGINX_PROCESS.wait(1.0)
        NGINX_PROCESS = None
# These tests need a real nginx binary, which only exists in the
# flask-nginx docker image.
docker_only = pytest.mark.skipif('flask-nginx' not in os.getenv('DOCKER_IMAGE', ''), reason='test should run only within docker')


@docker_only
def test_nginx_start_fail(mocker: MockerFixture, nginx_cleanup):
    """start_nginx_server should raise ValueError when the config is invalid."""
    def nginx_bad_conf(file_path: str, port: int, params: dict):
        # Write a config with an unknown directive so `nginx -t` fails.
        with open(file_path, 'wt') as f:
            f.write('server {bad_stuff test;}')

    import NGINXApiModule as module
    mocker.patch.object(module, 'create_nginx_server_conf', side_effect=nginx_bad_conf)
    # Idiomatic pytest: replaces the original try/pytest.fail/except dance
    # and asserts the error message in the same step.
    with raises(ValueError, match='bad_stuff'):
        module.start_nginx_server(12345, {})
@docker_only
def test_nginx_start_fail_directive(nginx_cleanup, mocker):
    """Test that nginx fails to start when an invalid global directive is passed."""
    import NGINXApiModule as module
    # NOTE(review): patching with return_value sets demisto.callingContext to a
    # MagicMock (its *call* returns the dict), whereas the taxii2 test passes
    # the dict positionally -- confirm which form the module actually reads.
    mocker.patch.object(demisto, 'callingContext', return_value={'context': {}})
    # Idiomatic pytest.raises instead of manual try/pytest.fail/except; the
    # bad directive must surface in the raised error's message.
    with pytest.raises(ValueError, match='bad_directive'):
        module.start_nginx_server(12345, {'nginx_global_directives': 'bad_directive test;'})
@docker_only
@pytest.mark.filterwarnings('ignore::urllib3.exceptions.InsecureRequestWarning')
@pytest.mark.parametrize('params', [
    {},
    {'certificate': SSL_TEST_CRT, 'key': SSL_TEST_KEY},
])
def test_nginx_test_start_valid(nginx_cleanup, params, mocker):
    """test_nginx_server should start and then stop nginx, with and without TLS params."""
    import NGINXApiModule as module
    mocker.patch.object(demisto, 'callingContext', return_value={'context': {}})
    module.test_nginx_server(11300, params)
    # check that nginx process is not up
    sleep(0.5)
    ps_out = subprocess.check_output(['ps', 'aux'], text=True)
    assert 'nginx' not in ps_out
@docker_only
def test_nginx_log_process(nginx_cleanup, mocker: MockerFixture):
    """Start nginx, trigger one access-log line, then verify nginx_log_process
    forwards access logs to demisto.info, error logs to demisto.error, and
    rolls the log files over (old files removed, current files emptied)."""
    import NGINXApiModule as module
    # clear logs for test
    Path(module.NGINX_SERVER_ACCESS_LOG).unlink(missing_ok=True)
    Path(module.NGINX_SERVER_ERROR_LOG).unlink(missing_ok=True)
    global NGINX_PROCESS
    mocker.patch.object(demisto, 'callingContext', return_value={'context': {}})
    NGINX_PROCESS = module.start_nginx_server(12345, {})
    sleep(0.5)  # give nginx time to start
    # create a request to get a log line
    requests.get('http://localhost:12345/nginx-test?unit_testing')
    sleep(0.2)
    mocker.patch.object(demisto, 'info')
    mocker.patch.object(demisto, 'error')
    module.nginx_log_process(NGINX_PROCESS)
    # call_args is tuple (args list, kwargs). we only need the args
    arg = demisto.info.call_args[0][0]
    assert 'nginx access log' in arg
    assert 'unit_testing' in arg
    arg = demisto.error.call_args[0][0]
    assert '[warn]' in arg
    assert 'the master process runs with super-user privileges' in arg
    # make sure old file was removed
    assert not Path(module.NGINX_SERVER_ACCESS_LOG + '.old').exists()
    assert not Path(module.NGINX_SERVER_ERROR_LOG + '.old').exists()
    # make sure log was rolled over files should be of size 0
    assert not Path(module.NGINX_SERVER_ACCESS_LOG).stat().st_size
    assert not Path(module.NGINX_SERVER_ERROR_LOG).stat().st_size
def test_nginx_web_server_is_down(requests_mock, capfd):
    """A 404 from the nginx test endpoint must raise a DemistoException."""
    import NGINXApiModule as module
    # capfd.disabled avoids pytest's capture interfering with the module's output
    with capfd.disabled():
        requests_mock.get('http://localhost:9009/nginx-test', status_code=404)
        with raises(DemistoException,
                    match='Testing nginx server: 404 Client Error: None for url: http://localhost:9009/nginx-test'):
            module.test_nginx_web_server(9009, {})
def test_nginx_web_server_is_up_running(requests_mock):
    """A 200 response from the nginx test endpoint must not raise."""
    import NGINXApiModule as module
    requests_mock.get('http://localhost:9009/nginx-test', status_code=200, text='Welcome to nginx')
    try:
        module.test_nginx_web_server(9009, {})
    except DemistoException as ex:
        # Turn an unexpected exception into an explicit test failure.
        assert False, f'Raised an exception unexpectedly. {ex}'
| 42.985149 | 129 | 0.795002 |
acf15c687645edb053e6adc0680960c0a6d8ac58 | 1,455 | py | Python | celery_naive_tracker.py | penolove/Towards-Realtime-MOT | ce27578503194f410c340f366991476919e074c1 | [
"MIT"
] | null | null | null | celery_naive_tracker.py | penolove/Towards-Realtime-MOT | ce27578503194f410c340f366991476919e074c1 | [
"MIT"
] | null | null | null | celery_naive_tracker.py | penolove/Towards-Realtime-MOT | ce27578503194f410c340f366991476919e074c1 | [
"MIT"
] | null | null | null | import os
import argparse
import random
from celery import Celery
from eyewitness.mot.video import Mp4AsVideoData
from eyewitness.mot.visualize_mot import draw_tracking_result
from naive_tracker import TowardRealtimeMOTracker, get_spaced_colors
# Celery broker URL; defaults to the docker-compose rabbitmq service.
BROKER_URL = os.environ.get('broker_url', 'amqp://guest:guest@rabbitmq:5672')
celery = Celery('tasks', broker=BROKER_URL)
args = argparse.Namespace()
args.cfg = os.environ.get('cfg', 'cfg/yolov3.cfg')
args.weights = os.environ.get('weights', 'weights/latest.pt')
img_size = os.environ.get('img_size', (1088, 608))
if isinstance(img_size, str):
img_size = (int(i) for i in img_size.split(','))
args.img_size = img_size
args.iou_thres = float(os.environ.get('iou_thres', 0.5))
args.conf_thres = float(os.environ.get('conf_thres', 0.5))
args.nms_thres = float(os.environ.get('nms_thres', 0.4))
args.min_box_area = float(os.environ.get('min_box_area', 200))
args.track_buffer = float(os.environ.get('track_buffer', 600))
# Single shared tracker instance; built once at import so the celery worker
# loads the model weights only one time.
TRACKER = TowardRealtimeMOTracker(args)
@celery.task(name='track_video')
def track_video(params):
    """Celery task: run multi-object tracking on an mp4 and render the result.

    ``params`` may carry ``input_video`` and ``output_video`` paths; both
    default to files in the working directory.
    """
    source_path = params.get('input_video', 'input.mp4')
    target_path = params.get('output_video', 'output.mp4')
    # Shuffled palette so neighbouring track ids get visually distinct colours.
    palette = get_spaced_colors(100)
    random.shuffle(palette)
    video = Mp4AsVideoData(source_path)
    tracking_result = TRACKER.track(video)
    draw_tracking_result(tracking_result, palette, video, output_video_path=target_path)
acf15dd18e2265dbe8c8c59c1549acb61728acb9 | 6,865 | py | Python | src/modules/criterions.py | howardchenhd/Transformer-pytorch | ae71ed5767272feb7e717be6d5bfce46f80ec57a | [
"MIT"
] | 1 | 2018-12-19T10:16:45.000Z | 2018-12-19T10:16:45.000Z | src/modules/criterions.py | howardchenhd/Transformer-pytorch | ae71ed5767272feb7e717be6d5bfce46f80ec57a | [
"MIT"
] | null | null | null | src/modules/criterions.py | howardchenhd/Transformer-pytorch | ae71ed5767272feb7e717be6d5bfce46f80ec57a | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
from src.utils import Vocab
# Loss compute
def filter_shard_state(state):
    """Yield ``(key, value)`` pairs from ``state``, dropping ``None`` values.

    Gradient-requiring Variables are re-wrapped around their raw data so each
    shard gets a fresh leaf whose gradient can be collected afterwards.
    """
    for key, value in state.items():
        if value is None:
            continue
        if isinstance(value, Variable) and value.requires_grad:
            value = Variable(value.data, requires_grad=True, volatile=False)
        yield key, value
def shards(state, shard_size, eval=False, batch_dim=0):
    """
    Args:
        state: A dictionary which corresponds to the output of
                *LossCompute._make_shard_state(). The values for
                those keys are Tensor-like or None.
        shard_size: The maximum size of the shards yielded by the model.
        eval: If True, only yield the state, nothing else.
                Otherwise, yield shards.
        batch_dim: dimension along which tensors are split into shards.
    Yields:
        Each yielded shard is a dict.
    Side effect:
        After the last shard, this function does back-propagation.
    """
    if eval:
        yield state
    else:
        # non_none: the subdict of the state dictionary where the values
        # are not None.
        non_none = dict(filter_shard_state(state))
        # Now, the iteration:
        # state is a dictionary of sequences of tensor-like but we
        # want a sequence of dictionaries of tensors.
        # First, unzip the dictionary into a sequence of keys and a
        # sequence of tensor-like sequences.
        # NOTE(review): `split_size=` is the legacy (pre-0.4) torch.split
        # keyword; newer torch renamed it to `split_size_or_sections`.
        keys, values = zip(*((k, map(lambda t: t.contiguous(), torch.split(v, split_size=shard_size, dim=batch_dim)))
                             for k, v in non_none.items()))
        # Now, yield a dictionary for each shard. The keys are always
        # the same. values is a sequence of length #keys where each
        # element is a sequence of length #shards. We want to iterate
        # over the shards, not over the keys: therefore, the values need
        # to be re-zipped by shard and then each shard can be paired
        # with the keys.
        for shard_tensors in zip(*values):
            yield dict(zip(keys, shard_tensors))
        # Assumed backprop'd
        # Collect the gradients accumulated on the re-wrapped leaves and push
        # them back through the original graph in one backward call.
        variables = ((state[k], v.grad.data) for k, v in non_none.items()
                     if isinstance(v, Variable) and v.grad is not None)
        inputs, grads = zip(*variables)
        torch.autograd.backward(inputs, grads)
class Critierion(nn.Module):
    """
    Class for managing efficient loss computation. Handles
    sharding next step predictions and accumulating multiple
    loss computations.
    Users can implement their own loss computation strategy by making
    subclass of this one. Users need to implement the _compute_loss()
    and make_shard_state() methods.
    Args:
        generator (:obj:`nn.Module`) :
            module that maps the output of the decoder to a
            distribution over the target vocabulary.
        tgt_vocab (:obj:`Vocab`) :
            torchtext vocab object representing the target output
        normalization (str): normalize by "sents" or "tokens"
    """
    def __init__(self):
        super(Critierion, self).__init__()
    def _compute_loss(self, generator, *args, **kwargs):
        """
        Compute the loss. Subclass must define this method.
        Args:
            output: the predict output from the model.
            target: the validate target to compare output with.
            **kwargs(optional): additional info for computing loss.
        """
        raise NotImplementedError
    def shared_compute_loss(self,
                            generator,
                            shard_size,
                            normalization=1.0,
                            eval=False,
                            batch_dim=0, **kwargs):
        """Compute the loss shard by shard, backpropagating each shard
        (scaled by 1/normalization) and returning the accumulated,
        normalized loss value."""
        # shard_state = self._make_shard_state(**kwargs)
        loss_data = 0.0
        for shard in shards(state=kwargs, shard_size=shard_size, eval=eval, batch_dim=batch_dim):
            loss = self._compute_loss(generator=generator, **shard)  # type: Variable
            # backward() on the normalized loss; accumulate the raw value.
            loss.div(normalization).backward()
            loss_data += loss.data.clone()
        return loss_data / normalization
    def forward(self, generator, shard_size, normalization=1.0, eval=False, batch_dim=0, **kwargs):
        """Dispatch: un-sharded path for eval or shard_size < 0, sharded
        (memory-efficient) path otherwise. In training mode the un-sharded
        path also calls backward()."""
        if eval is True or shard_size < 0:
            loss = self._compute_loss(generator, **kwargs).div(normalization)
            if eval is False:
                loss.backward()
                return loss.data.clone()
            else:
                return loss.clone()
        else:
            return self.shared_compute_loss(generator=generator,
                                            shard_size=shard_size,
                                            normalization=normalization,
                                            eval=eval,
                                            batch_dim=batch_dim,
                                            **kwargs)
class NMTCritierion(Critierion):
    """NMT loss with optional label smoothing.

    With label_smoothing > 0 the loss is the KL divergence between the
    smoothed ground-truth distribution and the model distribution; with
    label_smoothing == 0 it reduces to NLLLoss (padding excluded).
    """
    def __init__(self, num_tokens, padding_idx=Vocab.PAD, label_smoothing=0.0):
        super().__init__()
        self.num_tokens = num_tokens
        self.padding_idx = padding_idx
        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            # NOTE(review): size_average= is the legacy (pre-1.0) reduction
            # keyword, replaced by reduction='sum' in newer torch.
            self.criterion = nn.KLDivLoss(size_average=False)
            # Template row of smoothed probabilities; num_tokens - 2 excludes
            # the true label and the padding index from the smoothing mass.
            one_hot = torch.randn(1, num_tokens)
            one_hot.fill_(label_smoothing / (num_tokens - 2))
            one_hot[0][padding_idx] = 0
            self.register_buffer('one_hot', one_hot)
        else:
            weight = torch.ones(self.num_tokens)
            weight[padding_idx] = 0
            self.criterion = nn.NLLLoss(weight=weight, size_average=False)
        self.confidence = 1.0 - label_smoothing
    def _bottle(self, v):
        # Flatten (batch, seq, d) -> (batch * seq, d) for the generator.
        return v.view(-1, v.size(2))
    def _compute_loss(self, generator, dec_outs, labels):
        """Compute the (label-smoothed) loss for one shard of decoder outputs."""
        scores = generator(self._bottle(dec_outs))  # [batch_size * seq_len, d_words]
        gtruth = labels.view(-1)
        if self.confidence < 1:
            tdata = gtruth.data
            # Positions that are padding get zeroed out of the target dist.
            mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
            log_likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))
            # Build the smoothed target: template row per token, true label
            # gets `confidence`, everything else the smoothing mass.
            tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
            tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
            if mask.dim() > 0:
                log_likelihood.index_fill_(0, mask, 0)
                tmp_.index_fill_(0, mask, 0)
            gtruth = Variable(tmp_, requires_grad=False)
        loss = self.criterion(scores, gtruth)
        return loss
| 36.908602 | 117 | 0.592571 |
acf15e80034924e916d228d28893af457f96760c | 10,386 | py | Python | pyleecan/Classes/PolarArc.py | stephane-eisen/pyleecan | 8444b8131c9eff11a616da8277fb1f280c8f70e5 | [
"Apache-2.0"
] | 1 | 2021-07-08T01:27:24.000Z | 2021-07-08T01:27:24.000Z | pyleecan/Classes/PolarArc.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | null | null | null | pyleecan/Classes/PolarArc.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Geometry/PolarArc.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Geometry/PolarArc
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .Surface import Surface
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Geometry.PolarArc.get_lines import get_lines
except ImportError as error:
get_lines = error
try:
from ..Methods.Geometry.PolarArc.rotate import rotate
except ImportError as error:
rotate = error
try:
from ..Methods.Geometry.PolarArc.translate import translate
except ImportError as error:
translate = error
try:
from ..Methods.Geometry.PolarArc.check import check
except ImportError as error:
check = error
try:
from ..Methods.Geometry.PolarArc.comp_length import comp_length
except ImportError as error:
comp_length = error
try:
from ..Methods.Geometry.PolarArc.discretize import discretize
except ImportError as error:
discretize = error
try:
from ..Methods.Geometry.PolarArc.get_patches import get_patches
except ImportError as error:
get_patches = error
try:
from ..Methods.Geometry.PolarArc.comp_surface import comp_surface
except ImportError as error:
comp_surface = error
try:
from ..Methods.Geometry.PolarArc.comp_point_ref import comp_point_ref
except ImportError as error:
comp_point_ref = error
from ._check import InitUnKnowClassError
class PolarArc(Surface):
    """PolarArc defined by the center of object(point_ref), the label, the angle and the height"""
    VERSION = 1
    # Generated wiring: each method lives in its own module under
    # Methods/Geometry/PolarArc; if its import failed above, accessing the
    # method raises a descriptive ImportError instead of failing at import.
    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.Geometry.PolarArc.get_lines
    if isinstance(get_lines, ImportError):
        get_lines = property(
            fget=lambda x: raise_(
                ImportError("Can't use PolarArc method get_lines: " + str(get_lines))
            )
        )
    else:
        get_lines = get_lines
    # cf Methods.Geometry.PolarArc.rotate
    if isinstance(rotate, ImportError):
        rotate = property(
            fget=lambda x: raise_(
                ImportError("Can't use PolarArc method rotate: " + str(rotate))
            )
        )
    else:
        rotate = rotate
    # cf Methods.Geometry.PolarArc.translate
    if isinstance(translate, ImportError):
        translate = property(
            fget=lambda x: raise_(
                ImportError("Can't use PolarArc method translate: " + str(translate))
            )
        )
    else:
        translate = translate
    # cf Methods.Geometry.PolarArc.check
    if isinstance(check, ImportError):
        check = property(
            fget=lambda x: raise_(
                ImportError("Can't use PolarArc method check: " + str(check))
            )
        )
    else:
        check = check
    # cf Methods.Geometry.PolarArc.comp_length
    if isinstance(comp_length, ImportError):
        comp_length = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use PolarArc method comp_length: " + str(comp_length)
                )
            )
        )
    else:
        comp_length = comp_length
    # cf Methods.Geometry.PolarArc.discretize
    if isinstance(discretize, ImportError):
        discretize = property(
            fget=lambda x: raise_(
                ImportError("Can't use PolarArc method discretize: " + str(discretize))
            )
        )
    else:
        discretize = discretize
    # cf Methods.Geometry.PolarArc.get_patches
    if isinstance(get_patches, ImportError):
        get_patches = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use PolarArc method get_patches: " + str(get_patches)
                )
            )
        )
    else:
        get_patches = get_patches
    # cf Methods.Geometry.PolarArc.comp_surface
    if isinstance(comp_surface, ImportError):
        comp_surface = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use PolarArc method comp_surface: " + str(comp_surface)
                )
            )
        )
    else:
        comp_surface = comp_surface
    # cf Methods.Geometry.PolarArc.comp_point_ref
    if isinstance(comp_point_ref, ImportError):
        comp_point_ref = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use PolarArc method comp_point_ref: " + str(comp_point_ref)
                )
            )
        )
    else:
        comp_point_ref = comp_point_ref
    # save and copy methods are available in all object
    save = save
    copy = copy
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self, angle=1, height=1, point_ref=0, label="", init_dict=None, init_str=None
    ):
        """Constructor of the class. Can be use in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for pyleecan type, -1 will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with property names as keys
        - __init__ (init_str = s) s must be a string
        s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None:  # Load from a file
            init_dict = load_init_dict(init_str)[1]
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "angle" in list(init_dict.keys()):
                angle = init_dict["angle"]
            if "height" in list(init_dict.keys()):
                height = init_dict["height"]
            if "point_ref" in list(init_dict.keys()):
                point_ref = init_dict["point_ref"]
            if "label" in list(init_dict.keys()):
                label = init_dict["label"]
        # Set the properties (value check and convertion are done in setter)
        self.angle = angle
        self.height = height
        # Call Surface init
        super(PolarArc, self).__init__(point_ref=point_ref, label=label)
        # The class is frozen (in Surface init), for now it's impossible to
        # add new properties
    def __str__(self):
        """Convert this object in a readeable string (for print)"""
        PolarArc_str = ""
        # Get the properties inherited from Surface
        PolarArc_str += super(PolarArc, self).__str__()
        PolarArc_str += "angle = " + str(self.angle) + linesep
        PolarArc_str += "height = " + str(self.height) + linesep
        return PolarArc_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        # Strict type match: a Surface subclass with equal fields is not equal.
        if type(other) != type(self):
            return False
        # Check the properties inherited from Surface
        if not super(PolarArc, self).__eq__(other):
            return False
        if other.angle != self.angle:
            return False
        if other.height != self.height:
            return False
        return True
    def compare(self, other, name="self", ignore_list=None):
        """Compare two objects and return list of differences"""
        if ignore_list is None:
            ignore_list = list()
        if type(other) != type(self):
            return ["type(" + name + ")"]
        diff_list = list()
        # Check the properties inherited from Surface
        # (ignore_list is not forwarded; the filter below applies it to the
        # combined list, so inherited differences are still filtered.)
        diff_list.extend(super(PolarArc, self).compare(other, name=name))
        if other._angle != self._angle:
            diff_list.append(name + ".angle")
        if other._height != self._height:
            diff_list.append(name + ".height")
        # Filter ignore differences
        diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
        return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""
        S = 0  # Full size of the object
        # Get size of the properties inherited from Surface
        S += super(PolarArc, self).__sizeof__()
        S += getsizeof(self.angle)
        S += getsizeof(self.height)
        return S
    def as_dict(self, **kwargs):
        """
        Convert this object in a json serializable dict (can be use in __init__).
        Optional keyword input parameter is for internal use only
        and may prevent json serializability.
        """
        # Get the properties inherited from Surface
        PolarArc_dict = super(PolarArc, self).as_dict(**kwargs)
        PolarArc_dict["angle"] = self.angle
        PolarArc_dict["height"] = self.height
        # The class name is added to the dict for deserialisation purpose
        # Overwrite the mother class name
        PolarArc_dict["__class__"] = "PolarArc"
        return PolarArc_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.angle = None
        self.height = None
        # Set to None the properties inherited from Surface
        super(PolarArc, self)._set_None()
    def _get_angle(self):
        """getter of angle"""
        return self._angle
    def _set_angle(self, value):
        """setter of angle"""
        # check_var enforces type float and minimum 0 before assignment
        check_var("angle", value, "float", Vmin=0)
        self._angle = value
    angle = property(
        fget=_get_angle,
        fset=_set_angle,
        doc=u"""Polar angle
        :Type: float
        :min: 0
        """,
    )
    def _get_height(self):
        """getter of height"""
        return self._height
    def _set_height(self, value):
        """setter of height"""
        # check_var enforces type float and minimum 0 before assignment
        check_var("height", value, "float", Vmin=0)
        self._height = value
    # NOTE(review): the doc string below ("Heigth"/"PolarAngle" typos) is a
    # runtime string from the generator; fix it in the CSV source, not here.
    height = property(
        fget=_get_height,
        fset=_set_height,
        doc=u"""The Heigth of the PolarAngle
        :Type: float
        :min: 0
        """,
    )
acf15f23f75c934fa50dcbb5611b85ff1e1b1171 | 15,243 | py | Python | mne/gui/_marker_gui.py | weilongzheng/mne-python | a3b973b189940244a6ed1ec1e2d54f4044be9a06 | [
"BSD-3-Clause"
] | 2 | 2015-09-27T20:33:49.000Z | 2020-04-22T19:10:56.000Z | mne/gui/_marker_gui.py | TalLinzen/mne-python | 050a49fea804c4b10b8be477d0ba2d9b82399ed5 | [
"BSD-3-Clause"
] | null | null | null | mne/gui/_marker_gui.py | TalLinzen/mne-python | 050a49fea804c4b10b8be477d0ba2d9b82399ed5 | [
"BSD-3-Clause"
] | 1 | 2019-10-23T06:11:28.000Z | 2019-10-23T06:11:28.000Z | """Mayavi/traits GUI for averaging two sets of KIT marker points"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import numpy as np
# allow import without traits
# Allow import without the traits/mayavi GUI stack: on any import failure the
# GUI names are replaced by trait_wraith stand-ins so the module still loads.
try:
    from mayavi.core.ui.mayavi_scene import MayaviScene
    from mayavi.tools.mlab_scene_model import MlabSceneModel
    from pyface.api import confirm, error, FileDialog, OK, YES
    from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
                            cached_property, Instance, Property, Array, Bool,
                            Button, Enum, File, Float, List, Str)
    from traitsui.api import View, Item, HGroup, VGroup, CheckListEditor
    from traitsui.menu import NoButtons
    from tvtk.pyface.scene_editor import SceneEditor
except Exception:
    # Fix: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; `except Exception` still covers every import failure.
    from ..utils import trait_wraith
    HasTraits = object
    HasPrivateTraits = object
    cached_property = trait_wraith
    on_trait_change = trait_wraith
    MayaviScene = trait_wraith
    MlabSceneModel = trait_wraith
    Array = trait_wraith
    Bool = trait_wraith
    Button = trait_wraith
    Enum = trait_wraith
    File = trait_wraith
    Float = trait_wraith
    Instance = trait_wraith
    Int = trait_wraith
    List = trait_wraith
    Property = trait_wraith
    Str = trait_wraith
    View = trait_wraith
    Item = trait_wraith
    HGroup = trait_wraith
    VGroup = trait_wraith
    CheckListEditor = trait_wraith
    NoButtons = trait_wraith
    SceneEditor = trait_wraith
from ..transforms import apply_trans, rotation, translation
from ..coreg import fit_matched_points
from ..fiff.kit import read_mrk, write_mrk
from ._viewer import HeadViewController, headview_borders, PointObject
# File-dialog wildcards: wx uses "description|pattern" pairs, other backends
# take plain glob patterns.
backend_is_wx = False  # is there a way to determine this?
if backend_is_wx:
    mrk_wildcard = ['Supported Files (*.sqd, *.mrk, *.txt, *.pickled)|'
                    '*.sqd;*.mrk;*.txt;*.pickled',
                    'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk',
                    'Text marker file (*.txt)|*.txt',
                    'Pickled markers (*.pickled)|*.pickled']
    mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt",
                        "Pickled KIT parameters (*.pickled)|*.pickled"]
else:
    mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"]
    mrk_out_wildcard = ["*.txt;*.pickled"]
# Output extensions, indexed by the save dialog's wildcard_index.
out_ext = ['.txt', '.pickled']
# Checkbox editors for selecting which of the 5 marker points to use.
use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
# Traits views for the marker point panels (editable, basic, raw-edit).
mrk_view_editable = View(
    VGroup('file',
           Item('name', show_label=False, style='readonly'),
           HGroup(
               Item('use', editor=use_editor_v, enabled_when="enabled",
                    style='custom'),
               'points',
           ),
           HGroup(Item('clear', enabled_when="can_save", show_label=False),
                  Item('save_as', enabled_when="can_save",
                       show_label=False)),
           ))
mrk_view_basic = View(
    VGroup('file',
           Item('name', show_label=False, style='readonly'),
           Item('use', editor=use_editor_h, enabled_when="enabled",
                style='custom'),
           HGroup(Item('clear', enabled_when="can_save", show_label=False),
                  Item('edit', show_label=False),
                  Item('save_as', enabled_when="can_save",
                       show_label=False)),
           ))
mrk_view_edit = View(VGroup('points'))
class MarkerPoints(HasPrivateTraits):
    """Represent 5 marker points"""
    points = Array(float, (5, 3))
    can_save = Property(depends_on='points')
    save_as = Button()
    view = View(VGroup('points',
                       Item('save_as', enabled_when='can_save')))
    @cached_property
    def _get_can_save(self):
        # Saving is enabled as soon as any coordinate is non-zero.
        return np.any(self.points)
    def _save_as_fired(self):
        """Handle the save-as button: ask for a path, confirm overwrite, save."""
        dlg = FileDialog(action="save as", wildcard=mrk_out_wildcard,
                         default_filename=self.name,
                         default_directory=self.dir)
        dlg.open()
        if dlg.return_code != OK:
            return
        # Append the extension matching the chosen wildcard if missing.
        ext = out_ext[dlg.wildcard_index]
        path = dlg.path
        if not path.endswith(ext):
            path = path + ext
        if os.path.exists(path):
            # Bug fix: the path was never interpolated -- confirm() takes
            # (parent, message, title), so the old call displayed a literal
            # '%r'. Format the message with the path before passing it.
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % path, "Overwrite File?")
            if answer != YES:
                return
        self.save(path)
    def save(self, path):
        """Save the marker points
        Parameters
        ----------
        path : str
            Path to the file to write. The kind of file to write is determined
            based on the extension: '.txt' for tab separated text file,
            '.pickled' for pickled file.
        """
        write_mrk(path, self.points)
class MarkerPointSource(MarkerPoints):
    """MarkerPoints subclass for source files"""
    file = File(filter=mrk_wildcard, exists=True)
    name = Property(Str, depends_on='file')
    dir = Property(Str, depends_on='file')
    use = List(list(range(5)), desc="Which points to use for the interpolated "
               "marker.")
    enabled = Property(Bool, depends_on=['points', 'use'])
    clear = Button(desc="Clear the current marker data")
    edit = Button(desc="Edit the marker coordinates manually")
    view = mrk_view_basic
    @cached_property
    def _get_enabled(self):
        # A source counts as enabled once any point has been loaded.
        return np.any(self.points)
    @cached_property
    def _get_dir(self):
        if self.file:
            return os.path.dirname(self.file)
    @cached_property
    def _get_name(self):
        if self.file:
            return os.path.basename(self.file)
    @on_trait_change('file')
    def load(self, fname):
        """Load marker points whenever the file trait changes; reset on error."""
        if not fname:
            self.reset_traits(['points'])
            return
        try:
            pts = read_mrk(fname)
        except Exception as err:
            error(None, str(err), "Error Reading mrk")
            self.reset_traits(['points'])
        else:
            self.points = pts
    def _clear_fired(self):
        self.reset_traits(['file', 'points'])
    def _edit_fired(self):
        self.edit_traits(view=mrk_view_edit)
class MarkerPointDest(MarkerPoints):
    """MarkerPoints subclass that serves for derived points"""
    src1 = Instance(MarkerPointSource)
    src2 = Instance(MarkerPointSource)
    name = Property(Str, depends_on='src1.name,src2.name')
    dir = Property(Str, depends_on='src1.dir,src2.dir')
    # Derived points: recomputed whenever either source or the method changes.
    points = Property(Array(float, (5, 3)),
                      depends_on=['method', 'src1.points', 'src1.use',
                                  'src2.points', 'src2.use'])
    enabled = Property(Bool, depends_on=['points'])
    method = Enum('Transform', 'Average', desc="Transform: estimate a rotation"
                  "/translation from mrk1 to mrk2; Average: use the average "
                  "of the mrk1 and mrk2 coordinates for each point.")
    view = View(VGroup(Item('method', style='custom'),
                       Item('save_as', enabled_when='can_save',
                            show_label=False)))
    @cached_property
    def _get_dir(self):
        # Output directory follows the first source file.
        return self.src1.dir
@cached_property
def _get_name(self):
n1 = self.src1.name
n2 = self.src2.name
if not n1:
if n2:
return n2
else:
return ''
elif not n2:
return n1
if n1 == n2:
return n1
i = 0
l1 = len(n1) - 1
l2 = len(n1) - 2
while n1[i] == n2[i]:
if i == l1:
return n1
elif i == l2:
return n2
i += 1
return n1[:i]
    @cached_property
    def _get_enabled(self):
        return np.any(self.points)
    @cached_property
    def _get_points(self):
        """Compute the derived marker points from the enabled sources."""
        # in case only one or no source is enabled
        if not (self.src1 and self.src1.enabled):
            if (self.src2 and self.src2.enabled):
                return self.src2.points
            else:
                return np.zeros((5, 3))
        elif not (self.src2 and self.src2.enabled):
            return self.src1.points
        # Average method
        if self.method == 'Average':
            if len(np.union1d(self.src1.use, self.src2.use)) < 5:
                error(None, "Need at least one source for each point.",
                      "Marker Average Error")
                return np.zeros((5, 3))
            # Average shared points; fall back to the single source for
            # points used by only one of the two.
            pts = (self.src1.points + self.src2.points) / 2.
            for i in np.setdiff1d(self.src1.use, self.src2.use):
                pts[i] = self.src1.points[i]
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = self.src2.points[i]
            return pts
        # Transform method
        idx = np.intersect1d(self.src1.use, self.src2.use, assume_unique=True)
        if len(idx) < 3:
            error(None, "Need at least three shared points for trans"
                  "formation.", "Marker Interpolation Error")
            return np.zeros((5, 3))
        # Estimate the src1 -> src2 rigid transform from the shared points,
        # then apply half of it from each side to land halfway between them.
        src_pts = self.src1.points[idx]
        tgt_pts = self.src2.points[idx]
        est = fit_matched_points(src_pts, tgt_pts, out='params')
        rot = np.array(est[:3]) / 2.
        tra = np.array(est[3:]) / 2.
        if len(self.src1.use) == 5:
            trans = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans, self.src1.points)
        elif len(self.src2.use) == 5:
            trans = np.dot(translation(* -tra), rotation(* -rot))
            pts = apply_trans(trans, self.src2.points)
        else:
            # Mixed case: take each point from whichever source provides it.
            trans1 = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans1, self.src1.points)
            trans2 = np.dot(translation(* -tra), rotation(* -rot))
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = apply_trans(trans2, self.src2.points[i])
        return pts
class CombineMarkersModel(HasPrivateTraits):
    """Model holding two source marker sets and the derived third set."""
    mrk1_file = Instance(File)
    mrk2_file = Instance(File)
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    # stats
    distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points'])
    def _mrk1_default(self):
        mrk = MarkerPointSource()
        return mrk
    def _mrk1_file_default(self):
        return self.mrk1.trait('file')
    def _mrk2_default(self):
        mrk = MarkerPointSource()
        return mrk
    def _mrk2_file_default(self):
        return self.mrk2.trait('file')
    def _mrk3_default(self):
        # mrk3 is derived from the two sources.
        mrk = MarkerPointDest(src1=self.mrk1, src2=self.mrk2)
        return mrk
    @cached_property
    def _get_distance(self):
        """Tab-separated per-point mrk1/mrk2 distances in mm ('' until both loaded)."""
        if (self.mrk1 is None or self.mrk2 is None
                or (not np.any(self.mrk1.points))
                or (not np.any(self.mrk2.points))):
            return ""
        ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1))
        desc = '\t'.join('%.1f mm' % (d * 1000) for d in ds)
        return desc
class CombineMarkersPanel(HasTraits):
    """Has two marker points sources and interpolates to a third one"""
    model = Instance(CombineMarkersModel, ())
    # model references for UI
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    distance = Str
    # Visualization
    scene = Instance(MlabSceneModel)
    scale = Float(5e-3)
    mrk1_obj = Instance(PointObject)
    mrk2_obj = Instance(PointObject)
    mrk3_obj = Instance(PointObject)
    trans = Array()
    view = View(VGroup(VGroup(Item('mrk1', style='custom'),
                              Item('mrk1_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 1", show_border=True),
                       VGroup(Item('mrk2', style='custom'),
                              Item('mrk2_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 2", show_border=True),
                       VGroup(Item('distance', style='readonly'),
                              label='Stats', show_border=True),
                       VGroup(Item('mrk3', style='custom'),
                              Item('mrk3_obj', style='custom'),
                              show_labels=False,
                              label="New Marker", show_border=True),
                       ))
    def _mrk1_default(self):
        return self.model.mrk1
    def _mrk2_default(self):
        return self.model.mrk2
    def _mrk3_default(self):
        return self.model.mrk3
    def __init__(self, *args, **kwargs):
        """Wire model traits to the 3D point objects (one-way sync)."""
        super(CombineMarkersPanel, self).__init__(*args, **kwargs)
        m = self.model
        m.sync_trait('distance', self, 'distance', mutual=False)
        # One PointObject per marker set; points/visibility follow the model.
        self.mrk1_obj = PointObject(scene=self.scene, color=(155, 55, 55),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk1_obj, mutual=False)
        m.mrk1.sync_trait('points', self.mrk1_obj, 'points', mutual=False)
        m.mrk1.sync_trait('enabled', self.mrk1_obj, 'visible',
                          mutual=False)
        self.mrk2_obj = PointObject(scene=self.scene, color=(55, 155, 55),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk2_obj, mutual=False)
        m.mrk2.sync_trait('points', self.mrk2_obj, 'points', mutual=False)
        m.mrk2.sync_trait('enabled', self.mrk2_obj, 'visible',
                          mutual=False)
        self.mrk3_obj = PointObject(scene=self.scene, color=(150, 200, 255),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk3_obj, mutual=False)
        m.mrk3.sync_trait('points', self.mrk3_obj, 'points', mutual=False)
        m.mrk3.sync_trait('enabled', self.mrk3_obj, 'visible', mutual=False)
class CombineMarkersFrame(HasTraits):
    """GUI for interpolating between two KIT marker files
    Parameters
    ----------
    mrk1, mrk2 : str
        Path to pre- and post measurement marker files (*.sqd) or empty string.
    """
    model = Instance(CombineMarkersModel, ())
    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)
    panel = Instance(CombineMarkersPanel)
    def _headview_default(self):
        # ALS: head coordinate system used by the KIT marker files.
        return HeadViewController(scene=self.scene, system='ALS')
    def _panel_default(self):
        # Panel shares this frame's model and mayavi scene.
        return CombineMarkersPanel(model=self.model, scene=self.scene)
    view = View(HGroup(Item('scene',
                            editor=SceneEditor(scene_class=MayaviScene),
                            dock='vertical'),
                       VGroup(headview_borders,
                              Item('panel', style="custom"),
                              show_labels=False),
                       show_labels=False,
                       ),
                width=1100, resizable=True,
                buttons=NoButtons)
acf15f93a0e981a1c76ebf34b15ad4740152aed5 | 1,698 | py | Python | libraries/botbuilder-ai/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | null | null | null | libraries/botbuilder-ai/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | null | null | null | libraries/botbuilder-ai/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | 1 | 2022-02-24T10:23:28.000Z | 2022-02-24T10:23:28.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
# Pinned runtime dependencies for the botbuilder-ai package.
REQUIRES = [
    "azure-cognitiveservices-language-luis==0.2.0",
    "botbuilder-schema==4.11.0",
    "botbuilder-core==4.11.2",
    "aiohttp==3.6.2",
]
# Extra dependencies needed only by the test suite.
TESTS_REQUIRES = ["aiounittest>=1.1.0"]
root = os.path.abspath(os.path.dirname(__file__))
# Execute about.py to collect package metadata (__title__, __version__, ...)
# without importing the package (which would pull in its dependencies).
with open(os.path.join(root, "botbuilder", "ai", "about.py")) as f:
    package_info = {}
    info = f.read()
    exec(info, package_info)
# README.rst becomes the long description shown on PyPI.
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()
setup(
    name=package_info["__title__"],
    version=package_info["__version__"],
    url=package_info["__uri__"],
    author=package_info["__author__"],
    description=package_info["__description__"],
    keywords="botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    license=package_info["__license__"],
    packages=[
        "botbuilder.ai",
        "botbuilder.ai.qna",
        "botbuilder.ai.luis",
        "botbuilder.ai.qna.models",
        "botbuilder.ai.qna.utils",
        "botbuilder.ai.qna.dialogs",
    ],
    # NOTE(review): test-only requirements are folded into install_requires
    # here, so they are installed for all consumers — confirm this is intended.
    install_requires=REQUIRES + TESTS_REQUIRES,
    tests_require=TESTS_REQUIRES,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 5 - Production/Stable",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
| 30.321429 | 75 | 0.6649 |
acf15fcc400302c1f84149eadd62b98c68c7fa7f | 26,412 | py | Python | tests/scripts/run-pmi-diffs.py | pentp/coreclr | fdef855fa1df3540ba632bfae850279d627bde66 | [
"MIT"
] | 1 | 2020-06-16T22:25:41.000Z | 2020-06-16T22:25:41.000Z | tests/scripts/run-pmi-diffs.py | pentp/coreclr | fdef855fa1df3540ba632bfae850279d627bde66 | [
"MIT"
] | null | null | null | tests/scripts/run-pmi-diffs.py | pentp/coreclr | fdef855fa1df3540ba632bfae850279d627bde66 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
##########################################################################
##########################################################################
#
# Module: run-pmi-diffs.py
#
# Notes:
#
# TODO: Instead of downloading and extracting the dotnet CLI, can we convert
# to using init-tools.cmd/sh and the Tools/dotnetcli "last known good"
# version? (This maybe should be done for format.py as well.)
#
# Script to automate running PMI diffs on a pull request
#
##########################################################################
##########################################################################
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import urllib
import urllib2
import sys
import tarfile
import zipfile
##########################################################################
# Globals
##########################################################################
# When True, commands are logged but not executed and directory changes are
# skipped (dry-run mode used throughout this script).
testing = False
Coreclr_url = 'https://github.com/dotnet/coreclr.git'
Jitutils_url = 'https://github.com/dotnet/jitutils.git'
# The Docker file and possibly options should be hoisted out to a text file to be shared between scripts.
Docker_name_arm32 = 'microsoft/dotnet-buildtools-prereqs:ubuntu-14.04-cross-e435274-20180426002420'
Docker_opts_arm32 = '-e ROOTFS_DIR=/crossrootfs/arm'
Docker_name_arm64 = 'microsoft/dotnet-buildtools-prereqs:ubuntu-16.04-cross-arm64-a3ae44b-20180315221921'
Docker_opts_arm64 = '-e ROOTFS_DIR=/crossrootfs/arm64'
# This should be factored out of build.sh
# Maps os.uname() system names to the names CoreCLR's build scripts use.
Unix_name_map = {
    'Linux': 'Linux',
    'Darwin': 'OSX',
    'FreeBSD': 'FreeBSD',
    'OpenBSD': 'OpenBSD',
    'NetBSD': 'NetBSD',
    'SunOS': 'SunOS'
}
Is_windows = (os.name == 'nt')
# Current OS in CoreCLR build nomenclature (e.g. 'Windows_NT', 'Linux', 'OSX').
Clr_os = 'Windows_NT' if Is_windows else Unix_name_map[os.uname()[0]]
##########################################################################
# Delete protocol
##########################################################################
def del_rw(action, name, exc):
    """``shutil.rmtree`` onerror callback: loosen permissions and retry.

    rmtree cannot remove files that lack owner write permission (notably
    read-only files on Windows); grant the original permission mask and
    delete the file directly.

    Args:
        action: the function that raised (passed by shutil.rmtree; unused)
        name (str): path of the file that could not be removed
        exc: exception info from the failed removal (unused)
    """
    # 0o651 is the mask the original script used (written as the Python-2-only
    # literal 0651, a syntax error under Python 3; 0o651 is valid in 2.6+ and 3).
    os.chmod(name, 0o651)
    os.remove(name)
##########################################################################
# Argument Parser
##########################################################################
# Command-line options; values are checked/normalized later by validate_args().
description = 'Tool to generate JIT assembly diffs from the CoreCLR repo'
parser = argparse.ArgumentParser(description=description)
# base_root is normally expected to be None, in which case we'll clone the
# coreclr tree and build it. If base_root is passed, we'll use it, and not
# clone or build the base.
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-ci_arch', dest='ci_arch', default=None)
parser.add_argument('-build_type', dest='build_type', default='Checked')
parser.add_argument('-base_root', dest='base_root', default=None)
parser.add_argument('-diff_root', dest='diff_root', default=None)
parser.add_argument('-scratch_root', dest='scratch_root', default=None)
parser.add_argument('--skip_baseline_build', dest='skip_baseline_build', action='store_true', default=False)
parser.add_argument('--skip_diffs', dest='skip_diffs', action='store_true', default=False)
parser.add_argument('-target_branch', dest='target_branch', default='master')
parser.add_argument('-commit_hash', dest='commit_hash', default=None)
##########################################################################
# Class to change the current directory, and automatically restore the
# directory back to what it used to be, on exit.
##########################################################################
class ChangeDir:
    """Context manager: chdir into a directory on entry and restore the
    previous working directory on exit.

    Honors the module-level ``testing`` flag — in testing mode directory
    changes are logged but never performed.
    """

    def __init__(self, dir):
        self.dir = dir
        self.cwd = None

    def _switch_to(self, target):
        # Log the transition first so the transcript shows intent even
        # when running in testing (dry-run) mode.
        log('[cd] %s' % target)
        if not testing:
            os.chdir(target)

    def __enter__(self):
        self.cwd = os.getcwd()
        self._switch_to(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._switch_to(self.cwd)
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
    """ Validate all of the arguments parsed.
    Args:
        args (argparser.ArgumentParser): Args parsed by the argument parser.
    Returns:
        (arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash)
            (str, str, str, str, str, str, bool, bool, str, str)
    Notes:
    If the arguments are valid then return them all in a tuple. If not, raise
    an exception stating x argument is incorrect.
    """
    arch = args.arch
    ci_arch = args.ci_arch
    build_type = args.build_type
    base_root = args.base_root
    diff_root = args.diff_root
    scratch_root = args.scratch_root
    skip_baseline_build = args.skip_baseline_build
    skip_diffs = args.skip_diffs
    target_branch = args.target_branch
    commit_hash = args.commit_hash
    def validate_arg(arg, check):
        """ Validate an individual arg
        Args:
           arg (str|bool): argument to be validated
           check (lambda: x-> bool): test that returns either True or False
                 : based on whether the check passes.
        Returns:
            is_valid (bool): Is the argument valid?
        """
        helper = lambda item: item is not None and check(item)
        if not helper(arg):
            raise Exception('Argument: %s is not valid.' % (arg))
    valid_archs = ['x86', 'x64', 'arm', 'arm64']
    valid_ci_archs = valid_archs + ['x86_arm_altjit', 'x64_arm64_altjit']
    valid_build_types = ['Debug', 'Checked', 'Release']
    # Normalize arch/build_type to the canonical casing (case-insensitive match).
    arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
    build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
    validate_arg(arch, lambda item: item in valid_archs)
    validate_arg(build_type, lambda item: item in valid_build_types)
    # Default diff_root: three directories above this script's location.
    if diff_root is None:
        diff_root = nth_dirname(os.path.abspath(sys.argv[0]), 3)
    else:
        diff_root = os.path.abspath(diff_root)
    # NOTE(review): this lambda ignores its argument and closes over diff_root;
    # it checks the same value either way, but the pattern is fragile.
    validate_arg(diff_root, lambda item: os.path.isdir(diff_root))
    if scratch_root is None:
        scratch_root = os.path.join(diff_root, '_', 'pmi')
    else:
        scratch_root = os.path.abspath(scratch_root)
    if ci_arch is not None:
        validate_arg(ci_arch, lambda item: item in valid_ci_archs)
    # Re-pack the (possibly normalized) values in the documented order.
    args = (arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash)
    log('Configuration:')
    log('    arch: %s' % arch)
    log('    ci_arch: %s' % ci_arch)
    log('    build_type: %s' % build_type)
    log('    base_root: %s' % base_root)
    log('    diff_root: %s' % diff_root)
    log('    scratch_root: %s' % scratch_root)
    log('    skip_baseline_build: %s' % skip_baseline_build)
    log('    skip_diffs: %s' % skip_diffs)
    log('    target_branch: %s' % target_branch)
    log('    commit_hash: %s' % commit_hash)
    return args
def nth_dirname(path, n):
    """Strip the last *n* components from *path*.

    Args:
        path (str): path name containing at least n components
        n (int): number of trailing components (basenames) to remove

    Returns:
        str: the path with the last n components removed; unchanged when
        n is 0.
    """
    assert n >= 0
    # Apply os.path.dirname n times to walk up the tree.
    for _ in range(n):
        path = os.path.dirname(path)
    return path
def log(message):
    """Print a log message prefixed with this script's name.

    Args:
        message (str): message to be printed
    """
    # The original used the Python-2-only print statement; the parenthesized
    # call form below prints the identical text under both Python 2 and 3.
    print('[%s]: %s' % (sys.argv[0], message))
def copy_files(source_dir, target_dir):
    """Copy every regular file from source_dir into target_dir.

    The copy is not recursive: subdirectories are skipped. Both
    directories must already exist. Honors the module-level ``testing``
    flag (copies are logged but not performed in testing mode).

    Args:
        source_dir (str): source directory path
        target_dir (str): target directory path

    Returns:
        Nothing
    """
    global testing
    assert os.path.isdir(source_dir)
    assert os.path.isdir(target_dir)
    for name in os.listdir(source_dir):
        src = os.path.join(source_dir, name)
        if not os.path.isfile(src):
            continue  # skip subdirectories and other non-file entries
        dst = os.path.join(target_dir, name)
        log('Copy: %s => %s' % (src, dst))
        if not testing:
            shutil.copy2(src, dst)
def run_command(command, command_env):
    """Spawn a subprocess in the given environment and wait for it.

    stdout/stderr of the child are inherited by this process (piped
    through). In testing mode the command is only logged.

    Args:
        command (list): argv-style command, one element per argument.
        command_env (dict): environment for the child process.

    Returns:
        int: the child's return code (0 in testing mode).
    """
    rc = 0
    log('Invoking: %s' % (' '.join(command)))
    if not testing:
        child = subprocess.Popen(command, env=command_env)
        # Block until the child exits; output goes straight to our streams.
        child.communicate()
        rc = child.returncode
        if rc != 0:
            log('Return code = %s' % rc)
    return rc
##########################################################################
# Do baseline build:
# 1. determine appropriate commit,
# 2. clone coreclr,
# 3. do build
##########################################################################
def baseline_build():
    """Clone the baseline coreclr tree and build it plus its test layout.

    Clones ``target_branch`` of Coreclr_url into ``baseCoreClrPath``,
    builds a checked JIT (via Docker cross-build for Linux arm/arm64),
    then generates the Core_Root test layout.

    Returns:
        int: 0 on success, 1 on any failure.
    """
    if not testing:
        if os.path.isdir(baseCoreClrPath):
            log('Removing existing tree: %s' % baseCoreClrPath)
            shutil.rmtree(baseCoreClrPath, onerror=del_rw)
    # Find the baseline commit
    # Clone at that commit
    command = 'git clone -b %s --single-branch %s %s' % (
        target_branch, Coreclr_url, baseCoreClrPath)
    log(command)
    returncode = 0 if testing else os.system(command)
    if returncode != 0:
        log('ERROR: git clone failed')
        return 1
    # Change directory to the baseline root
    with ChangeDir(baseCoreClrPath):
        # Set up for possible docker usage
        scriptPath = '.'
        buildOpts = ''
        dockerCmd = ''
        if not Is_windows and (arch == 'arm' or arch == 'arm64'):
            # Linux arm and arm64 builds are cross-compilation builds using Docker.
            if arch == 'arm':
                dockerFile = Docker_name_arm32
                dockerOpts = Docker_opts_arm32
            else:
                # arch == 'arm64'
                dockerFile = Docker_name_arm64
                dockerOpts = Docker_opts_arm64
            # Mount the clone at the same path inside the container.
            dockerCmd = 'docker run -i --rm -v %s:%s -w %s %s %s ' % (baseCoreClrPath, baseCoreClrPath, baseCoreClrPath, dockerOpts, dockerFile)
            buildOpts = 'cross'
            scriptPath = baseCoreClrPath
        # Build a checked baseline jit
        if Is_windows:
            command = 'set __TestIntermediateDir=int&&build.cmd %s checked skiptests skipbuildpackages' % arch
        else:
            command = '%s%s/build.sh %s checked skiptests skipbuildpackages %s' % (dockerCmd, scriptPath, arch, buildOpts)
        log(command)
        returncode = 0 if testing else os.system(command)
        if returncode != 0:
            log('ERROR: build failed')
            return 1
        # Build the layout (Core_Root) directory
        # For Windows, invoke build-test.cmd to restore packages before generating the layout.
        if Is_windows:
            command = 'build-test.cmd %s %s skipmanaged skipnative' % (build_type, arch)
            log(command)
            returncode = 0 if testing else os.system(command)
            if returncode != 0:
                log('ERROR: restoring packages failed')
                return 1
        if Is_windows:
            command = 'tests\\runtest.cmd %s checked GenerateLayoutOnly' % arch
        else:
            command = '%s%s/build-test.sh %s checked generatelayoutonly' % (dockerCmd, scriptPath, arch)
        log(command)
        returncode = 0 if testing else os.system(command)
        if returncode != 0:
            log('ERROR: generating layout failed')
            return 1
    return 0
##########################################################################
# Do PMI diff run:
# 1. download dotnet CLI (needed by jitutils)
# 2. clone jitutils repo
# 3. build jitutils
# 4. run PMI asm generation on baseline and diffs
# 5. run jit-analyze to compare baseline and diff
##########################################################################
def do_pmi_diffs():
    """Build jitutils and run PMI-based JIT asm diffs against the baseline.

    Steps:
      1. download and unpack the dotnet CLI (needed to build/run jitutils),
      2. clone and build the jitutils repo,
      3. run jit-diff (PMI mode) to generate baseline and diff asm,
      4. run jit-analyze to summarize the differences.

    Uses module globals (scratch_root, my_env, baseCoreClrPath, diff_root,
    arch, build_type, ci_arch, testing, ...) set up by main()/validate_args().

    Returns:
        int: 0 on success, 1 on fatal error. The mere existence of asm
        diffs is NOT treated as a failure.
    """
    global baseCoreClrPath
    # Setup scratch directories. Names are short to avoid path length problems on Windows.
    dotnetcliPath = os.path.abspath(os.path.join(scratch_root, 'cli'))
    jitutilsPath = os.path.abspath(os.path.join(scratch_root, 'jitutils'))
    asmRootPath = os.path.abspath(os.path.join(scratch_root, 'asm'))
    dotnet_tool = 'dotnet.exe' if Is_windows else 'dotnet'
    # Make sure the temporary directories do not exist. If they do already, delete them.
    if not testing:
        # If we can't delete the dotnet tree, it might be because a previous run failed or was
        # cancelled, and the build servers are still running. Try to stop it if that happens.
        if os.path.isdir(dotnetcliPath):
            try:
                log('Removing existing tree: %s' % dotnetcliPath)
                shutil.rmtree(dotnetcliPath, onerror=del_rw)
            except OSError:
                if os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
                    log('Failed to remove existing tree; trying to shutdown the dotnet build servers before trying again.')
                    # Looks like the dotnet tool is still there; try to run it to shut down the build servers.
                    temp_env = my_env
                    temp_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
                    log('Shutting down build servers')
                    command = ["dotnet", "build-server", "shutdown"]
                    returncode = run_command(command, temp_env)
                    # Try again
                    log('Trying again to remove existing tree: %s' % dotnetcliPath)
                    shutil.rmtree(dotnetcliPath, onerror=del_rw)
                else:
                    log('Failed to remove existing tree')
                    return 1
        if os.path.isdir(jitutilsPath):
            log('Removing existing tree: %s' % jitutilsPath)
            shutil.rmtree(jitutilsPath, onerror=del_rw)
        if os.path.isdir(asmRootPath):
            log('Removing existing tree: %s' % asmRootPath)
            shutil.rmtree(asmRootPath, onerror=del_rw)
        try:
            os.makedirs(dotnetcliPath)
            os.makedirs(jitutilsPath)
            os.makedirs(asmRootPath)
        except OSError:
            if not os.path.isdir(dotnetcliPath):
                log('ERROR: cannot create CLI install directory %s' % dotnetcliPath)
                return 1
            if not os.path.isdir(jitutilsPath):
                log('ERROR: cannot create jitutils install directory %s' % jitutilsPath)
                return 1
            if not os.path.isdir(asmRootPath):
                log('ERROR: cannot create asm directory %s' % asmRootPath)
                return 1
    log('dotnet CLI install directory: %s' % dotnetcliPath)
    log('jitutils install directory: %s' % jitutilsPath)
    log('asm directory: %s' % asmRootPath)
    # Download .NET CLI
    log('Downloading .Net CLI')
    dotnetcliUrl = ""
    dotnetcliFilename = ""
    # Pick the SDK download URL for the current OS/arch combination.
    if Clr_os == 'Linux' and arch == 'x64':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-linux-x64.tar.gz"
    elif Clr_os == 'Linux' and arch == 'arm':
        dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/release/2.1.4xx/dotnet-sdk-latest-linux-arm.tar.gz"
    elif Clr_os == 'Linux' and arch == 'arm64':
        # Use the latest (3.0) dotnet SDK. Earlier versions don't work.
        dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-linux-arm64.tar.gz"
    elif Clr_os == 'OSX':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-osx-x64.tar.gz"
    elif Clr_os == 'Windows_NT':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-win-x64.zip"
    else:
        log('ERROR: unknown or unsupported OS (%s) architecture (%s) combination' % (Clr_os, arch))
        return 1
    if Is_windows:
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
    else:
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
    log('Downloading: %s => %s' % (dotnetcliUrl, dotnetcliFilename))
    if not testing:
        # Follow any redirect first, then fetch the final URL to a local file.
        response = urllib2.urlopen(dotnetcliUrl)
        request_url = response.geturl()
        testfile = urllib.URLopener()
        testfile.retrieve(request_url, dotnetcliFilename)
        if not os.path.isfile(dotnetcliFilename):
            log('ERROR: Did not download .Net CLI')
            return 1
    # Install .Net CLI
    log('Unpacking .Net CLI')
    if not testing:
        if Is_windows:
            with zipfile.ZipFile(dotnetcliFilename, "r") as z:
                z.extractall(dotnetcliPath)
        else:
            tar = tarfile.open(dotnetcliFilename)
            tar.extractall(dotnetcliPath)
            tar.close()
        if not os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
            log('ERROR: did not extract .Net CLI from download')
            return 1
    # Add dotnet CLI to PATH we'll use to spawn processes.
    log('Add %s to my PATH' % dotnetcliPath)
    my_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
    # Clone jitutils
    command = 'git clone -b master --single-branch %s %s' % (Jitutils_url, jitutilsPath)
    log(command)
    returncode = 0 if testing else os.system(command)
    if returncode != 0:
        log('ERROR: cannot clone jitutils');
        return 1
    # We're going to start running dotnet CLI commands. Unfortunately, once you've done that,
    # the dotnet CLI sticks around with a set of build server processes running. Put all this
    # in a try/finally, and stop the build servers under any circumstance.
    try:
        #
        # Build jitutils, including "dotnet restore"
        #
        # Change directory to the jitutils root
        with ChangeDir(jitutilsPath):
            # Do "dotnet restore"
            command = ["dotnet", "restore"]
            returncode = run_command(command, my_env)
            # Do build
            command = ['build.cmd', '-p'] if Is_windows else ['bash', './build.sh', '-p']
            returncode = run_command(command, my_env)
            if returncode != 0:
                log('ERROR: jitutils build failed')
                return 1
        jitutilsBin = os.path.join(jitutilsPath, "bin")
        if not testing and not os.path.isdir(jitutilsBin):
            log("ERROR: jitutils not correctly built")
            return 1
        jitDiffPath = os.path.join(jitutilsBin, "jit-diff.dll")
        if not testing and not os.path.isfile(jitDiffPath):
            log("ERROR: jit-diff.dll not built")
            return 1
        jitAnalyzePath = os.path.join(jitutilsBin, "jit-analyze.dll")
        if not testing and not os.path.isfile(jitAnalyzePath):
            log("ERROR: jit-analyze.dll not built")
            return 1
        # Add jitutils bin to path for spawned processes
        log('Add %s to my PATH' % jitutilsBin)
        my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"]
        #
        # Run PMI asm diffs
        #
        # We want this script as a whole to return 0 if it succeeds (even if there are diffs) and only
        # return non-zero if there are any fatal errors.
        #
        # TO DO: figure out how to differentiate fatal errors and a return code indicating there are diffs,
        # and have the invoking netci.groovy code act differently for each case.
        # Generate the diffs
        #
        # Invoke command like:
        #   dotnet c:\gh\jitutils\bin\jit-diff.dll diff --pmi --base --base_root f:\gh\coreclr12 --diff --diff_root f:\gh\coreclr10 --arch x64 --build Checked --tag 1 --noanalyze --output f:\output --corelib
        #
        # We pass --noanalyze and call jit-analyze manually. This isn't really necessary, but it does give us better output
        # due to https://github.com/dotnet/jitutils/issues/175.
        altjit_args = []
        if ci_arch is not None and (ci_arch == 'x86_arm_altjit' or ci_arch == 'x64_arm64_altjit'):
            altjit_args = ["--altjit", "protononjit.dll"]
        # Over which set of assemblies should we generate asm?
        # TODO: parameterize this
        asm_source_args = ["--frameworks", "--benchmarks"]
        command = ["dotnet", jitDiffPath, "diff", "--pmi", "--base", "--base_root", baseCoreClrPath, "--diff", "--diff_root", diff_root, "--arch", arch, "--build", build_type, "--tag", "1", "--noanalyze", "--output", asmRootPath] + asm_source_args + altjit_args
        returncode = run_command(command, my_env)
        # We ignore the return code: it is non-zero if there are any diffs. If there are fatal errors here, we will miss them.
        # Question: does jit-diff distinguish between non-zero fatal error code and the existence of diffs?
        # Did we get any diffs?
        baseOutputDir = os.path.join(asmRootPath, "1", "base")
        if not testing and not os.path.isdir(baseOutputDir):
            log("ERROR: base asm not generated")
            return 1
        diffOutputDir = os.path.join(asmRootPath, "1", "diff")
        if not testing and not os.path.isdir(diffOutputDir):
            log("ERROR: diff asm not generated")
            return 1
        # Do the jit-analyze comparison:
        #   dotnet c:\gh\jitutils\bin\jit-analyze.dll --base f:\output\diffs\1\base --recursive --diff f:\output\diffs\1\diff
        command = ["dotnet", jitAnalyzePath, "--recursive", "--base", baseOutputDir, "--diff", diffOutputDir]
        returncode = run_command(command, my_env)
        if returncode != 0:
            # This is not a fatal error.
            log('Compare: %s %s' % (baseOutputDir, diffOutputDir))
    finally:
        # Shutdown the dotnet build servers before cleaning things up
        # TODO: make this shutdown happen anytime after we've run any 'dotnet' commands. I.e., try/finally style.
        log('Shutting down build servers')
        command = ["dotnet", "build-server", "shutdown"]
        returncode = run_command(command, my_env)
    return 0
##########################################################################
# Main
##########################################################################
def main(args):
    """Entry point: validate args, build the baseline if needed, run diffs.

    Args:
        args: namespace produced by the module-level argparse parser.

    Returns:
        int: 0 on success, 1 on any fatal error.
    """
    global arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash
    global my_env
    global base_layout_root
    global diff_layout_root
    global baseCoreClrPath
    global testing
    arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash = validate_args(args)
    my_env = os.environ
    if not testing and not os.path.isdir(diff_root):
        log('ERROR: root directory for coreclr diff tree not found: %s' % diff_root)
        return 1
    # Check the diff layout directory before going too far.
    diff_layout_root = os.path.join(diff_root,
                                    'bin',
                                    'tests',
                                    '%s.%s.%s' % (Clr_os, arch, build_type),
                                    'Tests',
                                    'Core_Root')
    if not testing and not os.path.isdir(diff_layout_root):
        log('ERROR: diff test overlay not found or is not a directory: %s' % diff_layout_root)
        return 1
    # Create the scratch root directory
    if not testing:
        try:
            os.makedirs(scratch_root)
        except OSError:
            if not os.path.isdir(scratch_root):
                log('ERROR: cannot create scratch directory %s' % scratch_root)
                return 1
    # Set up baseline root directory. If one is passed to us, we use it. Otherwise, we create
    # a temporary directory.
    if base_root is None:
        # Setup scratch directories. Names are short to avoid path length problems on Windows.
        # No need to create this directory now, as the "git clone" will do it later.
        baseCoreClrPath = os.path.abspath(os.path.join(scratch_root, 'base'))
    else:
        baseCoreClrPath = os.path.abspath(base_root)
        if not testing and not os.path.isdir(baseCoreClrPath):
            log('ERROR: base root directory not found or is not a directory: %s' % baseCoreClrPath)
            return 1
    # Do the baseline build, if needed
    if not skip_baseline_build and base_root is None:
        returncode = baseline_build()
        if returncode != 0:
            return 1
    # Check that the baseline root directory was created.
    base_layout_root = os.path.join(baseCoreClrPath,
                                    'bin',
                                    'tests',
                                    '%s.%s.%s' % (Clr_os, arch, build_type),
                                    'Tests',
                                    'Core_Root')
    if not testing and not os.path.isdir(base_layout_root):
        log('ERROR: baseline test overlay not found or is not a directory: %s' % base_layout_root)
        return 1
    # Do the diff run, if needed
    if not skip_diffs:
        returncode = do_pmi_diffs()
        if returncode != 0:
            return 1
    return 0
##########################################################################
# setup for Main
##########################################################################
if __name__ == '__main__':
    # Parse the command line, run the tool, and propagate its exit code.
    Args = parser.parse_args(sys.argv[1:])
    return_code = main(Args)
    log('Exit code: %s' % return_code)
    sys.exit(return_code)
| 37.517045 | 261 | 0.590338 |
acf1625d16064a0826cd48cb7d91519afd741f21 | 565 | py | Python | guests/migrations/0009_guest_meal.py | dannymuchoki/django-wedding-website | 4cb322719b04500b587500ea65311f3db302732d | [
"Apache-2.0"
] | null | null | null | guests/migrations/0009_guest_meal.py | dannymuchoki/django-wedding-website | 4cb322719b04500b587500ea65311f3db302732d | [
"Apache-2.0"
] | null | null | null | guests/migrations/0009_guest_meal.py | dannymuchoki/django-wedding-website | 4cb322719b04500b587500ea65311f3db302732d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-20 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional Guest.meal choice field
    # (beef/chicken/vegetarian), nullable and blank-able.
    dependencies = [
        ('guests', '0008_auto_20160214_1642'),
    ]
    operations = [
        migrations.AddField(
            model_name='guest',
            name='meal',
            field=models.CharField(blank=True, choices=[('beef', 'Cow'), ('chicken', 'Chicken'), ('vegetarian', 'Vegetable')], max_length=20, null=True),
        ),
    ]
| 26.904762 | 154 | 0.59115 |
acf162fb85e228e9ac8ae4d12a710f7d410683fc | 3,706 | py | Python | main.py | Gyro7/Capsian-Engine | 0cedbfc9dbdf741926ae48e680d2b89c35b91af6 | [
"Apache-2.0"
] | null | null | null | main.py | Gyro7/Capsian-Engine | 0cedbfc9dbdf741926ae48e680d2b89c35b91af6 | [
"Apache-2.0"
] | null | null | null | main.py | Gyro7/Capsian-Engine | 0cedbfc9dbdf741926ae48e680d2b89c35b91af6 | [
"Apache-2.0"
] | null | null | null | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Capsian Engine
# Copyright 2020 - 2021 Alessandro Salerno (Tzyvoski)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from Capsian import *
from os import system
import os
# Clear the terminal ("cls" on Windows, "clear" everywhere else).
system("cls") if os.name == "nt" else system("clear")
# Eval the contents of the options file
# NOTE(review): eval() executes whatever is in options.cpsn; this is only
# safe while that file is trusted local configuration.
with open("options.cpsn", "r") as preferences:
    global options
    _options = preferences.read()
    options = eval(compile(source=_options, filename="options", mode="eval", optimize=1))
# Compiles and runs scripts
import scripts
try:
    # Enable Capsian Basic Lighting if required
    if options["use basic lighting"]:
        engine.main_window.enable(CPSN_LIGHTING)
    # Set OpenGL Clear Color
    SkyColor << options["clear color"]
    # Set fog settings
    if options["enable fog"]:
        fog_color = options["fog color"]
        fog_start = options["fog start"]
        fog_end = options["fog end"]
        Fog(fog_color, fog_start, fog_end)
except:
    # Bare except: any setup failure falls back to a minimal error window.
    _errcam = OrthographicCamera()
    _errwin = Window3D(camera=_errcam, width=1024, height=500)
    Log.critical("Something went wrong while setting up your game. This is usually caused by the absence of a default window and/or camera")
# Runs all the code
engine.run()
# Random print() to make the output look cleaner
print()
| 37.06 | 140 | 0.67782 |
acf16367a8b867fd74ab9e243c33ab47e688b336 | 378 | py | Python | spotter/spotter_app/migrations/0005_alter_user_bio.py | gulpinhenry/spotter | 2a3f828f2e09dc4835861e2be489f537a197b19a | [
"MIT"
] | 1 | 2022-02-05T23:04:04.000Z | 2022-02-05T23:04:04.000Z | spotter/spotter_app/migrations/0005_alter_user_bio.py | gulpinhenry/spotter | 2a3f828f2e09dc4835861e2be489f537a197b19a | [
"MIT"
] | null | null | null | spotter/spotter_app/migrations/0005_alter_user_bio.py | gulpinhenry/spotter | 2a3f828f2e09dc4835861e2be489f537a197b19a | [
"MIT"
] | 1 | 2022-02-06T23:16:16.000Z | 2022-02-06T23:16:16.000Z | # Generated by Django 4.0 on 2022-02-06 01:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters User.bio to a TextField defaulting
    # to the empty string.
    dependencies = [
        ('spotter_app', '0004_alter_group_name'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='bio',
            field=models.TextField(default=''),
        ),
    ]
| 19.894737 | 49 | 0.587302 |
acf1644d576695b0a4a54adac7cb62488f718cd5 | 1,447 | py | Python | sublime-plugin/poli/module/listener.py | egnartsms/poli | 3a9eab2261688ed84b83808722360356b8e67522 | [
"MIT"
] | 1 | 2020-06-07T20:55:27.000Z | 2020-06-07T20:55:27.000Z | sublime-plugin/poli/module/listener.py | egnartsms/poli | 3a9eab2261688ed84b83808722360356b8e67522 | [
"MIT"
] | 2 | 2021-01-22T08:45:48.000Z | 2021-01-22T08:45:49.000Z | sublime-plugin/poli/module/listener.py | egnartsms/poli | 3a9eab2261688ed84b83808722360356b8e67522 | [
"MIT"
] | null | null | null | import re
import sublime
import sublime_plugin
import sys
import poli.config as config
from poli.comm import comm
from poli.module import operation as op
__all__ = ['PoliViewListener']
class PoliViewListener(sublime_plugin.ViewEventListener):
    # Per-view Sublime Text event listener for Poli module views: sets the
    # view up on load, reflects backend connection state, and provides
    # completions for "$." references.
    @classmethod
    def is_applicable(cls, settings):
        # Lord, forgive me for doing this..
        # Sublime passes only the view's *settings* here, but we need the
        # view object itself, so peek at the caller's local variables.
        # Fragile by design: depends on Sublime's internal variable name.
        view = sys._getframe(1).f_locals.get('view')
        return view is not None and op.is_view_poli(view)
    def on_load(self):
        # Initialize the module view once its file has loaded (if enabled).
        if not config.enabled:
            return
        op.setup_module_view(self.view)
    def on_activated(self):
        # Propagate the backend connection state to the view.
        op.set_connected_status(self.view, comm.is_connected)
    def on_query_completions(self, prefix, locations):
        # Ask the backend for completions of "$.<prefix>" or
        # "$.<star>.<prefix>" just before the (single) caret position.
        if not config.enabled:
            return None
        if len(locations) != 1:
            return None
        [pt] = locations
        linereg = self.view.line(pt)
        # Text on the current line up to the caret.
        str_prec = self.view.substr(sublime.Region(linereg.begin(), pt))
        mtch = re.search(r'^.*?\$(?:\.(?P<star>\w+))?\.(?P<prefix>\w+)$', str_prec)
        if mtch is None:
            return None
        entries = comm.op('getCompletions', {
            'module': op.view_module_name(self.view),
            'star': mtch.group('star'),
            'prefix': mtch.group('prefix')
        })
        # Suppress Sublime's own word/explicit completions for these results.
        return (
            [(x, x) for x in entries],
            sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
        )
| 26.796296 | 83 | 0.612301 |
acf164ea4fcda861e19a37b8003980d68b66b982 | 78,534 | py | Python | prep/Timecourses.py | orena1/SpineImagingALM | 372ae843e52c27f2298bc415ef267c18aed995cb | [
"MIT"
] | 2 | 2019-11-30T00:57:07.000Z | 2020-08-26T23:49:15.000Z | prep/Timecourses.py | elifesciences-publications/SpineImagingALM | 5f40e5182f298c3beba4956c37d7312f5e8b52b3 | [
"MIT"
] | 10 | 2020-03-24T17:12:03.000Z | 2022-03-11T23:51:43.000Z | prep/Timecourses.py | elifesciences-publications/SpineImagingALM | 5f40e5182f298c3beba4956c37d7312f5e8b52b3 | [
"MIT"
] | 2 | 2019-10-23T14:34:50.000Z | 2020-10-21T00:59:10.000Z | # Encoding: utf-8
""" Module to interact with the Matlab GUI for spine segmentation
and pulling out time courses from the data
"""
from __future__ import print_function
import copy
import logging
import os
import sys
import itertools
from builtins import zip
from builtins import map
import SimpleITK as sitk
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import plotly.graph_objs as go
import plotly.offline as py
import pandas as pd
import scipy.ndimage as ndimg
import skimage.external.tifffile as tf
import tables
import tqdm
from future.utils import iteritems
from networkx.algorithms.dag import descendants
from IPython.display import clear_output
from neurom.io import load_data
from scipy.interpolate import griddata
from scipy.ndimage.morphology import binary_dilation
from scipy.spatial import KDTree
from sklearn import linear_model
from prep.IO import loadmat, writeTiff
from prep.Log import add_logging_to_file
from prep.Utils import getStructure, log_progress, convert_8bit, angle
# Setup logging: drop any inherited handlers and attach a single stdout handler.
logger = logging.getLogger(__name__)
logger.handlers = []
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter("%(name)s @ %(asctime)s - [%(levelname)s] %(module)s::%(funcName)s: %(message)s"))
# The handler filters at INFO even though the logger itself allows DEBUG,
# so DEBUG records only appear via handlers added elsewhere (e.g. a log file).
ch.setLevel(logging.INFO)
logger.addHandler(ch)
def initTimeDict(session, maskName='mask', um3bin=100, dilate1=0.5, dilate2=0.5, zStep=1, anatomyZstep=1.6):
    """Create a fresh time-course dictionary for a session.

    :param session: current session object
    :param maskName: name of the mask file (without extension)
    :param um3bin: dendrite bin size, in cubic microns
    :param dilate1: first dilation of each spine (um), subtracted from the dendrite mask
    :param dilate2: second dilation (um) applied to the combined mask
    :param zStep: z step of the extended stack
    :param anatomyZstep: z spacing of the anatomical stack of the FOV
    :return: a timeDict dictionary
    """
    xy_um = session.pixSizeXY / 1000
    options = session.Sp['OptionsStruct']
    reg = session.regDict
    td = {
        'path': session.path,
        'fullPath': session.path + maskName,
        'fieldsTform': session.embedDict['TFormExp']['fieldsTform'],
        'pixelSize': np.array([xy_um, xy_um, zStep]),
        'um3bin': um3bin,
        'dilate1': dilate1,
        'dilate2': dilate2,
        'anatomyZstep': anatomyZstep,
        'Fs': session.volRate,
        'xyPixNum': options['RefPixels'],
        'UM_1X': float(options['UM_1X']),
        'fieldMask': session.fieldMaskBool,
        'imagingLines': session.ySize,
        'imagingPixels': session.xSize,
        'flyLines': session.optFlyLines,
        'nExp': reg['nZShifts'],
        'finalShifts': reg['finalShifts'],
        'grpZPos': reg['grpZPos'],
        'groupZ': reg['groupZ'],
        'fix_spines': False,
    }
    # Location of this session inside the on-disk database tree.
    fov_tag = 'FOV' + str(int(session.meta['FOV'][0]))
    db_path = os.path.join(session.basePath, 'Database', session.animalID, fov_tag, session.date + session.run)
    td['databasePath'] = db_path
    td['SessionPath'] = os.path.join(db_path, 'Session.tif')
    logger.info('timeDict initialized with full path:' + td['fullPath'] + '.mat')
    return td
def load_hdf5(filename):
    """Load a MATLAB file saved with the -v7.3 flag (HDF5 container).

    Fix: the original opened the PyTables file and never closed it, leaking
    a file handle per call; the handle is now released via a context manager.

    :param filename: file to load
    :return: dict with keys labelimg, dendTable, dend, quality, note, dendNum
    """
    data = dict()
    with tables.open_file(filename) as fh:
        # MATLAB stores arrays column-major, hence the transposes.
        data['labelimg'] = fh.root.newCell.labelimg[:].T
        data['dendTable'] = fh.root.newCell.dendTable[:].T
        dend = fh.root.newCell.dend[:]
        dendFix = []
        for d in dend:
            dendFix.append(d[0].T)
        data['dend'] = dendFix
        data['quality'] = fh.root.newCell.quality[:]
        data['note'] = fh.root.newCell.note[:]
        data['dendNum'] = np.squeeze(fh.root.newCell.dendNum[:]).astype(np.uint8)
    return data
def loadMask(timeDict):
    """Load the segmentation mask .mat file and unpack it into timeDict.

    Tries three loaders in order: loadmat on the path without the '.mat'
    extension, loadmat on the full path, and finally the HDF5 reader for
    MATLAB -v7.3 files.

    Fix: the first fallback used a bare ``except:``, which also trapped
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception:``.

    :param timeDict: time course dictionary; needs 'fullPath' and 'databasePath'
    :return: adds labelimg, dendTable, dendR, dend, dendNumAll, quality,
        notes, dims, spineMasks and dendNum to timeDict
    """
    try:
        logger.info('Trying to load: %s' % timeDict['fullPath'])
        data = loadmat(timeDict['fullPath'][:-4])
    except Exception:
        try:
            data = loadmat(timeDict['fullPath'])
        except (NotImplementedError, TypeError, ValueError):
            logger.info('Trying to load hdf5 at: %s' % timeDict['fullPath'])
            # new -v7.3 mat file is a hdf5 file
            data = load_hdf5(timeDict['fullPath'])
    timeDict['labelimg'] = data['labelimg']
    timeDict['dendTable'] = data['dendTable']
    # Per-point radii come from the SWC reconstruction of the dendrite.
    a = load_data(os.path.join(timeDict['databasePath'], 'dendrite.swc'))
    timeDict['dendR'] = a.data_block[:, 3]
    timeDict['dend'] = data['dend']
    timeDict['dendNumAll'] = data['dendNum'] - 1
    timeDict['quality'] = data['quality']
    timeDict['notes'] = data['note']
    timeDict['dims'] = timeDict['labelimg'].shape
    timeDict['spineMasks'] = len(np.unique(timeDict['labelimg'])) - 1
    timeDict['dendNum'] = len(timeDict['dend'])
    logger.info('imported mask with dims: ' + str(timeDict['labelimg'].shape) + ' ,branches: ' +
                str(timeDict['dendNum']) + ', spine masks:' + str(timeDict['spineMasks']))
def binDendrite(sc, timeDict):
    """Bin each dendrite into ~um3bin-sized segments and label them.

    Walks every traced dendrite, splits it into consecutive pieces whose
    accumulated (radius-weighted) measure reaches timeDict['um3bin'],
    dilates each piece on the Spark cluster, and writes the pieces into a
    new label image whose labels continue after the last spine label.

    :param sc: Spark Context
    :param timeDict: time course dictionary; needs the data loaded by
        loadMask and the 'um3bin' parameter
    :return: adds dendLabelImg, dendLabelTable, dendPathLength and dendSize
        to timeDict, and writes 'dendLabelImg' as a TIFF under timeDict['path']
    """
    dims = timeDict['dims']
    labelimg = timeDict['labelimg']
    pixelSize = timeDict['pixelSize']
    dend = timeDict['dend']
    dendTable = timeDict['dendTable']
    dendLabelImg = np.zeros(dims)
    dendLabelTable = np.array([], ndmin=2)
    # dendrite labels start right after the last spine mask label
    counter = np.max(labelimg) + 1
    dendLabelImg = dendLabelImg.reshape(-1)
    dendPathLength = []
    flagFirst = True
    # for each dendrite
    segmentSize = list()
    for i in log_progress(range(timeDict['dendNum']), name='Dendrites'):
        if dend[i].shape[0] == 0:
            logger.info('Skipped dendrite %d' % (i + 1,))
            continue
        # tracing coordinates are 1-indexed (MATLAB); shift to 0-indexed
        X = dend[i][:, 1] - 1
        Y = dend[i][:, 0] - 1
        Z = dend[i][:, 2] - 1
        # find radii at every tracing point (nearest-neighbour lookup)
        currentDend = dendTable[dendTable[:, 7] == i + 1, :]
        if len(dend[i].shape) < 2:
            r = currentDend[:, 5] / 2
        else:
            r = griddata(currentDend[:, [2, 3, 4]] / 2, currentDend[:, 5] / 2, dend[i][:, [0, 1, 2]], 'nearest')
        # path length distance in microns between consecutive tracing points
        # NOTE(review): the X term multiplies by pixelSize inside np.diff, the
        # other two outside — equivalent only for a constant pixel size.
        distPath = np.sqrt((np.diff(X.astype(np.float32) * pixelSize[0])) ** 2 +
                           (np.diff(Y.astype(np.float32)) * pixelSize[1]) ** 2 +
                           (np.diff(Z.astype(np.float32)) * pixelSize[2]) ** 2)
        # voxel-count distance, weighted below by the local radius term,
        # then segmented into um3bin parts
        dist = np.sqrt((np.diff(X.astype(np.float32))) ** 2 +
                       (np.diff(Y.astype(np.float32))) ** 2 +
                       (np.diff(Z.astype(np.float32))) ** 2)
        dist = dist * ((4.0 / 3.0) * np.pi * r[:-1])
        stopPt = 0
        # logger.info('Dendrite: %d' % (i + 1))
        sys.stdout.flush()
        # for each dendrite segment dilate and add to label image
        if len(X) == 2:
            # two-point dendrite: a single segment covering both points
            starts = [0]
            ends = [2]
            lengths = [np.sum(distPath[0:2])]
        else:
            starts = []
            ends = []
            lengths = []
            while stopPt < len(X):
                startPt = stopPt
                # first index where the accumulated measure exceeds um3bin
                stop1 = np.where(np.cumsum(dist[startPt:]) > timeDict['um3bin'])[0] + startPt
                if len(stop1) > 0:
                    stopPt = np.min(np.array([stop1[0], len(X)]))
                else:
                    stopPt = len(X)
                if startPt == stopPt:
                    stopPt += 2
                starts.append(startPt)
                ends.append(stopPt)
                d = np.sum(distPath[startPt:stopPt])
                if d == 0.0:
                    logger.error('Distance is 0!')
                    break
                else:
                    lengths.append(d)
        rdd = sc.parallelize(list(zip(range(len(starts)), starts, ends)))

        def mask(kse):
            # Dilate one (start, stop) segment into a binary volume and return
            # the flat voxel indices it occupies; runs on the Spark workers.
            key, start, stop = kse
            dendLabelImgTemp = np.zeros(dims)
            x1 = X[start:stop].astype(int)
            y1 = Y[start:stop].astype(int)
            z1 = Z[start:stop].astype(int)
            # make sure no overflow
            x1[x1 >= dims[0]] = dims[0] - 1
            y1[y1 >= dims[1]] = dims[1] - 1
            z1[z1 >= dims[2]] = dims[2] - 1
            dendLabelImgTemp[x1, y1, z1] = 1
            sSize = np.mean(r[start:stop])
            dendLabelImgTemp = binary_dilation(dendLabelImgTemp > 0,
                                               getStructure(np.array([pixelSize[0], pixelSize[1], 2.5]), sSize))
            index_inner = np.ravel_multi_index(np.where(dendLabelImgTemp > 0), dims)
            return key, sSize, index_inner

        indexRdd = rdd.map(mask).collect()
        # stamp each collected segment into the flat label image
        for segNum, size, index in indexRdd:
            dendLabelImg[index] = counter
            if flagFirst:
                dendLabelTable = np.array([counter, i, segNum], ndmin=2)
                flagFirst = False
            else:
                dendLabelTable = np.append(dendLabelTable, np.array([counter, i, segNum], ndmin=2), axis=0)
            counter += 1
            segmentSize.append(size)
        dendPathLength.append(np.array(lengths))
    dendLabelImg = dendLabelImg.reshape(labelimg.shape)
    timeDict['dendLabelImg'] = dendLabelImg
    timeDict['dendLabelTable'] = dendLabelTable
    timeDict['dendPathLength'] = dendPathLength
    timeDict['dendSize'] = segmentSize
    writeTiff(timeDict['path'], dendLabelImg, 'dendLabelImg')
def binDendriteByDistance(sc, timeDict, um4bin, hasSoma=True):
    """Bin each dendrite into segments of ~um4bin path length ("B" variant).

    Like binDendrite, but segments by pure path length (microns) instead of a
    radius-weighted measure, and optionally stitches the soma dendrites into
    a single segment. Results go into the *B-suffixed keys.

    :param sc: Spark Context
    :param timeDict: time course dictionary; needs the mask data and, when
        hasSoma is True, 'Masks' plus 'cellIndex'/'cellIndexAll'
    :param um4bin: target path length per bin, in microns
    :param hasSoma: if True, identify the soma dendrites from the cell index
    :return: adds dendLabelImgB, dendLabelTableB, dendPathLengthB and
        dendSizeB to timeDict, and writes 'dendLabelImgB' as a TIFF
    """
    dims = timeDict['dims']
    labelimg = timeDict['labelimg']
    pixelSize = timeDict['pixelSize']
    dend = timeDict['dend']
    dendTable = timeDict['dendTable']
    dendLabelImg = np.zeros(dims)
    dendLabelTable = np.array([], ndmin=2)
    # dendrite labels start right after the last spine mask label
    counter = np.max(labelimg) + 1
    dendLabelImg = dendLabelImg.reshape(-1)
    dendPathLength = []
    flagFirst = True
    # identify soma dendrites from the masks assigned to the cell body
    if hasSoma:
        if 'cellIndexAll' in timeDict:
            logger.info('Using cellIndexAll')
            soma_dends = np.unique(timeDict['Masks'].loc[timeDict['cellIndexAll']].DendNum.values)
        else:
            logger.info('Using cellIndex')
            soma_dends = np.unique(timeDict['Masks'].loc[timeDict['cellIndex']].DendNum.values)
        logger.info('Found %s as soma dendrite number(s)' % soma_dends)
    else:
        soma_dends = []
    # keep only soma dendrites that have a 2-D tracing array
    # NOTE(review): when hasSoma is False, soma_dends is a plain list and
    # `[][[]]` raises TypeError — this path appears never exercised; verify.
    valid = [len(dend[k].shape) == 2 for k in soma_dends]
    soma_dends = soma_dends[valid]
    timeDict['soma_dends'] = soma_dends
    segmentSize = list()
    for i in log_progress(range(timeDict['dendNum']), name='Dendrites'):
        if (dend[i].shape[0] == 0) | (len(dend[i].shape) < 2):
            logger.info('Skipped dendrite %d' % (i + 1,))
            continue
        # the first soma dendrite carries the stitched soma segment
        if len(soma_dends)>0:
            if i == soma_dends[0]:
                somaFlag = True
            else:
                somaFlag = False
        else:
            somaFlag = False
        if len(soma_dends) >= 2 and somaFlag:
            # soma mask needs stiching: concatenate all soma dendrite tracings
            a = [(dend[k][:, 1] - 1) for k in soma_dends]
            X = np.hstack(a)
            a = [(dend[k][:, 0] - 1) for k in soma_dends]
            Y = np.hstack(a)
            a = [(dend[k][:, 2] - 1) for k in soma_dends]
            Z = np.hstack(a)
            # NOTE(review): only the first two soma dendrites are OR-ed here
            indexs = [dendTable[:, 7] == k + 1 for k in soma_dends]
            index = np.logical_or(indexs[0], indexs[1])
            currentDend = dendTable[index, :]
        else:
            # remaining soma dendrites were already merged; skip them
            if len(soma_dends) >= 2 and i in soma_dends:
                continue
            X = dend[i][:, 1] - 1
            Y = dend[i][:, 0] - 1
            Z = dend[i][:, 2] - 1
            # find radii
            currentDend = dendTable[dendTable[:, 7] == i + 1, :]
        if len(dend[i].shape) < 2:
            r = currentDend[:, 5] / 2
        else:
            r = griddata(currentDend[:, [2, 3, 4]] / 2, currentDend[:, 5] / 2, dend[i][:, [0, 1, 2]], 'nearest')
        # path length distance in microns between consecutive tracing points
        dist = np.sqrt((np.diff(X.astype(np.float32) * pixelSize[0])) ** 2 +
                       (np.diff(Y.astype(np.float32)) * pixelSize[1]) ** 2 +
                       (np.diff(Z.astype(np.float32)) * pixelSize[2]) ** 2)
        if somaFlag:
            # the whole stitched soma becomes one segment
            starts = [0]
            ends = [len(X)]
            lengths = [np.sum(dist)]
        else:
            if len(X) == 2:
                # NOTE(review): `pass` (not `continue`) — starts/ends/lengths
                # keep their values from the previous loop iteration and the
                # previous segmentation is re-applied to this dendrite; verify
                # whether a `continue` was intended.
                pass  # continue
            else:
                stopPt = 0
                starts = []
                ends = []
                lengths = []
                while stopPt < len(X):
                    startPt = stopPt
                    # first index where cumulative path length exceeds um4bin
                    stop1 = np.where(np.cumsum(dist[startPt:]) > um4bin)[0] + startPt
                    if len(stop1) > 0:
                        stopPt = np.min(np.array([stop1[0], len(X)]))
                    else:
                        # remainder shorter than um4bin is discarded
                        break
                    starts.append(startPt)
                    ends.append(stopPt)
                    d = np.sum(dist[startPt:stopPt])
                    if d == 0.0:
                        logger.error('Distance is 0!')
                        break
                    else:
                        lengths.append(d)
        rdd = sc.parallelize(list(zip(range(len(starts)), starts, ends)))

        def mask(kse):
            # Dilate one (start, stop) segment into a binary volume and return
            # the flat voxel indices it occupies; runs on the Spark workers.
            key, start, stop = kse
            dendLabelImgTemp = np.zeros(dims)
            x1 = X[start:stop].astype(int)
            y1 = Y[start:stop].astype(int)
            z1 = Z[start:stop].astype(int)
            # make sure no overflow
            x1[x1 >= dims[0]] = dims[0] - 1
            y1[y1 >= dims[1]] = dims[1] - 1
            z1[z1 >= dims[2]] = dims[2] - 1
            dendLabelImgTemp[x1, y1, z1] = 1
            sSize = np.mean(r[start:stop])
            dendLabelImgTemp = binary_dilation(dendLabelImgTemp > 0,
                                               getStructure(np.array([pixelSize[0], pixelSize[1], 2.5]), sSize))
            index_inner = np.ravel_multi_index(np.where(dendLabelImgTemp > 0), dims)
            return key, sSize, index_inner

        indexRdd = rdd.map(mask).collect()
        for segNum, size, index in indexRdd:
            if len(index)>0:
                dendLabelImg[index] = counter
                if flagFirst:
                    dendLabelTable = np.array([counter, i, segNum], ndmin=2)
                    flagFirst = False
                else:
                    dendLabelTable = np.append(dendLabelTable, np.array([counter, i, segNum], ndmin=2), axis=0)
                counter += 1
                segmentSize.append(size)
        dendPathLength.append(np.array(lengths))
    dendLabelImg = dendLabelImg.reshape(labelimg.shape)
    timeDict['dendLabelImgB'] = dendLabelImg
    timeDict['dendLabelTableB'] = dendLabelTable
    timeDict['dendPathLengthB'] = dendPathLength
    timeDict['dendSizeB'] = segmentSize
    writeTiff(timeDict['path'], dendLabelImg, 'dendLabelImgB')
def getMasks(timeDict, makeB=False):
    """Combine dilated spine masks with the dendrite label image.

    Each spine mask is dilated by 'dilate1'; the union of dilated spines is
    dilated again by 'dilate2' and carved out of the dendrite labels, so
    dendrite segments never overlap spine territory.

    :param timeDict: time course dictionary; needs labelimg, pixelSize,
        dilate1, dilate2 and the (B-variant) dendrite label image
    :param makeB: if True, use dendLabelImgB and store labelimgAllB
    :return: adds labelimgAll (or labelimgAllB) to timeDict
    """
    spine_labels = timeDict['labelimg']
    dend_labels = timeDict['dendLabelImgB' if makeB else 'dendLabelImg']
    voxel_size = timeDict['pixelSize']
    shape = spine_labels.shape
    dilated_flat = copy.deepcopy(spine_labels).reshape(-1)
    last_spine = np.max(spine_labels) + 1
    for spine_id in log_progress(range(1, last_spine), name='Spines'):
        # paint this spine into a temporary binary volume
        flat_idx = np.ravel_multi_index(np.where(spine_labels == spine_id), shape)
        temp = np.zeros(shape).reshape(-1)
        temp[flat_idx] = True
        # first dilation, then stamp the grown spine back into the flat image
        grown = binary_dilation(temp.reshape(shape), getStructure(voxel_size, timeDict['dilate1']))
        dilated_flat[np.ravel_multi_index(np.where(grown), shape)] = spine_id
    dilated = dilated_flat.reshape(shape)
    # second dilation defines a keep-out zone around all spines
    keep_dend = np.invert(binary_dilation(dilated > 0, getStructure(voxel_size, timeDict['dilate2'])))
    combined = dend_labels * keep_dend + spine_labels
    timeDict['labelimgAllB' if makeB else 'labelimgAll'] = combined
def getMasksDF(timeDict, makeB=False, plot=True):
    """Build the Masks DataFrame and optionally fix spine/dendrite assignment.

    :param timeDict: time course dictionary; 'fix_spines' controls whether
        spines are reassigned to the nearest dendrite by center distance
    :param makeB: if True, use the *B label image/table and store MasksB
    :param plot: if True, show a max-projection with spine/dendrite numbers
    :return: adds the Masks (or MasksB) DataFrame to timeDict
    """
    fix = timeDict['fix_spines']
    # todo: check for nans
    if not makeB:
        labelimgAll = timeDict['labelimgAll']
        dendLabelTable = timeDict['dendLabelTable']
        dendPathLength = timeDict['dendPathLength']
    else:
        labelimgAll = timeDict['labelimgAllB']
        dendLabelTable = timeDict['dendLabelTableB']
        dendPathLength = timeDict['dendPathLengthB']
    # centers of mass for every label, x/y converted to microns (z left in planes)
    mask = (labelimgAll > 0).astype(int)
    label = labelimgAll
    nMasks = label.max().astype(int)
    centers = np.array(ndimg.center_of_mass(mask, label, range(1, nMasks + 1)))
    pixelSize = timeDict['pixelSize'][0]
    centers[:, 0:2] = centers[:, 0:2] * pixelSize
    Centers = pd.Series([tuple(row) for row in centers])
    # spines occupy labels 1..nSpines, dendrite segments follow
    types = ['Spine'] * timeDict['dendNumAll'].shape[0] + ['Dendrite'] * dendLabelTable.shape[0]
    MaskType = pd.Series(types)
    index = dendLabelTable[:, 0]
    DendNum = pd.Series(np.concatenate((timeDict['dendNumAll'], dendLabelTable[:, 1].T)))
    DendSegment = pd.Series(dendLabelTable[:, 2], index=index - 1)
    Masks = pd.DataFrame({'Centers': Centers,
                          'MaskType': MaskType,
                          'DendNum': DendNum,
                          'DendSegment': DendSegment})
    # per-dendrite arrays of segment centers, used for the distance fix below
    dendrites = Masks[(Masks['MaskType'] == 'Dendrite')]['DendNum'].unique()
    dendCenters = dict()
    for dend in dendrites:
        dendCenters[dend] = np.asarray(
            list(Masks[(Masks['MaskType'] == 'Dendrite') & (Masks['DendNum'] == dend)]['Centers'].values))
    spines = Masks[(Masks['MaskType'] == 'Spine')]['DendNum'].unique()
    if fix:
        # reassign every spine group to the dendrite whose segments are, on
        # average, closest to the group's spine centers
        indexReplace = [None] * len(spines)
        for index, S_dend in enumerate(spines):
            indexReplace[index] = Masks[(Masks['MaskType'] == 'Spine') & (Masks['DendNum'] == S_dend)].index
        new_values = [None] * len(spines)
        for index, S_dend in enumerate(spines):
            SpineCenters = np.asarray(
                list(Masks[(Masks['MaskType'] == 'Spine') & (Masks['DendNum'] == S_dend)]['Centers'].values))
            minDist = np.zeros((SpineCenters.shape[0], dendrites.shape[0]))
            for indexS, spine in enumerate(SpineCenters):
                for indexD, dend in enumerate(dendrites):
                    currentDend = dendCenters[dend]
                    # squared euclidean distance to the nearest segment center
                    minDist[indexS, indexD] = np.min(np.sum((currentDend - spine) ** 2, axis=1))
            new_values[index] = dendrites[np.argmin(minDist.mean(axis=0))].astype(int)
            logger.info('Old: %d, new: %d' % (S_dend, new_values[index]))
        for index, S_dend in enumerate(spines):
            # NOTE(review): chained assignment (SettingWithCopyWarning risk);
            # Masks.loc[indexReplace[index], 'DendNum'] = ... would be safer
            Masks['DendNum'][indexReplace[index]] = new_values[index]
    # NOTE(review): attribute on the DataFrame, not a column — lost on copy
    Masks.dendPathLength = np.hstack(dendPathLength)
    if not makeB:
        timeDict['Masks'] = Masks
    else:
        # the B variant inherits spine->dendrite assignment from Masks
        for index in Masks[(Masks['MaskType'] == 'Spine')].index:
            Masks['DendNum'][index] = timeDict['Masks']['DendNum'][index]
        timeDict['MasksB'] = Masks
    if plot:
        # max projection with spine dendrite numbers and dendrite labels
        SpineCenters = np.asarray(list(Masks[(Masks.MaskType == 'Spine')]['Centers']))
        SpineDend = np.asarray(list(Masks[(Masks.MaskType == 'Spine')]['DendNum']))
        plt.figure(figsize=(13, 13))
        plt.imshow(labelimgAll.max(axis=2).transpose(1, 0))
        for center, dend in zip(SpineCenters, SpineDend):
            plt.text(center[0] / pixelSize, center[1] / pixelSize, str(dend))
        dendNums = np.unique(dendLabelTable[:, 1].T)
        for num in dendNums:
            current = Masks[(Masks.MaskType == 'Dendrite') & (Masks.DendNum == num)].Centers
            XY = (np.array(list(map(list, current.values)))[:, [0, 1]].mean(axis=0) / pixelSize).astype(int)
            plt.text(XY[0], XY[1], str(num), size=20, color='white')
def getFieldMasks(timeDict, makeB=False):
    """Map the combined label image back onto the original field format.

    For every field transform, looks up which label each transformed pixel
    belongs to, then groups the (weight, pixel-index) pairs per label.

    Fix: on a failed label lookup the original except-handler re-executed the
    failing statement, which raised the same exception again but obscured the
    intent; now the failure is logged and re-raised explicitly.

    :param timeDict: time course dictionary; needs labelimgAll (or
        labelimgAllB) and fieldsTform
    :param makeB: if True, use labelimgAllB and store labelListB
    :return: adds labelList (or labelListB) to timeDict — one
        2 x nPixels array (weights; flat indices) per label
    """
    if not makeB:
        labelimgAll = timeDict['labelimgAll']
    else:
        labelimgAll = timeDict['labelimgAllB']
    fieldsTform = timeDict['fieldsTform']
    # column-major flattening to match the MATLAB-derived transform indices
    linearLabelimg = labelimgAll.flatten(order='F')
    nLabel = np.max(linearLabelimg).astype(np.int64)
    labelInfoAll = [None] * len(fieldsTform)
    for i in range(0, len(fieldsTform)):
        Tform = fieldsTform[i].T
        # columns: label at target voxel, interpolation weight, field number
        labelInfo = np.zeros((Tform.shape[0], 3))
        for j in range(0, Tform.shape[0]):
            try:
                labelInfo[j, 0] = linearLabelimg[int(Tform[j, 0])]
            except Exception:
                # log which field/row failed, then propagate
                logger.error(str(i) + ' ' + str(j))
                raise
            labelInfo[j, 1] = Tform[j, 1]
            labelInfo[j, 2] = i
        labelInfoAll[i] = labelInfo.T
    labelArray = np.hstack(labelInfoAll)
    # group (weight, index) pairs by label; label 0 (background) is dropped
    labelList = [None] * nLabel
    for i in range(0, nLabel):
        labelList[i] = labelArray[1:3, labelArray[0, :] == (i + 1)]
    if not makeB:
        timeDict['labelList'] = labelList
    else:
        timeDict['labelListB'] = labelList
def getRegionData(sc, data, timeDict, makeB=False):
    """Extract per-mask time courses from the registered imaging data.

    Each volume is expanded along z according to its estimated z shift, then
    three weighted means per mask are computed on the cluster: raw signal
    (TC), a motion-sensitive reference built from the group mean (TCMotion),
    and the number of valid pixels (TCPixels).

    :param sc: SparkContext
    :param data: Thunder Images object (keyed volumes)
    :param timeDict: time course dictionary; needs labelList (or labelListB),
        finalShifts, grpZPos and groupZ
    :param makeB: if True, read labelListB and store the *B outputs
    :return: adds TC, TCPixels, TCMotion (or their B variants) to timeDict
    """
    # todo: if tracing out of bounds!
    if not makeB:
        labelList = copy.deepcopy(timeDict['labelList'])
    else:
        labelList = copy.deepcopy(timeDict['labelListB'])
    # maxIndex = np.prod(data.shape[1:]) * len(grpZPos)
    # for i, label in enumerate(labelList):
    #     index = label[0, :] < maxIndex
    #     labelList[i][0, :] =labelList[i][0, :][index]
    #     index = label[1, :] < maxIndex
    #     labelList[i][1, :] =labelList[i][1, :][index]
    finalShifts = timeDict['finalShifts']
    grpZPos = timeDict['grpZPos']
    groupZ = timeDict['groupZ']
    # broadcast the lookup structures once so every Spark task can reuse them
    labelListBC = sc.broadcast(labelList)
    shiftsBC = sc.broadcast(finalShifts)
    RegMean = groupZ.transpose(1, 2, 0, 3).flatten(order='F')
    RegMeanBC = sc.broadcast(RegMean)
    # old = np.seterr(all='raise')
    # logger.info('Set numpy errors to raise')

    def offsetVol(vol, pos, grpZPos2):
        # Place each imaged plane into an expanded z stack at the slot
        # matching its nearest group z position; unfilled slots stay NaN.
        np.seterr(all='raise')
        out = np.zeros((vol.shape[0], vol.shape[1], grpZPos2.shape[0] * vol.shape[2]))
        out[:] = np.NAN
        offset = np.argmin(np.absolute(grpZPos2 - pos))
        for i in range(0, vol.shape[2]):
            newFieldID = i * grpZPos2.shape[0] + offset
            out[:, :, newFieldID] = vol[:, :, i]
        return out.flatten(order='F').astype('float32')

    def offsetVolPar(kv):
        # per-volume wrapper: look up this time point's z shift and expand
        np.seterr(all='raise')
        key, ary = kv
        return offsetVol(ary, shiftsBC.value[np.array(key).astype(int), 0, 2], grpZPos).astype('float32')

    def nanMeanByRegions(kv):
        # weighted nan-mean of the raw signal over each mask's pixels
        np.seterr(all='raise')
        key, ary = kv
        mean_values = []
        for grp in labelListBC.value:
            a = np.nansum(ary[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten())
            b = np.sum(~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten())
            if b == 0.0:
                mean_values.append(np.nan)
            else:
                mean_values.append(a / b)
        # mean_values = [np.nansum(ary[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten()) / np.sum(
        #     ~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten()) for grp in labelListBC.value]
        return np.array(mean_values, dtype=ary.dtype).reshape((1, 1, -1))

    def nanMeanByRegionsMotion(kv):
        # same weighted mean, but over the group-mean reference restricted to
        # the pixels actually sampled at this time point (motion surrogate)
        np.seterr(all='raise')
        key, ary = kv
        compMean = RegMeanBC.value * np.absolute(np.sign(ary))
        norm_values = []
        for grp in labelListBC.value:
            a = np.nansum(compMean[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten())
            b = np.sum(~np.isnan(compMean[np.array(grp[1, :], dtype='uint32')]) * grp[0, :].flatten())
            if b == 0.0:
                norm_values.append(np.nan)
            else:
                norm_values.append(a / b)
        return np.array(norm_values, dtype='float32').reshape((1, 1, -1))

    def nanMeanByRegionsPixels(kv):
        # weighted count of valid (non-NaN) pixels per mask
        np.seterr(all='raise')
        key, ary = kv
        pixels = [np.sum(~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten()) for grp in
                  labelListBC.value]
        return np.array(pixels, dtype='float32').reshape((1, 1, -1))

    RegDataExp = data.map(offsetVolPar, with_keys=True)
    # cache the expanded volumes: they feed three separate reductions below
    RegDataExp.cache()
    RegDataExp.count()
    if not makeB:
        timeDict['TC'] = RegDataExp.map(nanMeanByRegions, with_keys=True).toarray().T
        logger.info('Got TC')
        timeDict['TCMotion'] = RegDataExp.map(nanMeanByRegionsMotion, with_keys=True).toarray().T
        logger.info('Got TCMotion')
        timeDict['TCPixels'] = RegDataExp.map(nanMeanByRegionsPixels, with_keys=True).toarray().T
        logger.info('Got TCPixels')
    else:
        timeDict['TCB'] = RegDataExp.map(nanMeanByRegions, with_keys=True).toarray().T
        logger.info('Got TCB')
        timeDict['TCMotionB'] = RegDataExp.map(nanMeanByRegionsMotion, with_keys=True).toarray().T
        logger.info('Got TCMotionB')
        timeDict['TCPixelsB'] = RegDataExp.map(nanMeanByRegionsPixels, with_keys=True).toarray().T
        logger.info('Got TCPixelsB')
    RegDataExp.uncache()
    # np.seterr(**old)
    # logger.info('Set numpy errors to: %s' % old)
def getBaseline(sc, timeDict, regWindow=1000, maxZscore=2.0, step=8, makeB=False):
    """Estimate a per-mask fluorescence baseline by regressing on TCMotion.

    For every mask, a sliding window of the trace is reduced to its mode
    (kernel-density estimate), points within maxZscore of the mode are kept,
    and a zero-intercept linear fit of signal vs. motion reference gives a
    local scale factor; the interpolated scale times TCMotion is the baseline.

    :param sc: Spark Context
    :param timeDict: time course dictionary; needs TC, TCMotion, TCPixels
        (or their B variants)
    :param regWindow: number of time points in the sliding window
    :param maxZscore: maximum z score (relative to the mode) for a good point
    :param step: stride, in time points, between baseline estimates
    :param makeB: if True, read/store the *B variants
    :return: adds TCBaseline and TCdiv (dF/F) to timeDict
    """
    from sklearn import linear_model
    # t = time.time()
    timeDict['regWindow'] = regWindow
    timeDict['maxZscore'] = maxZscore
    timeDict['step'] = step

    def estBaseline(key, model_inner):
        # Estimate the local signal/motion scale factor at time point `key`;
        # executed on the Spark workers (x_BC/y_BC/not_nan_BC are broadcasts).
        np.seterr(all='warn')
        from scipy.stats import gaussian_kde
        start = int(max((0, key - regWindow / 2)))
        stop = int(min((len(x), key + regWindow / 2)))
        x1 = x_BC.value[start:stop]
        y1 = y_BC.value[start:stop]
        # p1 = p[start:stop]
        x2 = x1[np.logical_not(np.isnan(x1))]
        y2 = y1[np.logical_not(np.isnan(y1))]
        if np.any(y2):
            if len(y2) > 100:
                # mode of the window via KDE sampled across the IQR
                kernel = gaussian_kde(y2)
                low, high = np.percentile(y2, [25, 75]).astype(int)
                step_inner = (high - low) / 100.
                testRange = low + np.arange(start=1, stop=101, dtype=int) * step_inner
                estMode = testRange[np.argmax(kernel(testRange))]
            else:
                estMode = np.median(y2)
            # symmetrized spread of the sub-mode points -> robust std estimate
            y3 = y2[(y2 - estMode) < 0] - estMode
            std = np.std(np.hstack((y3, -y3)))
            zscore = (y1 - estMode) / std
            goodPts = np.logical_and((zscore < maxZscore), not_nan_BC.value[start:stop])
        else:
            goodPts = []
        if np.any(goodPts):
            # zero-intercept fit: signal = coef * motion reference
            model_inner = model_inner.fit(x1[goodPts].reshape(-1, 1), y1[goodPts].reshape(-1, 1))
            coef_inner = model_inner.coef_
            if coef_inner < 0.1:
                # implausibly small slope; fall back to a ratio of means
                coef_inner = np.nanmean(y2) / np.nanmean(x2)
        else:
            coef_inner = np.NAN
        return key, np.squeeze(coef_inner)

    model = linear_model.LinearRegression(fit_intercept=False)
    if not makeB:
        TC = timeDict['TC']
        TCMotion = timeDict['TCMotion']
        TCPixels = timeDict['TCPixels']
    else:
        TC = timeDict['TCB']
        TCMotion = timeDict['TCMotionB']
        TCPixels = timeDict['TCPixelsB']
    TCBaselineDict = dict()
    # interpolation grids: coefficients estimated every `step` points, then
    # linearly interpolated to every time point
    inter_x = np.arange(0, TC.shape[1], 1, int)
    inter_xp = np.arange(0, TC.shape[1], step, int)
    for i in tqdm.tqdm(range(TC.shape[0])):
        x = TCMotion[i, :]
        y = TC[i, :]
        p = TCPixels[i, :]
        not_nan = np.logical_and(np.logical_not(np.isnan(x)), np.logical_not(np.isnan(y)))
        not_nan = np.logical_and(not_nan, np.logical_not(np.isnan(p)))
        x_BC = sc.broadcast(x)
        y_BC = sc.broadcast(y)
        not_nan_BC = sc.broadcast(not_nan)
        coefDict = sc.parallelize(range(0, len(x), step)).map(lambda x2: estBaseline(x2, model)).collectAsMap()
        coef = np.array([coefDict[idx] for idx in range(0, len(x), step)])
        coef = np.interp(inter_x, inter_xp, coef)
        TCBaselineDict[i] = x * np.squeeze(coef)
        x_BC.unpersist()
        y_BC.unpersist()
        not_nan_BC.unpersist()
        # current = time.time() - t
        # m, s = divmod(current, 60)
        # logger.info('i: %d, %02d:%02d' % (i, m, s))
        # sys.stdout.flush()
    TCBaseline = np.array([np.squeeze(TCBaselineDict[idx]) for idx in TCBaselineDict.keys()])
    if not makeB:
        timeDict['TCBaseline'] = TCBaseline
        old = np.seterr(all='warn')
        timeDict['TCdiv'] = TC / TCBaseline - 1
    else:
        timeDict['TCBaselineB'] = TCBaseline
        old = np.seterr(all='warn')
        timeDict['TCdivB'] = TC / TCBaseline - 1
def getNoise(sc, timeDict, makeB=False):
    """Estimate per-mask shot-noise level and z-scored time courses.

    Fits a Poisson-like noise model to the negative dF/F excursions of each
    mask (which should contain noise only) as a function of the effective
    photon count (pixels * baseline).

    Fix: the fallback around curve_fit used a bare ``except:``, which also
    trapped KeyboardInterrupt/SystemExit; narrowed to ``except Exception:``.

    :param sc: Spark Context
    :param timeDict: time course dictionary; needs TCdiv, TCPixels and
        TCBaseline (or their B variants)
    :param makeB: if True, read/store the *B variants
    :return: adds TCNoise and TCzscore (TCdiv normalized by noise) to timeDict
    """
    def fitNoise(key):
        # Fit the noise model for one mask; runs on the Spark workers.
        from scipy.optimize import curve_fit

        def model_noise(x2, Ndiv, Nscale, offset):
            # expected relative noise for an effective event rate lambdaAct
            lambdaAct = ((x2 + offset) ** Nscale) / Ndiv
            return (lambdaAct ** 0.5) / lambdaAct

        # only negative dF/F excursions are assumed to be pure noise
        idx_inner = (TCdiv_BC.value[key[0], :] < 0).nonzero()[0]
        TCPixels2 = copy.deepcopy(TCPixels_BC.value[key[0], :])
        TCPixels2[TCPixels_BC.value[key[0], :] == 0] = np.NAN
        x = TCPixels2[idx_inner] * TCBaseline_BC.value[key[0], idx_inner]
        y = -TCdiv_BC.value[key[0], idx_inner]
        validTps = np.isfinite(x) & np.isfinite(y)
        x = x[validTps]
        y = y[validTps]
        x2 = x[x > 0]
        y2 = y[x > 0]
        try:
            opt_parameters, parm_cov = curve_fit(model_noise, x2, y2 ** 2, maxfev=10000, method='trf')
            if np.any(np.logical_not(np.isfinite(opt_parameters))):
                # degenerate fit: fall back to a flat noise estimate
                TC_noise = np.ones_like(TCPixels2) * np.mean(y2 ** 2) ** 0.5
            else:
                TC_noise = model_noise(TCPixels2 * TCBaseline_BC.value[key[0], :], opt_parameters[0],
                                       opt_parameters[1], opt_parameters[2]) ** 0.5
        except Exception:
            # curve_fit raises RuntimeError/ValueError on bad data or
            # non-convergence; fall back to a flat noise estimate
            TC_noise = np.ones_like(TCPixels2) * np.mean(y2 ** 2) ** 0.5
        return key, TC_noise

    if not makeB:
        TCdiv_BC = sc.broadcast(timeDict['TCdiv'])
        TCPixels_BC = sc.broadcast(timeDict['TCPixels'])
        TCBaseline_BC = sc.broadcast(timeDict['TCBaseline'])
        idxList = list(zip(range(0, timeDict['TCdiv'].shape[0])))
    else:
        TCdiv_BC = sc.broadcast(timeDict['TCdivB'])
        TCPixels_BC = sc.broadcast(timeDict['TCPixelsB'])
        TCBaseline_BC = sc.broadcast(timeDict['TCBaselineB'])
        idxList = list(zip(range(0, timeDict['TCdivB'].shape[0])))
    fitNoiseDict = sc.parallelize(idxList).map(fitNoise).collectAsMap()
    TCNoise = np.array([fitNoiseDict[idx] for idx in idxList])
    if not makeB:
        timeDict['TCNoise'] = TCNoise
        timeDict['TCzscore'] = timeDict['TCdiv'] / TCNoise
    else:
        timeDict['TCNoiseB'] = TCNoise
        timeDict['TCzscoreB'] = timeDict['TCdivB'] / TCNoise
def loadInfo(timeDict):
    """Load the two session-info .mat files into the time-course dictionary.

    :param timeDict: time course dictionary; needs 'databasePath' and 'path'
    :return: adds 'InfoSWC' and 'Info' to timeDict
    """
    for fname, key in (('prepareMasksAutoSWC.mat', 'InfoSWC'),
                       ('prepareMasksAuto.mat', 'Info')):
        loaded = loadmat(os.path.join(timeDict['databasePath'], fname))
        timeDict[key] = loaded['Info']
        logger.info('Loaded ' + timeDict['path'] + fname)
def getTransform(timeDict, outputFile='InvTransform.h5', getAligned=True, do_8bit=True, sat=1):
    """Register the session volume to the anatomy stack (affine, SimpleITK).

    Runs a multi-resolution gradient-descent registration with a correlation
    metric, optionally resamples the session volume into anatomy space, and
    saves the inverse transform (session -> anatomy) to disk.

    :param timeDict: time course dict; needs SessionPath, databasePath,
        pixelSize, UM_1X, xyPixNum and anatomyZstep
    :param outputFile: name of transform file to save to
    :param getAligned: flag to apply the transformation and store 'aligned'
    :param do_8bit: also write an 8-bit version of the aligned volume
    :param sat: saturation percentage for the 8-bit conversion
    :return: inverse transformation (from session space to anatomy space)
    """
    # The four callbacks below share state through module-level globals
    # (metric_values / multires_iterations), following the SimpleITK
    # notebook examples; not reentrant.

    # callback invoked when the StartEvent happens, sets up our new data
    def start_plot():
        global metric_values, multires_iterations
        metric_values = []
        multires_iterations = []

    # callback invoked when the EndEvent happens, do cleanup of data and figure
    def end_plot():
        global metric_values, multires_iterations
        try:
            del metric_values
            del multires_iterations
            # close figure, we don't want to get a duplicate of the plot latter on
            plt.close()
        except Exception:
            pass

    # callback invoked when the IterationEvent happens, update our data and display new figure
    def plot_values(registration_method_inner):
        global metric_values, multires_iterations
        val = registration_method_inner.GetMetricValue()
        if np.isfinite(val):
            metric_values.append(registration_method_inner.GetMetricValue())
        # clear the output area (wait=True, to reduce flickering), and plot current data
        clear_output(wait=True)
        # plot the similarity metric values
        plt.plot(metric_values, 'r')
        plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
        plt.xlabel('Iteration Number', fontsize=12)
        plt.ylabel('Metric Value', fontsize=12)
        plt.show()

    # callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
    # metric_values list.
    def update_multires_iterations():
        global metric_values, multires_iterations
        multires_iterations.append(len(metric_values))

    pxSize = timeDict['pixelSize'][0]
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    # target: anatomy stack of the FOV; moving: expanded session volume
    aMaskSmooth = tf.imread(timeDict['SessionPath']).transpose(1, 2, 0)
    sMaskSmooth = tf.imread(os.path.join(timeDict['databasePath'], 'expended_new.tif')).transpose(2, 1, 0)
    # threshold the moving image at its median and zero out NaNs
    sMaskSmooth2 = sMaskSmooth.flatten()[np.logical_not(np.isnan(sMaskSmooth.flatten()))]
    t = np.median(sMaskSmooth2)
    sMaskSmooth[sMaskSmooth < t] = 0
    sMaskSmooth = np.nan_to_num(sMaskSmooth)
    zNum = aMaskSmooth.shape[2]
    timeDict['zNum'] = zNum
    timeDict['sMaskSmooth'] = sMaskSmooth
    timeDict['aMaskSmooth'] = aMaskSmooth
    logger.info('Prepared images')
    sys.stdout.flush()
    target = sitk.GetImageFromArray(aMaskSmooth)
    moving = sitk.GetImageFromArray(sMaskSmooth)
    # physical spacing in um: (z, y, x) order as produced by GetImageFromArray
    target.SetSpacing((timeDict['anatomyZstep'], xyStep, xyStep))
    moving.SetSpacing((timeDict['pixelSize'][2], pxSize, pxSize))
    registration_method = sitk.ImageRegistrationMethod()
    registration_method.SetMetricAsCorrelation()
    registration_method.SetInterpolator(sitk.sitkLinear)
    # optimizer settings
    registration_method.SetOptimizerAsGradientDescent(learningRate=0.5, numberOfIterations=20,
                                                      convergenceMinimumValue=1e-6, convergenceWindowSize=10,
                                                      estimateLearningRate=sitk.ImageRegistrationMethod.EachIteration)
    registration_method.SetOptimizerScalesFromPhysicalShift()
    # setup for the multi-resolution framework
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
    registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    # connect all of the observers so that we can perform plotting during registration
    registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
    registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
    registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
    registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
    # initialize the affine from the images' intensity moments
    transform = sitk.CenteredTransformInitializer(target, moving, sitk.AffineTransform(3),
                                                  sitk.CenteredTransformInitializerFilter.MOMENTS)
    registration_method.SetInitialTransform(transform)
    registration_method.Execute(target, moving)
    logger.info('Final metric value: {0}'.format(registration_method.GetMetricValue()))
    logger.info('Optimizer\'s stopping condition, {0}'.format(
        registration_method.GetOptimizerStopConditionDescription()))
    if getAligned:
        # resample the session volume into anatomy space and write it out
        reSampler = sitk.ResampleImageFilter()
        reSampler.SetTransform(transform)
        reSampler.SetSize(target.GetSize())
        reSampler.SetOutputSpacing(target.GetSpacing())
        aligned = sitk.GetArrayFromImage(reSampler.Execute(moving))
        timeDict['aligned'] = aligned
        path = os.path.join(timeDict['path']+'View', '')
        writeTiff(path, aligned, 'aligned')
        if do_8bit:
            aligned_8bit = convert_8bit(aligned, sat_percent=sat, ignore_nans=False, ignore_zero=True)
            writeTiff(path, aligned_8bit, 'aligned_8bit', dtype='uint8')
    transformInv = transform.GetInverse()
    sitk.WriteTransform(transformInv, timeDict['path'] + outputFile)
    logger.info('Saved inverse transform to ' + timeDict['path'] + outputFile)
    return transformInv
def transformPoints(timeDict, transformInv, useSWC=True, makeB=False):
    """ transform the center points of all masks to anatomy space in um
    and attach every mask to its nearest reconstruction point and segment.
    :param timeDict: time course dictionary
    :param transformInv: the SimpleITK transformation (inverse, session -> anatomy)
    :param useSWC: load data from swc reconstruction file ('InfoSWC'/'AllSWC')
        instead of the legacy 'Info'/'All'
    :param makeB: operate on timeDict['MasksB'] instead of timeDict['Masks']
    :return: adds AnatomyCenters, Segment, ConnectingPoint, ConnectingDist and
        ParentId columns to the selected Masks DataFrame
    """
    # transform point to anatomy space
    if useSWC:
        Info = timeDict['InfoSWC']
        All = Info['AllSWC']
    else:
        Info = timeDict['Info']
        All = Info['All']
    # anatomy stack; the linear voxel indices stored in All are unraveled
    # against this shape below
    img = tf.imread(timeDict['SessionPath']).astype(int).transpose(1, 2, 0)
    imgSize = img.shape
    # um per pixel in xy for the anatomy stack
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    if not makeB:
        Masks = timeDict['Masks']
    else:
        Masks = timeDict['MasksB']
    AnatomyCenters = list([])
    for center in Masks.Centers:
        # axis permutation: (z, x, y) goes into the transform, the result is
        # stored back as (y, z, x). NOTE(review): presumably converts between
        # the session (row, col, plane) order and SimpleITK's (x, y, z)
        # ordering -- confirm against the alignment code
        point = transformInv.TransformPoint((center[2], center[0], center[1]))
        AnatomyCenters.append((point[1], point[2], point[0]))
    Masks['AnatomyCenters'] = AnatomyCenters
    # find closest point in interpolated space
    Connecting = list([])
    segList = list([])
    for segNum, segment in enumerate(All):
        # Fortran-order linear indices (as saved by matlab) -> voxel coords
        x, y, z = np.unravel_index(segment, imgSize, 'F')
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * xyStep
        y = y * xyStep
        z = z * timeDict['anatomyZstep']
        if len(x.shape) > 0:
            Connecting.extend(list(zip(x, y, z)))
            segList.extend([segNum] * x.shape[0])
        else:
            # single-voxel segment: unravel_index returned scalars
            Connecting.append((x, y, z))
            segList.append(segNum)
    Connecting = np.array(Connecting)
    tree = KDTree(Connecting)
    # nearest reconstruction voxel for every transformed mask center
    distances, indexes = tree.query(AnatomyCenters)
    loc = Connecting[indexes, :]
    seg = np.array(segList)[indexes]
    loc2 = list(map(tuple, loc))
    Masks['Segment'] = seg
    Masks['ConnectingPoint'] = loc2
    Masks['ConnectingDist'] = distances
    # find closest segment
    if useSWC:
        Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    else:
        Table2 = np.array(timeDict['Info']['Table'])
    # columns 2-4 hold the node coordinates; the scaling is commented out, so
    # the table is assumed to already be in um -- TODO confirm
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0]  # * xyStep
    Table[:, 1] = Table[:, 1]  # * xyStep
    Table[:, 2] = Table[:, 2]  # * timeDict['anatomyZstep']
    tree2 = KDTree(Table)
    # nearest reconstruction node -> its id (column 0) becomes the parent
    distances2, indexes2 = tree2.query(AnatomyCenters)
    Ids = Table2[indexes2, 0]
    Masks['ParentId'] = Ids
    # timeDict['Masks'] = Masks
def loadTransform(path, filename='InvTransform.h5'):
    """Read a SimpleITK transformation previously written to disk.

    :param path: directory containing the transform file (session.path);
        expected to end with a path separator since it is concatenated
        directly with the filename
    :param filename: transformation filename (*.h5)
    :return: the SimpleITK transformation object
    """
    transform_file = path + filename
    return sitk.ReadTransform(transform_file)
def getExcludeIndexB(timeDict):
    """Map the curated exclusions of segmentation A (excludeIndex) onto the
    B segmentation and store them as timeDict['excludeIndexB'].

    Excluded spines carry their indices over unchanged (NOTE(review): this
    assumes spine numbering matches between A and B -- confirm). Excluded
    dendrites are matched by voxel overlap between the two label images,
    restricted to dendrites whose DendNum is excluded in both.
    """
    li=timeDict['labelimgAll']
    liB=timeDict['labelimgAllB']
    eI = timeDict['excludeIndex']
    Masks = timeDict['Masks']
    spineIdx = np.asarray(list(Masks[(Masks.MaskType == 'Spine')].index))
    dendIdx = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')].index))
    # split the excluded indices into spines and dendrites
    eSpine = np.intersect1d(spineIdx,eI)
    eDend = np.intersect1d(dendIdx,eI)
    if np.any(np.isfinite(eDend)) and len(eDend)>0:
        # binary mask of all voxels belonging to excluded A dendrites
        # (label values are 1-based, hence idx + 1)
        liMask=np.zeros(li.shape)
        for idx in eDend:
            liMask[li==(idx+1)]=1
        # B labels overlapping that mask, converted back to 0-based indices
        # (label 0 is background)
        eDendB = np.setdiff1d(np.unique(liB[liMask.astype('bool')]),0)-1
        eDendNum = timeDict['Masks'].DendNum[eDend]
        eDendNumB = timeDict['MasksB'].DendNum[eDendB]
        # keep only B dendrites whose DendNum is also excluded in A
        keepIdx = np.intersect1d(eDendNumB,eDendNum)
        eDendB=eDendB[[np.any(keepIdx==x) for x in eDendNumB]]
        timeDict['excludeIndexB'] = np.hstack([eSpine,eDendB])
    else:
        # no excluded dendrites: only the spines carry over
        timeDict['excludeIndexB'] = eSpine
    timeDict['excludeIndexB'] = timeDict['excludeIndexB'].astype(int)
def get_graph_from_swc(timeDict, session):
    """Build an undirected, length-weighted graph from the SWC reconstruction.

    The SWC file named in the session options is loaded from the FOV folder,
    its coordinates are scaled to um, and every (parent, node) pair becomes an
    edge weighted by its euclidean length. Nodes are the 1-based SWC ids.

    :param timeDict: time course dictionary
    :param session: session object; Sp['OptionsStruct']['filename'] names the SWC
    :return: None; the graph is stored in timeDict['graph']
    """
    name = session.Sp['OptionsStruct']['filename']
    # strip the (windows-style) directory part from the stored path
    name = str(name[name.rfind('\\') + 1:])
    FOV = timeDict['databasePath'][:timeDict['databasePath'].rfind('/') + 1]
    table = load_data(os.path.join(FOV, name))
    logger.info('loaded SWC from %s: ' % os.path.join(FOV, name))
    table2 = pd.DataFrame(data=table.data_block, columns=['x', 'y', 'z', 'r', 'type', 'id', 'pID'])
    # scale to um; NOTE(review): the z step of 1.6 is hard-coded here while
    # other code reads timeDict['anatomyZstep'] -- confirm
    table2.z = table2.z * 1.6
    table2.x = table2.x * (timeDict['UM_1X'] / timeDict['xyPixNum'])
    table2.y = table2.y * (timeDict['UM_1X'] / timeDict['xyPixNum'])
    table2 = table2[['id', 'type', 'x', 'y', 'z', 'r', 'pID']]
    weight = []
    start = []
    end = []
    for line in table2.iterrows():
        # SWC ids are 1-based; line[0] is the 0-based DataFrame row index
        Id = int(line[0]) + 1
        p1 = np.array(line[1][2:5])
        pID = int(line[1][6])
        if Id == 0 or pID == -1:
            # parentless row: add a zero-length placeholder edge (-1, 1).
            # NOTE(review): 'end' is hard-coded to node 1 -- assumes the only
            # parentless row is the soma (id 1); confirm for multi-root files
            weight.append(0)
            start.append(-1)
            end.append(1)
            continue
        # euclidean length of the edge to the parent node
        p2 = np.array(table2.loc[int(pID - 1)][2:5])
        length = np.linalg.norm(p2 - p1)
        start.append(pID)
        end.append(Id)
        weight.append(length)
    table3 = pd.DataFrame(data=np.array(list(zip(start, end, weight))), columns=['start', 'end', 'weight'])
    G = nx.from_pandas_edgelist(table3, source='start', target='end', edge_attr=['weight'])
    # the reconstruction should be a tree; log a sanity check
    isTree = nx.algorithms.tree.recognition.is_tree(G)
    logger.info('Is tree: %s' % isTree)
    timeDict['graph'] = G
def get_path_length(timeDict, makeB=False, plot=True):
    """Compute the along-dendrite path length from the cell body to every mask.

    Uses the SWC graph built by get_graph_from_swc; node 1.0 is treated as the
    cell body. Writes a 'PathLength' column into the Masks DataFrame, the full
    node-to-node distance matrix into timeDict['pathLengthAll'], and sorted
    per-mask views into timeDict['pathIndex'/'pathLength'] (or the *B keys).

    :param timeDict: time course dictionary (needs 'graph' and Masks/MasksB)
    :param makeB: operate on the B segmentation (MasksB / labelimgAllB)
    :param plot: overlay per-dendrite path lengths on the label image
    :return: None
    """
    G = timeDict['graph']
    # weighted shortest path length between every pair of nodes
    pathLen = dict(nx.shortest_path_length(G, weight='weight'))
    CellBodyPath = pathLen[1.0]
    timeDict['CellBodyPath'] = CellBodyPath
    if not makeB:
        Masks = timeDict['Masks']
        labelimgAll = timeDict['labelimgAll']
    else:
        Masks = timeDict['MasksB']
        labelimgAll = timeDict['labelimgAllB']
    for i, mask in enumerate(Masks.ParentId):
        # DataFrame.set_value() was removed in pandas 1.0; .at is the
        # supported label-based scalar setter with the same semantics
        if mask != 1.0:
            Masks.at[i, 'PathLength'] = CellBodyPath[mask]
        else:
            Masks.at[i, 'PathLength'] = 0
    # dense node-to-node matrix; skip the artificial -1 root placeholder and
    # shift the 1-based node ids to 0-based matrix indices
    pathLenArray = np.zeros((len(pathLen) - 1, len(pathLen) - 1))
    for key, value in iteritems(pathLen):
        if key != -1.0:
            for key2 in sorted(value):
                if key2 != -1.0:
                    pathLenArray[int(key - 1), int(key2 - 1)] = value[key2]
    timeDict['pathLengthAll'] = pathLenArray
    if not makeB:
        timeDict['pathIndex'] = np.array(np.argsort(Masks.PathLength))
        timeDict['pathLength'] = np.sort(np.array(Masks.PathLength))
    else:
        timeDict['pathIndexB'] = np.array(np.argsort(Masks.PathLength))
        timeDict['pathLengthB'] = np.sort(np.array(Masks.PathLength))
    if plot:
        plt.figure(figsize=(12, 12))
        plt.imshow(labelimgAll.max(axis=2).transpose(1, 0))
        DendCenters = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')]['Centers']))
        DendPath = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')]['PathLength']))
        pixelSize = timeDict['pixelSize'][0]
        for center, dend in zip(DendCenters, DendPath):
            plt.text(center[0] / pixelSize + 18, center[1] / pixelSize, str(int(dend)), color='r')
def get_soma_dendrites(timeDict):
    """Find the dendrite number(s) whose masks belong to the soma.

    Uses 'cellIndexAll' from timeDict when available, otherwise falls back to
    'cellIndex', and stores the unique DendNum values in
    timeDict['soma_dends'].
    """
    if 'cellIndexAll' in timeDict:
        logger.info('Using cellIndexAll')
        cell_rows = timeDict['Masks'].loc[timeDict['cellIndexAll']]
    else:
        logger.info('Using cellIndex')
        cell_rows = timeDict['Masks'].loc[timeDict['cellIndex']]
    soma_dends = np.unique(cell_rows.DendNum.values)
    logger.info('Found %s as soma dendrite number(s)' % soma_dends)
    timeDict['soma_dends'] = soma_dends
def get_branch_id(timeDict):
    """Assign a branch id to every SWC node and build a directed version of
    the reconstruction graph whose edges point away from the soma.

    At each bifurcation the daughter with the most downstream branch points
    continues the parent's branch id (ties broken by the smallest turning
    angle); the other daughters open new ids. Stores timeDict['branchID']
    (per node, soma slot dropped) and timeDict['directed_graph'].

    :param timeDict: time course dictionary (needs 'graph', 'CellBodyPath',
        'InfoSWC', 'UM_1X', 'xyPixNum', 'anatomyZstep')
    :return: None
    """
    G = timeDict['graph']
    CellBodyPath = timeDict['CellBodyPath']
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    # node coordinates scaled to um (rows: SWC nodes, columns: x/y/z)
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0] * xyStep
    Table[:, 1] = Table[:, 1] * xyStep
    Table[:, 2] = Table[:, 2] * timeDict['anatomyZstep']
    GD = G.to_directed()
    # NOTE(review): dist is filled here but never read afterwards -- dead code?
    dist = {}
    for node in GD:
        dist[node] = GD.number_of_edges(1, node)
    # remove the edge direction pointing back towards the soma, so only edges
    # that increase the path length from the cell body remain
    for node in GD:
        pred_nodes = [x for x in GD.predecessors(node)]
        for pred_node in pred_nodes:
            if CellBodyPath[pred_node] >= CellBodyPath[node]:
                GD.remove_edge(pred_node, node)
    # walk outward from the soma assigning branch ids
    dfs = nx.algorithms.traversal.dfs_preorder_nodes(GD, source=1.0)
    counterID = 1
    branchID = np.zeros(len(GD))
    for node in dfs:
        if node == 1:
            # every dendrite leaving the soma starts its own branch id
            for nodeID in GD.successors(node):
                if nodeID != -1:
                    branchID[int(nodeID)] = counterID
                    counterID += 1
        else:
            succ = np.array(list(GD.successors(node)))
            if len(succ) == 1:
                # simple continuation: inherit the branch id
                branchID[int(succ[0])] = branchID[int(node)]
            elif len(succ) >= 2:
                # bifurcation: count downstream branch points per daughter
                out_deg = []
                for nodeID in succ:
                    GS = GD.subgraph(descendants(GD, nodeID))
                    ODS = GS.out_degree()
                    out_deg.append(np.sum([x[1] > 1 for x in list(ODS)]))
                max_deg = np.max(out_deg)
                putShaft = succ[(out_deg == max_deg).nonzero()[0]]
                if len(putShaft) == 1:
                    shaft_node = putShaft
                else:
                    # tie: the daughter deviating least from the incoming
                    # direction continues the shaft
                    angles = []
                    vec_in = Table[int(node) - 1, :] - Table[int(list(GD.predecessors(node))[0]) - 1, :]
                    for psn in putShaft:
                        vec_out = Table[int(psn) - 1, :] - Table[int(node) - 1, :]
                        angles.append(angle(vec_in, vec_out))
                    shaft_node = putShaft[np.argmin(angles)]
                # shaft daughter keeps the id; all others open new branches
                for nodeID in succ:
                    if nodeID == shaft_node:
                        branchID[int(nodeID)] = branchID[int(node)]
                    else:
                        branchID[int(nodeID)] = counterID
                        counterID += 1
    # drop slot 0 so branchID[i] corresponds to SWC node i + 1
    branchID = branchID[1:]
    timeDict['branchID'] = branchID
    timeDict['directed_graph'] = GD
def get_branch_to_branch(timeDict, makeB=False):
    """ get connecting points, distance, relative branch order between branches
    :param timeDict: time course dictionary; needs 'graph', 'branchID',
        'soma_dends', 'dend', 'pathLengthAll', 'InfoSWC' and the inverse
        transform saved on disk (loadTransform)
    :param makeB: operate on the B segmentation (MasksB / excludeIndexB)
    :return: dend_info
    dend_info axis 0: mask in dendrite 1, mask in dendrite 2, distance, soma traverse, relative branch order
    dend_info axis 1, 2: number of dendrites in session, the soma, number of terminal leafs
    """
    pathLenArray = timeDict['pathLengthAll']
    if makeB:
        excludeIndex = timeDict['excludeIndexB']
        Masks = timeDict['MasksB']
    else:
        excludeIndex = timeDict['excludeIndex']
        Masks = timeDict['Masks']
    # NaN marks "no exclusions"
    if np.any(np.isnan(excludeIndex)):
        excludeIndex=[]
    G = timeDict['graph']
    branchID = timeDict['branchID']
    soma_dends = timeDict['soma_dends']
    exclude_dend = np.unique(Masks.loc[excludeIndex].DendNum.values)
    dend = timeDict['dend']
    num_dend = len(dend)
    # session -> anatomy transform saved during alignment
    transformInv = loadTransform(timeDict['path'])
    pixelSizeSession = timeDict['pixelSize']
    # terminal leafs of the reconstruction (degree-1 nodes, minus the -1 root)
    leafs = [x for x in G.nodes() if G.degree(x) == 1 and x != -1]
    # result rows/cols: imaged dendrites, then the soma, then terminal leafs
    n_total = num_dend + len(leafs)
    results = np.zeros((5, n_total, n_total))
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    # coordinates are used unscaled here (no-op assignments kept as-is)
    Table[:, 0] = Table[:, 0]
    Table[:, 1] = Table[:, 1]
    Table[:, 2] = Table[:, 2]
    tree2 = KDTree(Table)
    # NOTE(review): the leaf rows are read from the LAST table column while
    # node ids elsewhere come from column 0 -- confirm this is intended
    swc_endpoints = Table2[np.array(leafs).astype(int) - 1, -1]
    index_list = []
    ids_list = []
    distance_list = []
    is_terminal = []
    branch_modal_node = []
    # per imaged dendrite: map its backbone into anatomy space, snap each
    # sample to its nearest SWC node and summarize the dendrite
    for i in range(num_dend):
        dend1 = copy.deepcopy(dend[i])
        dend1_fov = []
        if len(dend1.shape) < 2:
            # single-point dendrite: wrap so the loop below still works
            dend1 = [dend1]
        for center in dend1:
            point = transformInv.TransformPoint(
                (float(center[2]), center[1] * pixelSizeSession[0], center[0] * pixelSizeSession[0]))
            dend1_fov.append((point[1], point[2], point[0]))
        distances, Indexes = tree2.query(dend1_fov)
        if np.median(distances) > 10:
            # alignment sanity check: the backbone should sit near the SWC
            logger.info('Median dist 1 high: = %f' % np.median(distances))
        distances_dend1, Indexes_dend1 = tree2.query(dend1_fov)
        index_list.append(Indexes_dend1)
        distance_list.append(distances_dend1)
        Ids_dend1 = Table2[Indexes_dend1, 0]
        # drop snapped nodes that jump to a distant part of the tree
        Ids_dend1 = get_excluded_index(Ids_dend1, pathLenArray, i)
        # representative node: first node carrying the dendrite's modal branch id
        branch_ids, branch_ids_count = np.unique(branchID[np.array(Ids_dend1).astype(int) - 1], return_counts=True)
        high_id = branch_ids[np.argmax(branch_ids_count)]
        high_id2 = np.where(branchID[np.array(Ids_dend1).astype(int) - 1] == high_id)[0][0]
        branch_modal_node.append(Ids_dend1[high_id2])
        Ids_dend1 = np.unique(Ids_dend1)
        ids_list.append(Ids_dend1)
        is_terminal.append(len(np.intersect1d(Ids_dend1, swc_endpoints)) > 0)
    # pairwise over: imaged dendrites (idx < num_dend), soma (== num_dend),
    # terminal leafs (> num_dend)
    for x, y in itertools.product(range(n_total), range(n_total)):
        if x in soma_dends or y in soma_dends or x in exclude_dend or y in exclude_dend or x == y:
            continue
        # print(x, y)
        if x < num_dend:
            distances_dend1 = distance_list[x]
            Indexes_dend1 = index_list[x]
            Ids_dend1 = ids_list[x]
            branch_modal_node_x = branch_modal_node[x]
        elif x == num_dend:
            # the soma: SWC node 1
            Ids_dend1 = np.array([1])
            distances_dend1 = np.array([0])
            Indexes_dend1 = np.array([0])
            branch_modal_node_x = 1.0
        else:
            # a terminal leaf node
            Ids_dend1 = np.array([leafs[x - num_dend]])
            distances_dend1 = np.array([0])
            Indexes_dend1 = np.array([leafs[x - num_dend] - 1]).astype(int)
            branch_modal_node_x = Ids_dend1[0]
        if y < num_dend:
            distances_dend2 = distance_list[y]
            Indexes_dend2 = index_list[y]
            Ids_dend2 = ids_list[y]
            branch_modal_node_y = branch_modal_node[y]
        elif y == num_dend:
            Ids_dend2 = np.array([1])
            distances_dend2 = np.array([0])
            Indexes_dend2 = np.array([0])
            branch_modal_node_y = 1.0
        else:
            Ids_dend2 = np.array([leafs[y - num_dend]])
            distances_dend2 = np.array([0])
            Indexes_dend2 = np.array([leafs[y - num_dend] - 1]).astype(int)
            branch_modal_node_y = Ids_dend2[0]
        # closest pair of SWC nodes (by along-tree distance) between x and y
        dist = np.inf
        dend1_close = 0
        dend2_close = 0
        for x2, y2 in itertools.product(Ids_dend1, Ids_dend2):
            if timeDict['pathLengthAll'][int(x2 - 1), int(y2 - 1)] < dist:
                dist = timeDict['pathLengthAll'][int(x2 - 1), int(y2 - 1)]
                dend1_close = x2
                dend2_close = y2
        if np.isfinite(dist):
            # per dendrite: backbone sample closest to the connecting node
            Ids_dend1_best = np.where(Table2[Indexes_dend1, 0] == dend1_close)[0]
            dend1_best_dist = distances_dend1[Ids_dend1_best]
            dend1_best_offset = np.argmin(dend1_best_dist)
            dend1_best_point = Ids_dend1_best[dend1_best_offset]
            Ids_dend2_best = np.where(Table2[Indexes_dend2, 0] == dend2_close)[0]
            dend2_best_dist = distances_dend2[Ids_dend2_best]
            dend2_best_offset = np.argmin(dend2_best_dist)
            dend2_best_point = Ids_dend2_best[dend2_best_offset]
            # does the connecting path run through the soma (node 1)?
            node_list = nx.shortest_path(G, dend1_close, dend2_close)
            crossing_soma = 1.0 in node_list
            # branch order difference between the two representative nodes
            node_list2 = nx.shortest_path(G, branch_modal_node_x, branch_modal_node_y)
            relative_branch_order = len(np.unique(branchID[np.array(node_list2).astype(int) - 1])) - 1
            results[:, x, y] = np.array(
                [dend1_best_point, dend2_best_point, dist, crossing_soma, relative_branch_order])
        else:
            raise ValueError('Distance between %d and %d not finite' % (x, y))
    if makeB:
        timeDict['dend_infoB'] = results
        timeDict['dend_is_terminalB'] = is_terminal
    else:
        timeDict['dend_info'] = results
        timeDict['dend_is_terminal'] = is_terminal
def get_mask_to_mask(timeDict, makeB=False, smoothWin=50):
    """Compute per-mask anatomical measures along the dendritic tree from the
    backbones in timeDict['dend'] plus the branch-to-branch results
    (get_branch_to_branch must have run first).

    Stores (with a 'B' suffix when makeB=True): pathLengthGrid (um between
    every mask pair along the tree), pathLengthSoma, branchOrder,
    pathLengthTerminals(+Trav), somaTravGrid and relBranchOrder.

    :param timeDict: time course dictionary
    :param makeB: operate on the B segmentation
    :param smoothWin: smoothing window (samples) for the backbone arc length
    :return: None
    """
    # backbones with axes reordered to (col, row, plane)
    # -- TODO confirm axis convention
    All = []
    for x in timeDict['dend']:
        if len(x.shape) > 1:
            All.append(x[:, [1, 0, 2]])
        else:
            All.append(x[[1, 0, 2]])
    if makeB:
        Masks = timeDict['MasksB']
        excludeIndex = timeDict['excludeIndexB']
        results = timeDict['dend_infoB']
    else:
        Masks = timeDict['Masks']
        excludeIndex = timeDict['excludeIndex']
        results = timeDict['dend_info']
    G = timeDict['graph']
    AnatomyCenters = np.array([np.array(x) for x in Masks.Centers])
    # find closest point in interpolated space
    segList = []
    Dist = []
    Indexes = []
    maskOrder = []
    # per dendrite: arc-length position (um) of every mask along the smoothed
    # backbone; all dendrites are concatenated, hence the len(Dist) offset
    for segNum, segment in enumerate(All):
        maskIdx = (Masks.DendNum == segNum).values.nonzero()[0].astype(int)
        AnatomyCentersSeg = AnatomyCenters[maskIdx]
        if len(segment.shape) < 2:
            segment = segment[np.newaxis, :]
        x = segment[:, 0]
        y = segment[:, 1]
        z = segment[:, 2]
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * timeDict['pixelSize'][0]
        y = y * timeDict['pixelSize'][1]
        z = z * timeDict['pixelSize'][2]
        # smooth the backbone before integrating, to suppress pixel jitter
        sx = meanSmooth(x, smoothWin)
        sy = meanSmooth(y, smoothWin)
        sz = meanSmooth(z, smoothWin)
        # cumulative arc length along the smoothed backbone
        dist = np.hstack([0, np.cumsum(np.sqrt(np.diff(sx) ** 2 + np.diff(sy) ** 2 + np.diff(sz) ** 2))])
        # snap each mask center to its nearest (unsmoothed) backbone sample
        tree = KDTree(np.array(list(zip(x, y, z))))
        distances, indexes = tree.query(AnatomyCentersSeg)
        Indexes = np.hstack([Indexes, len(Dist) + np.array(indexes)])
        Dist = np.hstack([Dist, dist])
        maskOrder = np.hstack([maskOrder, maskIdx])
        segList = np.hstack([segList, segNum * np.ones(len(dist))])
    # shift the per-dendrite connecting-point indices in dend_info into the
    # concatenated coordinate system built above
    result2 = copy.copy(results)
    Indexes = Indexes.astype(int)
    segList = segList.astype(int)
    maskOrder = maskOrder.astype(int)
    nDend = len(All)
    cumIdx = 0
    for i in range(result2.shape[1]):
        result2[0, i, :] = result2[0, i, :] + cumIdx
        result2[1, :, i] = result2[1, :, i] + cumIdx
        if i < nDend:
            if len(All[i].shape)>1:
                cumIdx = cumIdx + len(All[i])
            else:
                cumIdx = cumIdx + 1
    nMasks = len(Indexes)
    pathLengthGrid = np.ones([nMasks, nMasks]) * np.NAN
    somaTravGrid = np.ones([nMasks, nMasks]) * np.NAN
    relBranchOrder = np.ones([nMasks, nMasks]) * np.NAN
    # pairwise path length: same dendrite -> arc-length difference; different
    # dendrites -> distance to each connecting point + inter-dendrite distance
    for i in range(nMasks):
        for j in range(nMasks):
            segx = segList[Indexes[i]]
            segy = segList[Indexes[j]]
            if segx == segy:
                pathLengthGrid[maskOrder[i], maskOrder[j]] = np.absolute(Dist[Indexes[i]] - Dist[Indexes[j]])
            else:
                distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, segy])])
                disty = np.absolute(Dist[Indexes[j]] - Dist[int(result2[1, segx, segy])])
                pathLengthGrid[maskOrder[i], maskOrder[j]] = distx + disty + result2[2, segx, segy]
                somaTravGrid[maskOrder[i], maskOrder[j]] = result2[3, segx, segy]
                relBranchOrder[maskOrder[i], maskOrder[j]] = result2[4, segx, segy]
    # distance and branch order to the soma (dend_info column nDend)
    pathLengthSoma = np.ones(nMasks) * np.NAN
    branchOrder = np.ones(nMasks) * np.NAN
    for i in range(nMasks):
        segx = segList[Indexes[i]]
        distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, nDend])])
        pathLengthSoma[maskOrder[i]] = distx + result2[2, segx, nDend]
        branchOrder[maskOrder[i]] = result2[4, segx, nDend] - 1
    # distance to every terminal leaf (dend_info columns nDend+1 ...)
    leafs = [x for x in G.nodes() if G.degree(x) == 1 and x != -1]
    nTerminal = len(leafs)
    pathLengthTerminals = np.ones([nMasks, nTerminal]) * np.NAN
    pathLengthTerminalsTrav = np.ones([nMasks, nTerminal]) * np.NAN
    for i in range(nMasks):
        for j in range(nTerminal):
            segx = segList[Indexes[i]]
            distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, nDend + j])])
            pathLengthTerminals[maskOrder[i], j] = distx + result2[2, segx, nDend + j]
            pathLengthTerminalsTrav[maskOrder[i], j] = result2[3, segx, nDend + j]
    # NaN-out all rows/columns of excluded masks
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.NAN
        pathLengthGrid[excludeIndex, :] = np.NAN
        pathLengthSoma[excludeIndex] = np.NAN
        branchOrder[excludeIndex] = np.NAN
        pathLengthTerminals[excludeIndex, :] = np.NAN
        pathLengthTerminalsTrav[excludeIndex, :] = np.NAN
        somaTravGrid[:, excludeIndex] = np.NAN
        somaTravGrid[excludeIndex, :] = np.NAN
        relBranchOrder[:, excludeIndex] = np.NAN
        relBranchOrder[excludeIndex, :] = np.NAN
    if makeB:
        timeDict['pathLengthGridB'] = pathLengthGrid
        timeDict['pathLengthSomaB'] = pathLengthSoma
        timeDict['branchOrderB'] = branchOrder
        timeDict['pathLengthTerminalsB'] = pathLengthTerminals
        timeDict['pathLengthTerminalsTravB'] = pathLengthTerminalsTrav
        timeDict['somaTravGridB'] = somaTravGrid
        timeDict['relBranchOrderB'] = relBranchOrder
    else:
        timeDict['pathLengthGrid'] = pathLengthGrid
        timeDict['pathLengthSoma'] = pathLengthSoma
        timeDict['branchOrder'] = branchOrder
        timeDict['pathLengthTerminals'] = pathLengthTerminals
        timeDict['pathLengthTerminalsTrav'] = pathLengthTerminalsTrav
        timeDict['somaTravGrid'] = somaTravGrid
        timeDict['relBranchOrder'] = relBranchOrder
def findPathLength(timeDict, session, makeB=False, plot=True, smoothWin=50):
    """ calculates the path length along the dendrite to all masks 0 is cell body

    Driver for the anatomy pipeline: builds the SWC graph, computes per-mask
    path lengths, labels branches, finds the soma dendrites, then derives the
    branch-to-branch and mask-to-mask distance grids. All results are stored
    in timeDict by the individual steps.

    :param timeDict: time course dictionary
    :param session: session object; names the SWC reconstruction file
    :param makeB: operate on the B segmentation (MasksB etc.)
    :param plot: plot the label image with per-dendrite path lengths
    :param smoothWin: smoothing window for the backbone arc length
    :return: None
    """
    get_graph_from_swc(timeDict, session)
    get_path_length(timeDict, makeB=makeB, plot=plot)
    get_branch_id(timeDict)
    get_soma_dendrites(timeDict)
    get_branch_to_branch(timeDict, makeB=makeB)
    get_mask_to_mask(timeDict, makeB=makeB, smoothWin=smoothWin)
def meanSmooth(x, winSize):
    """Smooth a 1-D signal with a centered moving average.

    Builds an (len(x), winSize + 1) matrix whose columns are shifted copies of
    x (NaN-padded) and returns the per-row nanmean, i.e. a centered running
    mean of width winSize + 1 whose window shrinks symmetrically at the edges.

    :param x: 1-D array-like signal
    :param winSize: window size; halfWin = winSize // 2 shifts on each side
    :return: smoothed signal, same length as x
    """
    halfWin = winSize // 2
    # start from all-NaN so unfilled cells drop out of nanmean
    # (np.NAN was removed in NumPy 2.0; np.nan is the supported spelling)
    X = np.full((len(x), winSize + 1), np.nan)
    # columns 0..halfWin-1: x shifted backwards by (halfWin - i) samples
    for i in range(halfWin):
        X[(halfWin - i):, i] = x[:-(halfWin - i)]
        # blank the tail -- presumably to keep the edge windows symmetric
        # with the leading NaNs; TODO confirm intent
        X[-(halfWin - i):, i] = np.nan
    X[:, halfWin] = x
    # columns halfWin+1..winSize: x shifted forwards by i samples
    for i in range(1, halfWin + 1):
        X[:-i, i + halfWin] = x[i:]
        X[:i, i + halfWin] = np.nan
    return np.nanmean(X, axis=1)
def get_excluded_index(dendrite_ids, path_length, dendrite_number, dist_threshold=10):
    """ checks for discontinuities in distance between reconstruction points along a dendrite
    and tries to solve them by either cutting from the first jump onwards or
    cutting from the beginning.

    Two candidate exclusion sets are built (pairing the jumps starting at the
    first jump vs. starting at a virtual jump before index 0) and the one that
    removes fewer points wins.

    :param dendrite_ids: reconstruction point ids (1-based) for the dendrite
    :param path_length: node-to-node path length matrix, indexed by id - 1
    :param dendrite_number: dendrite number in session (for logging only)
    :param dist_threshold: two consecutive reconstruction points farther apart
        than this count as a discontinuity
    :return: reconstruction point ids after deleting the discontinuities
    """
    # path length between consecutive reconstruction points
    dist_dend = []
    for i in range(len(dendrite_ids) - 1):
        dist_dend.append(path_length[int(dendrite_ids[i] - 1), int(dendrite_ids[i + 1] - 1)])
    large_dist = (np.array(dist_dend) > dist_threshold).nonzero()[0]
    n_pairs = len(large_dist) // 2
    # strategy 1: cut the stretch between each pair of jumps
    exclude_index_1 = []
    for i in range(n_pairs):
        exclude_index_1.extend(range(large_dist[i * 2] + 1, large_dist[i * 2 + 1] + 1))
    if len(large_dist) % 2:
        # unmatched trailing jump: cut everything after it. End bound is
        # len(dendrite_ids) (exclusive); the previous "+ 1" produced an
        # out-of-bounds index that np.delete rejects on modern NumPy.
        exclude_index_1.extend(range(large_dist[-1] + 1, len(dendrite_ids)))
    # strategy 2: same pairing but with a virtual jump before the first point
    large_dist2 = np.hstack((0, large_dist))
    n_pairs2 = len(large_dist2) // 2
    exclude_index_2 = []
    for i in range(n_pairs2):
        exclude_index_2.extend(range(large_dist2[i * 2] + 1, large_dist2[i * 2 + 1] + 1))
    if len(large_dist2) % 2:
        exclude_index_2.extend(range(large_dist2[-1] + 1, len(dendrite_ids)))
    # The better one is the longer one (smaller exclusion points)
    if len(exclude_index_1) > 0 or len(exclude_index_2) > 0:
        if len(exclude_index_1) < len(exclude_index_2):
            if len(exclude_index_1) > 0:
                logger.info('Excluded dend Dist 1 %d: %s' % (dendrite_number, exclude_index_1))
                dendrite_ids = np.delete(dendrite_ids, exclude_index_1)
        elif len(exclude_index_2) > 0:
            logger.info('Excluded dend Dist 2 %d: %s' % (dendrite_number, exclude_index_2))
            dendrite_ids = np.delete(dendrite_ids, exclude_index_2)
    return dendrite_ids
def showDendrites(timeDict, showB=False, exclude=None):
    """ helper function to plot the dendrite numbers

    Shows the max projection of the label image, writes each dendrite mask's
    index next to its center (red) and each dendrite number at the mean
    position of its masks (white).

    :param timeDict: time course dictionary
    :param showB: use the B segmentation (MasksB / labelimgAllB / dendLabelTableB)
    :param exclude: optional iterable of dendrite numbers to leave unlabeled
    """
    suffix = 'B' if showB else ''
    masks_df = timeDict['Masks' + suffix]
    label_img = timeDict['labelimgAll' + suffix]
    dend_table = timeDict['dendLabelTable' + suffix]
    px = timeDict['pixelSize'][0]
    plt.figure(figsize=(12, 12))
    plt.imshow(label_img.max(axis=2).transpose(1, 0))
    # per-mask index labels in red, slightly offset to the right
    dend_rows = masks_df[masks_df.MaskType == 'Dendrite']
    for center, idx in zip(dend_rows['Centers'], dend_rows.index):
        plt.text(center[0] / px + 18, center[1] / px, str(int(idx)), color='r')
    # dendrite-number labels in white at the mean mask position
    for num in np.unique(dend_table[:, 1].T):
        if exclude is not None and num in exclude:
            continue
        sel = masks_df[(masks_df.MaskType == 'Dendrite') & (masks_df.DendNum == num)].Centers
        XY = (np.array(list(map(list, sel.values)))[:, [0, 1]].mean(axis=0) / px).astype(int)
        plt.text(XY[0], XY[1], str(num), size=20, color='white')
def showSpines(timeDict, selected=None, pixSize=376.666, showB=False):
    """Render the label image as a plotly heatmap with the selected masks
    circled and annotated with their index.

    :param timeDict: time course dictionary
    :param selected: iterable of mask indices to highlight
    :param pixSize: pixel size used to convert Centers to pixels via
        center * 1000 / pixSize -- presumably Centers are in um and pixSize
        in nm/pixel; TODO confirm units
    :param showB: use the B segmentation (MasksB / labelimgAllB)
    :return: None; the figure is displayed with plotly iplot
    """
    if showB:
        masks = timeDict['MasksB']
    else:
        masks = timeDict['Masks']
    shapes = []
    annotations = []
    # one circle + one text annotation per selected mask
    for mark in selected:
        x = masks.loc[mark].Centers[0] * 1000.0 / pixSize
        y = masks.loc[mark].Centers[1] * 1000.0 / pixSize
        shapes.append({
            'type': 'circle',
            'xref': 'x',
            'yref': 'y',
            'x0': x - 4,
            'y0': y - 4,
            'x1': x + 4,
            'y1': y + 4,
            'line': {
                'color': 'rgba(50, 171, 96, 1)',
            },
        })
        annotations.append(dict(
            x=x,
            y=y + 5,
            xref='x',
            yref='y',
            text=str(mark),
            font=dict(
                family='Courier New, monospace',
                size=16,
                color='#ffffff'),
            showarrow=True,
            arrowhead=7,
            ax=0,
            ay=-40
        ))
    if showB:
        labelimgAll = timeDict['labelimgAllB']
    else:
        labelimgAll = timeDict['labelimgAll']
    # 2x upscaled canvas; heatmap is the max projection of the label image
    layout = go.Layout(height=labelimgAll.shape[1] * 2,
                       width=labelimgAll.shape[0] * 2,
                       shapes=shapes,
                       annotations=annotations)
    heatmap = labelimgAll.max(axis=2).transpose(1, 0)
    trace = go.Heatmap(z=heatmap)
    py.iplot(dict(data=[trace], layout=layout))
def get_path_length_grid(timeDict):
    """Build an n_masks x n_masks matrix of along-tree path lengths between
    the SWC nodes the masks are attached to (via their ParentId).

    :param timeDict: time course dictionary; needs 'Masks' (with a ParentId
        column), 'pathLengthAll' and 'excludeIndex'
    :return: None; the matrix is stored in timeDict['pathLengthGrid']
    """
    Masks = timeDict['Masks']
    # legacy sessions stored pathLengthAll as a dict-of-dicts; densify once.
    # (dict.iteritems() is Python 2 only -> .items())
    if isinstance(timeDict['pathLengthAll'], dict):
        a = np.zeros((len(timeDict['pathLengthAll']) + 1, len(timeDict['pathLengthAll']) + 1))
        for key, value in timeDict['pathLengthAll'].items():
            for key2 in sorted(value):
                a[int(key), int(key2)] = value[key2]
        timeDict['pathLengthAll'] = a
    pathLength = timeDict['pathLengthAll']
    pathLengthGrid = np.zeros((len(Masks), len(Masks)))
    # ParentId is a 1-based SWC node id, hence the -1 when indexing
    for x in range(len(Masks)):
        xNode = int(Masks.loc[x].ParentId)
        for y in range(len(Masks)):
            yNode = int(Masks.loc[y].ParentId)
            if x != y:
                pathLengthGrid[x, y] = pathLength[xNode - 1, yNode - 1]
    # NaN-out columns of excluded masks so they drop out of downstream
    # analyses (np.NAN alias was removed in NumPy 2.0 -> np.nan)
    excludeIndex = timeDict['excludeIndex']
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.nan
        # pathLengthGrid[excludeIndex, :] = np.nan
    timeDict['pathLengthGrid'] = pathLengthGrid
def get_high_res_path_length_grid(timeDict, smoothWin=50):
    """High-resolution within-dendrite path length between all masks.

    Distances are measured along the (smoothed) traced backbone of each
    dendrite; pairs of masks on different dendrites get NaN.

    :param timeDict: time course dictionary; needs 'dend', 'Masks',
        'pixelSize' and 'excludeIndex'
    :param smoothWin: smoothing window (samples) for the backbone arc length
    :return: None; the matrix is stored in timeDict['pathLengthGridHR']
    """

    def meanSmooth(x, winSize):
        # centered running mean of width winSize + 1 with NaN edge handling.
        # Integer '//' keeps the shift counts ints on Python 3 (the previous
        # 'winSize / 2' made range() fail); np.nan replaces the np.NAN alias
        # removed in NumPy 2.0.
        halfWin = winSize // 2
        X = np.full((len(x), winSize + 1), np.nan)
        for i in range(halfWin):
            X[(halfWin - i):, i] = x[:-(halfWin - i)]
            X[-(halfWin - i):, i] = np.nan
        X[:, halfWin] = x
        for i in range(1, halfWin + 1):
            X[:-i, i + halfWin] = x[i:]
            X[:i, i + halfWin] = np.nan
        return np.nanmean(X, axis=1)

    # backbones with axes reordered to (col, row, plane)
    # -- TODO confirm axis convention
    All = []
    for x in timeDict['dend']:
        if len(x.shape) > 1:
            All.append(x[:, [1, 0, 2]])
        else:
            All.append(x[[1, 0, 2]])
    Masks = timeDict['Masks']
    nMasks = len(Masks)
    AnatomyCenters = np.array([np.array(x) for x in Masks.Centers])
    # per mask: dendrite number and arc-length position along its backbone
    segList = np.zeros(nMasks)
    Dist = np.zeros(nMasks)
    for segNum, segment in enumerate(All):
        maskIdx = (Masks.DendNum == segNum).values.nonzero()[0]
        AnatomyCentersSeg = AnatomyCenters[maskIdx]
        if len(segment.shape) < 2:
            # single-point dendrite: no arc length to measure
            Dist[maskIdx] = np.zeros(len(maskIdx))
            segList[maskIdx] = segNum * np.ones(len(maskIdx))
            continue
        x = segment[:, 0]
        y = segment[:, 1]
        z = segment[:, 2]
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * timeDict['pixelSize'][0]
        y = y * timeDict['pixelSize'][1]
        z = z * timeDict['pixelSize'][2]
        # smooth the backbone before integrating, to suppress pixel jitter
        sx = meanSmooth(x, smoothWin)
        sy = meanSmooth(y, smoothWin)
        sz = meanSmooth(z, smoothWin)
        dist = np.hstack([0, np.cumsum(np.sqrt(np.diff(sx) ** 2 + np.diff(sy) ** 2 + np.diff(sz) ** 2))])
        # snap each mask center to its nearest (unsmoothed) backbone sample
        tree = KDTree(np.array(list(zip(x, y, z))))
        distances, indexes = tree.query(AnatomyCentersSeg)
        Dist[maskIdx] = dist[indexes]
        segList[maskIdx] = segNum * np.ones(len(maskIdx))
    # same dendrite -> arc-length difference; different dendrites -> NaN
    pathLengthGrid = np.ones([nMasks, nMasks]) * np.nan
    for i in range(nMasks):
        for j in range(nMasks):
            if segList[i] == segList[j]:
                pathLengthGrid[i, j] = np.absolute(Dist[i] - Dist[j])
    excludeIndex = timeDict['excludeIndex']
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.nan
    timeDict['pathLengthGridHR'] = pathLengthGrid
def get_branch_dist(timeDict, plot=False, warn_dist=10):
    """Pairwise along-tree distance between imaged dendrites, with the
    per-dendrite backbone samples closest to each connecting point.

    :param timeDict: time course dictionary; needs 'dend', 'pixelSize',
        'InfoSWC', 'pathLengthAll' and the saved inverse transform
    :param plot: show the distance matrix as an image
    :param warn_dist: log when the median backbone-to-SWC distance exceeds this
    :return: results array, axis 0 = (point in dend x, point in dend y, dist)
    """
    # Note: Not working most of the time
    dend = timeDict['dend']
    num_dend = len(dend)
    transformInv = loadTransform(timeDict['path'])
    pixelSizeSession = timeDict['pixelSize']
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    # find closest point in interpolated space
    img = tf.imread(timeDict['SessionPath']).astype(int).transpose(1, 2, 0)
    imgSize = img.shape
    Info = timeDict['InfoSWC']
    All = Info['AllSWC']
    # all reconstruction voxels in anatomy space (um), with segment numbers
    Connecting = list([])
    segList = list([])
    for segNum, segment in enumerate(All):
        x, y, z = np.unravel_index(segment, imgSize, 'F')
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * xyStep
        y = y * xyStep
        z = z * timeDict['anatomyZstep']
        if len(x.shape) > 0:
            Connecting.extend(list(zip(x, y, z)))
            segList.extend([segNum] * x.shape[0])
        else:
            Connecting.append((x, y, z))
            segList.append(segNum)
    Connecting = np.array(Connecting)
    tree = KDTree(Connecting)
    # SWC node table; coordinates used unscaled (scaling commented out, so
    # the table is assumed to already be in um -- TODO confirm)
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0]  # * xyStep
    Table[:, 1] = Table[:, 1]  # * xyStep
    Table[:, 2] = Table[:, 2]  # * timeDict['anatomyZstep']
    tree2 = KDTree(Table)
    # results in ((point in dend x, point in dend y, dist), dend x, dend y)
    results = np.zeros((3, num_dend, num_dend))
    for x, y in itertools.product(range(num_dend), range(num_dend)):
        dend1 = copy.deepcopy(dend[x]).astype('float')
        dend2 = copy.deepcopy(dend[y]).astype('float')
        # map both backbones into anatomy space (axis permutation as in
        # transformPoints -- TODO confirm convention)
        dend1_fov = []
        dend2_fov = []
        for center in dend1:
            point = transformInv.TransformPoint(
                (center[2], center[1] * pixelSizeSession[1], center[0] * pixelSizeSession[0]))
            dend1_fov.append((point[1], point[2], point[0]))
        for center in dend2:
            point = transformInv.TransformPoint(
                (center[2], center[1] * pixelSizeSession[1], center[0] * pixelSizeSession[0]))
            dend2_fov.append((point[1], point[2], point[0]))
        # alignment sanity check against the reconstruction voxels
        distances, indexes = tree.query(dend1_fov)
        if np.median(distances) > warn_dist:
            logger.info('Median dist high: = %f' % np.median(distances))
        # snap each backbone sample to its nearest SWC node
        distances_dend1, indexes_dend1 = tree2.query(dend1_fov)
        Ids_dend1 = Table2[indexes_dend1, 0]
        distances_dend2, indexes_dend2 = tree2.query(dend2_fov)
        Ids_dend2 = Table2[indexes_dend2, 0]
        dendNum_dend2 = Table2[indexes_dend2, -1]
        dendNum_dend1 = Table2[indexes_dend1, -1]
        # drop snapped samples whose SWC dendrite number is not the dominant
        # one (or that re-enter a dendrite non-contiguously)
        test = copy.deepcopy(dendNum_dend1)
        uD, counts = np.unique(test, return_counts=True)
        exIdx = []
        for k in uD:
            check = (test == k).nonzero()[0]
            if len(check) > 1:
                if np.max(np.diff(check)) > 1:
                    exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
            else:
                exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
        if len(exIdx) > 0:
            logger.info('Excluded dend1: %s' % exIdx)
            Ids_dend1 = np.delete(Ids_dend1, exIdx)
        # same filtering for the second dendrite
        test = copy.deepcopy(dendNum_dend2)
        uD, counts = np.unique(test, return_counts=True)
        exIdx = []
        for k in uD:
            check = (test == k).nonzero()[0]
            if len(check) > 1:
                if np.max(np.diff(check)) > 1:
                    exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
            else:
                exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
        if len(exIdx) > 0:
            logger.info('Excluded dend2: %s' % exIdx)
            Ids_dend2 = np.delete(Ids_dend2, exIdx)
        Ids_dend2 = np.unique(Ids_dend2).astype('int')
        Ids_dend1 = np.unique(Ids_dend1).astype('int')
        # closest pair of SWC nodes (by along-tree distance) between x and y.
        # NOTE(review): indexes pathLengthAll with the raw ids, while other
        # functions use id - 1 -- confirm (may explain "not working" above)
        dist = np.inf
        dend1_close = 0
        dend2_close = 0
        for x2, y2 in itertools.product(Ids_dend1, Ids_dend2):
            if timeDict['pathLengthAll'][x2][y2] < dist:
                dist = timeDict['pathLengthAll'][x2][y2]
                dend1_close = x2
                dend2_close = y2
        if np.isfinite(dist):
            # per dendrite: backbone sample closest to the connecting node
            Ids_dend1_best = np.where(Table2[indexes_dend1, 0] == dend1_close)[0]
            dend1_best_dist = distances_dend1[Ids_dend1_best]
            dend1_best_offset = np.argmin(dend1_best_dist)
            dend1_best_point = Ids_dend1_best[dend1_best_offset]
            Ids_dend2_best = np.where(Table2[indexes_dend2, 0] == dend2_close)[0]
            dend2_best_dist = distances_dend2[Ids_dend2_best]
            dend2_best_offset = np.argmin(dend2_best_dist)
            dend2_best_point = Ids_dend2_best[dend2_best_offset]
            results[:, x, y] = np.array([dend1_best_point, dend2_best_point, dist])
        else:
            results[:, x, y] = np.array([np.nan, np.nan, dist])
    if plot:
        plt.imshow(results[2, :, :])
        plt.colorbar(label='Distance')
        plt.xlabel('Dend#')
        plt.ylabel('Dend#')
    return results
def get_mask_index(timeDict, mask='Spine', use_B=False, noise_th=None):
    """Return the indices of non-excluded masks of the requested type.

    :param timeDict: timeDict to use
    :param mask: options are 'Spine' and 'Dendrite'
    :param use_B: use the B segmentation ('MasksB', 'excludeIndexB', 'TCNoiseB')
    :param noise_th: if None return every valid mask index; if a float keep
        only masks whose mean noise is below the threshold
    :return: index of masks
    """
    suffix = 'B' if use_B else ''
    mask_table = timeDict['Masks' + suffix]
    excluded = timeDict['excludeIndex' + suffix]
    # positions of the requested mask type, minus the curated exclusions
    candidates = np.setdiff1d(np.where(mask_table.MaskType == mask)[0], excluded)
    if noise_th is None:
        return candidates
    # additionally require a quiet trace (mean noise below threshold)
    mean_noise = np.nanmean(timeDict['TCNoise' + suffix], axis=1)
    quiet = np.where(mean_noise < noise_th)[0]
    return np.intersect1d(candidates, quiet)
| 41.90715 | 119 | 0.585492 |
acf16565a01dfc76b18d0c901b336f2ce5d70469 | 129,866 | py | Python | test/sql/test_types.py | zcattacz/sqlalchemy | 882afe4c3718f741800809003c6c465d835de213 | [
"MIT"
] | null | null | null | test/sql/test_types.py | zcattacz/sqlalchemy | 882afe4c3718f741800809003c6c465d835de213 | [
"MIT"
] | null | null | null | test/sql/test_types.py | zcattacz/sqlalchemy | 882afe4c3718f741800809003c6c465d835de213 | [
"MIT"
] | null | null | null | # coding: utf-8
import datetime
import decimal
import importlib
import operator
import os
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import ARRAY
from sqlalchemy import BigInteger
from sqlalchemy import bindparam
from sqlalchemy import BLOB
from sqlalchemy import BOOLEAN
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import CHAR
from sqlalchemy import CLOB
from sqlalchemy import DATE
from sqlalchemy import Date
from sqlalchemy import DATETIME
from sqlalchemy import DateTime
from sqlalchemy import DECIMAL
from sqlalchemy import dialects
from sqlalchemy import distinct
from sqlalchemy import Double
from sqlalchemy import Enum
from sqlalchemy import exc
from sqlalchemy import FLOAT
from sqlalchemy import Float
from sqlalchemy import func
from sqlalchemy import inspection
from sqlalchemy import INTEGER
from sqlalchemy import Integer
from sqlalchemy import Interval
from sqlalchemy import JSON
from sqlalchemy import LargeBinary
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import NCHAR
from sqlalchemy import NUMERIC
from sqlalchemy import Numeric
from sqlalchemy import NVARCHAR
from sqlalchemy import PickleType
from sqlalchemy import REAL
from sqlalchemy import select
from sqlalchemy import SMALLINT
from sqlalchemy import SmallInteger
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import TIME
from sqlalchemy import Time
from sqlalchemy import TIMESTAMP
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import types
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy import VARCHAR
import sqlalchemy.dialects.mysql as mysql
import sqlalchemy.dialects.oracle as oracle
import sqlalchemy.dialects.postgresql as pg
from sqlalchemy.engine import default
from sqlalchemy.schema import AddConstraint
from sqlalchemy.schema import CheckConstraint
from sqlalchemy.sql import column
from sqlalchemy.sql import ddl
from sqlalchemy.sql import elements
from sqlalchemy.sql import null
from sqlalchemy.sql import operators
from sqlalchemy.sql import sqltypes
from sqlalchemy.sql import table
from sqlalchemy.sql import visitors
from sqlalchemy.sql.sqltypes import TypeEngine
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import mock
from sqlalchemy.testing import pickleable
from sqlalchemy.testing.assertions import expect_raises_message
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import pep435_enum
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import picklers
def _all_dialect_modules():
    """Import and return every public dialect module under
    ``sqlalchemy.dialects`` (names starting with ``_`` are skipped)."""
    modules = []
    for name in dialects.__all__:
        if name.startswith("_"):
            continue
        modules.append(
            importlib.import_module("sqlalchemy.dialects.%s" % name)
        )
    return modules
def _all_dialects():
    """Instantiate the base dialect class of every dialect module."""
    instances = []
    for mod in _all_dialect_modules():
        instances.append(mod.base.dialect())
    return instances
def _types_for_mod(mod):
    """Yield every :class:`.TypeEngine` subclass exposed as an attribute
    of module *mod*, in ``dir()`` (alphabetical) order."""
    for attr_name in dir(mod):
        attr = getattr(mod, attr_name)
        if isinstance(attr, type) and issubclass(attr, types.TypeEngine):
            yield attr
def _all_types(omit_special_types=False):
    """Yield each distinct type class from ``sqlalchemy.types`` followed by
    those from every dialect module, de-duplicated across modules.

    :param omit_special_types: when True, skip the abstract/special classes
     ``TypeDecorator``, ``TypeEngine`` and ``Variant`` from the generic
     ``types`` module.
    """
    special = (types.TypeDecorator, types.TypeEngine, types.Variant)
    emitted = set()

    # generic types first, optionally filtering out the special classes
    for typ in _types_for_mod(types):
        if omit_special_types and typ in special:
            continue
        if typ not in emitted:
            emitted.add(typ)
            yield typ

    # then dialect-specific types, skipping anything already emitted
    for dialect_mod in _all_dialect_modules():
        for typ in _types_for_mod(dialect_mod):
            if typ not in emitted:
                emitted.add(typ)
                yield typ
class AdaptTest(fixtures.TestBase):
    """Tests for type ``adapt()`` behavior, uppercase-type rendering,
    ``python_type``, and repr/str across all known types and dialects."""

    @testing.combinations(((t,) for t in _types_for_mod(types)), id_="n")
    def test_uppercase_importable(self, typ):
        # uppercase (SQL-standard) names like VARCHAR must be importable
        # from both the top-level ``sqlalchemy`` package and ``types``
        if typ.__name__ == typ.__name__.upper():
            assert getattr(sa, typ.__name__) is typ
            assert typ.__name__ in dir(types)

    @testing.combinations(
        ((d.name, d) for d in _all_dialects()), argnames="dialect", id_="ia"
    )
    @testing.combinations(
        (REAL(), "REAL"),
        (FLOAT(), "FLOAT"),
        (NUMERIC(), "NUMERIC"),
        (DECIMAL(), "DECIMAL"),
        (INTEGER(), "INTEGER"),
        (SMALLINT(), "SMALLINT"),
        (TIMESTAMP(), ("TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE")),
        (DATETIME(), "DATETIME"),
        (DATE(), "DATE"),
        (TIME(), ("TIME", "TIME WITHOUT TIME ZONE")),
        (CLOB(), "CLOB"),
        (VARCHAR(10), ("VARCHAR(10)", "VARCHAR(10 CHAR)")),
        (
            NVARCHAR(10),
            ("NVARCHAR(10)", "NATIONAL VARCHAR(10)", "NVARCHAR2(10)"),
        ),
        (CHAR(), "CHAR"),
        (NCHAR(), ("NCHAR", "NATIONAL CHAR")),
        (BLOB(), ("BLOB", "BLOB SUB_TYPE 0")),
        (BOOLEAN(), ("BOOLEAN", "BOOL", "INTEGER")),
        argnames="type_, expected",
        id_="ra",
    )
    def test_uppercase_rendering(self, dialect, type_, expected):
        """Test that uppercase types from types.py always render as their
        type.

        As of SQLA 0.6, using an uppercase type means you want specifically
        that type. If the database in use doesn't support that DDL, it (the DB
        backend) should raise an error - it means you should be using a
        lowercased (genericized) type.

        """
        # normalize: a single expected string becomes a one-element tuple
        if isinstance(expected, str):
            expected = (expected,)

        try:
            compiled = type_.compile(dialect=dialect)
        except NotImplementedError:
            # dialect doesn't implement compilation for this type at all
            return

        assert compiled in expected, "%r matches none of %r for dialect %s" % (
            compiled,
            expected,
            dialect.name,
        )

        assert (
            str(types.to_instance(type_)) in expected
        ), "default str() of type %r not expected, %r" % (type_, expected)

    def _adaptions():
        # generator of (id, is_down_adaption, type, targets) tuples used
        # by the combinations decorator on test_adapt_method below
        for typ in _all_types(omit_special_types=True):
            # up adapt from LowerCase to UPPERCASE,
            # as well as to all non-sqltypes
            up_adaptions = [typ] + typ.__subclasses__()
            yield "%s.%s" % (
                typ.__module__,
                typ.__name__,
            ), False, typ, up_adaptions
            for subcl in typ.__subclasses__():
                if (
                    subcl is not typ
                    and typ is not TypeDecorator
                    and "sqlalchemy" in subcl.__module__
                ):
                    yield "%s.%s" % (
                        subcl.__module__,
                        subcl.__name__,
                    ), True, subcl, [typ]

    @testing.combinations(_adaptions(), id_="iaaa")
    def test_adapt_method(self, is_down_adaption, typ, target_adaptions):
        """ensure all types have a working adapt() method,
        which creates a distinct copy.

        The distinct copy ensures that when we cache
        the adapted() form of a type against the original
        in a weak key dictionary, a cycle is not formed.

        This test doesn't test type-specific arguments of
        adapt() beyond their defaults.

        """

        if issubclass(typ, ARRAY):
            t1 = typ(String)
        else:
            t1 = typ()

        for cls in target_adaptions:
            # Emulated types don't participate in plain adapt() both ways
            if (is_down_adaption and issubclass(typ, sqltypes.Emulated)) or (
                not is_down_adaption and issubclass(cls, sqltypes.Emulated)
            ):
                continue

            # print("ADAPT %s -> %s" % (t1.__class__, cls))
            t2 = t1.adapt(cls)
            assert t1 is not t2

            if is_down_adaption:
                t2, t1 = t1, t2

            for k in t1.__dict__:
                # skip attributes not expected to be carried over verbatim
                if k in (
                    "impl",
                    "_is_oracle_number",
                    "_create_events",
                    "create_constraint",
                    "inherit_schema",
                    "schema",
                    "metadata",
                    "name",
                ):
                    continue
                # assert each value was copied, or that
                # the adapted type has a more specific
                # value than the original (i.e. SQL Server
                # applies precision=24 for REAL)
                assert (
                    getattr(t2, k) == t1.__dict__[k] or t1.__dict__[k] is None
                )

        eq_(t1.evaluates_none().should_evaluate_none, True)

    def test_python_type(self):
        # python_type of each generic type maps to the expected Python class
        eq_(types.Integer().python_type, int)
        eq_(types.Numeric().python_type, decimal.Decimal)
        eq_(types.Numeric(asdecimal=False).python_type, float)
        eq_(types.LargeBinary().python_type, bytes)
        eq_(types.Float().python_type, float)
        eq_(types.Double().python_type, float)
        eq_(types.Interval().python_type, datetime.timedelta)
        eq_(types.Date().python_type, datetime.date)
        eq_(types.DateTime().python_type, datetime.datetime)
        eq_(types.String().python_type, str)
        eq_(types.Unicode().python_type, str)
        eq_(types.Enum("one", "two", "three").python_type, str)

        assert_raises(
            NotImplementedError, lambda: types.TypeEngine().python_type
        )

    @testing.uses_deprecated()
    @testing.combinations(*[(t,) for t in _all_types(omit_special_types=True)])
    def test_repr(self, typ):
        # smoke test: repr() must not raise for any type
        if issubclass(typ, ARRAY):
            t1 = typ(String)
        else:
            t1 = typ()
        repr(t1)

    @testing.uses_deprecated()
    @testing.combinations(*[(t,) for t in _all_types(omit_special_types=True)])
    def test_str(self, typ):
        # smoke test: str() must not raise for any type
        if issubclass(typ, ARRAY):
            t1 = typ(String)
        else:
            t1 = typ()
        str(t1)

    def test_str_third_party(self):
        class TINYINT(types.TypeEngine):
            __visit_name__ = "TINYINT"

        eq_(str(TINYINT()), "TINYINT")

    def test_str_third_party_uppercase_no_visit_name(self):
        # uppercase class name is used as-is even without __visit_name__
        class TINYINT(types.TypeEngine):
            pass

        eq_(str(TINYINT()), "TINYINT")

    def test_str_third_party_camelcase_no_visit_name(self):
        # camelcase class name falls back to repr-style "Name()" form
        class TinyInt(types.TypeEngine):
            pass

        eq_(str(TinyInt()), "TinyInt()")

    def test_adapt_constructor_copy_override_kw(self):
        """test that adapt() can accept kw args that override
        the state of the original object.

        This essentially is testing the behavior of util.constructor_copy().

        """
        t1 = String(length=50)
        t2 = t1.adapt(Text)
        eq_(t2.length, 50)
class TypeAffinityTest(fixtures.TestBase):
    """Tests for ``_type_affinity`` / ``_compare_type_affinity``, the
    internal grouping of types into broad families."""

    @testing.combinations(
        (String(), String),
        (VARCHAR(), String),
        (Date(), Date),
        (LargeBinary(), types._Binary),
        id_="rn",
    )
    def test_type_affinity(self, type_, affin):
        eq_(type_._type_affinity, affin)

    @testing.combinations(
        (Integer(), SmallInteger(), True),
        (Integer(), String(), False),
        (Integer(), Integer(), True),
        (Text(), String(), True),
        (Text(), Unicode(), True),
        (LargeBinary(), Integer(), False),
        (LargeBinary(), PickleType(), True),
        (PickleType(), LargeBinary(), True),
        (PickleType(), PickleType(), True),
        id_="rra",
    )
    def test_compare_type_affinity(self, t1, t2, comp):
        eq_(t1._compare_type_affinity(t2), comp, "%s %s" % (t1, t2))

    def test_decorator_doesnt_cache(self):
        # a TypeDecorator that picks its impl per-dialect must not leak
        # one dialect's affinity into another's
        from sqlalchemy.dialects import postgresql

        class MyType(TypeDecorator):
            impl = CHAR
            cache_ok = True

            def load_dialect_impl(self, dialect):
                if dialect.name == "postgresql":
                    return dialect.type_descriptor(postgresql.UUID())
                else:
                    return dialect.type_descriptor(CHAR(32))

        t1 = MyType()
        d = postgresql.dialect()
        assert t1._type_affinity is String
        assert t1.dialect_impl(d)._type_affinity is postgresql.UUID
class AsGenericTest(fixtures.TestBase):
    """Tests for ``TypeEngine.as_generic()``, which maps dialect-specific
    types back to their CamelCase generic equivalents."""

    @testing.combinations(
        (String(), String()),
        (VARCHAR(length=100), String(length=100)),
        (NVARCHAR(length=100), Unicode(length=100)),
        (DATE(), Date()),
        (pg.JSON(), sa.JSON()),
        (pg.ARRAY(sa.String), sa.ARRAY(sa.String)),
        (Enum("a", "b", "c"), Enum("a", "b", "c")),
        (pg.ENUM("a", "b", "c"), Enum("a", "b", "c")),
        (mysql.ENUM("a", "b", "c"), Enum("a", "b", "c")),
        (pg.INTERVAL(precision=5), Interval(native=True, second_precision=5)),
        (
            oracle.INTERVAL(second_precision=5, day_precision=5),
            Interval(native=True, day_precision=5, second_precision=5),
        ),
    )
    def test_as_generic(self, t1, t2):
        # compare by repr() since type objects don't define equality
        assert repr(t1.as_generic(allow_nulltype=False)) == repr(t2)

    @testing.combinations(
        *[
            (t,)
            for t in _all_types(omit_special_types=True)
            if not util.method_is_overridden(t, TypeEngine.as_generic)
        ]
    )
    def test_as_generic_all_types_heuristic(self, type_):
        # types relying on the default MRO-based as_generic() heuristic
        if issubclass(type_, ARRAY):
            t1 = type_(String)
        else:
            t1 = type_()

        try:
            gentype = t1.as_generic()
        except NotImplementedError:
            pass
        else:
            assert isinstance(t1, gentype.__class__)
            assert isinstance(gentype, TypeEngine)

        gentype = t1.as_generic(allow_nulltype=True)
        if not isinstance(gentype, types.NULLTYPE.__class__):
            assert isinstance(t1, gentype.__class__)
            assert isinstance(gentype, TypeEngine)

    @testing.combinations(
        *[
            (t,)
            for t in _all_types(omit_special_types=True)
            if util.method_is_overridden(t, TypeEngine.as_generic)
        ]
    )
    def test_as_generic_all_types_custom(self, type_):
        # types that provide their own as_generic() implementation
        if issubclass(type_, ARRAY):
            t1 = type_(String)
        else:
            t1 = type_()

        gentype = t1.as_generic(allow_nulltype=False)
        assert isinstance(gentype, TypeEngine)
class PickleTypesTest(fixtures.TestBase):
    """Ensure Column/MetaData objects carrying each generic type survive a
    pickle round trip under every supported pickle protocol."""

    @testing.combinations(
        ("Boo", Boolean()),
        ("Str", String()),
        ("Tex", Text()),
        ("Uni", Unicode()),
        ("Int", Integer()),
        ("Sma", SmallInteger()),
        ("Big", BigInteger()),
        ("Num", Numeric()),
        ("Flo", Float()),
        ("Dat", DateTime()),
        ("Dat", Date()),
        ("Tim", Time()),
        ("Lar", LargeBinary()),
        ("Pic", PickleType()),
        ("Int", Interval()),
        id_="ar",
    )
    def test_pickle_types(self, name, type_):
        column_type = Column(name, type_)
        meta = MetaData()
        Table("foo", meta, column_type)

        # picklers() yields (loads, dumps) pairs for each pickle protocol
        for loads, dumps in picklers():
            loads(dumps(column_type))
            loads(dumps(meta))
class _UserDefinedTypeFixture:
    """Mixin defining a ``users`` table whose columns exercise every style
    of user-defined type: raw UserDefinedType, TypeDecorator with explicit
    processors, TypeDecorator with process_* hooks, and a TypeDecorator
    wrapping another TypeDecorator."""

    @classmethod
    def define_tables(cls, metadata):
        # raw UserDefinedType with hand-written bind/result processors
        class MyType(types.UserDefinedType):
            def get_col_spec(self):
                return "VARCHAR(100)"

            def bind_processor(self, dialect):
                def process(value):
                    if value is None:
                        value = "<null value>"
                    return "BIND_IN" + value

                return process

            def result_processor(self, dialect, coltype):
                def process(value):
                    return value + "BIND_OUT"

                return process

            def adapt(self, typeobj):
                return typeobj()

        # TypeDecorator overriding bind_processor/result_processor directly
        class MyDecoratedType(types.TypeDecorator):
            impl = String
            cache_ok = True

            def bind_processor(self, dialect):
                impl_processor = super(MyDecoratedType, self).bind_processor(
                    dialect
                ) or (lambda value: value)

                def process(value):
                    if value is None:
                        value = "<null value>"
                    return "BIND_IN" + impl_processor(value)

                return process

            def result_processor(self, dialect, coltype):
                impl_processor = super(MyDecoratedType, self).result_processor(
                    dialect, coltype
                ) or (lambda value: value)

                def process(value):
                    return impl_processor(value) + "BIND_OUT"

                return process

            def copy(self):
                return MyDecoratedType()

        # TypeDecorator using the simpler process_bind_param /
        # process_result_value hooks
        class MyNewUnicodeType(types.TypeDecorator):
            impl = Unicode
            cache_ok = True

            def process_bind_param(self, value, dialect):
                if value is None:
                    value = "<null value>"
                return "BIND_IN" + value

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

            def copy(self):
                return MyNewUnicodeType(self.impl.length)

        # integer variant: None binds as 29, values scaled by 10 both ways
        class MyNewIntType(types.TypeDecorator):
            impl = Integer
            cache_ok = True

            def process_bind_param(self, value, dialect):
                if value is None:
                    value = 29
                return value * 10

            def process_result_value(self, value, dialect):
                return value * 10

            def copy(self):
                return MyNewIntType()

        # subclass overriding only the result side (scales by 15)
        class MyNewIntSubClass(MyNewIntType):
            def process_result_value(self, value, dialect):
                return value * 15

            def copy(self):
                return MyNewIntSubClass()

        class MyUnicodeType(types.TypeDecorator):
            impl = Unicode
            cache_ok = True

            def bind_processor(self, dialect):
                impl_processor = super(MyUnicodeType, self).bind_processor(
                    dialect
                ) or (lambda value: value)

                def process(value):
                    if value is None:
                        value = "<null value>"
                    return "BIND_IN" + impl_processor(value)

                return process

            def result_processor(self, dialect, coltype):
                impl_processor = super(MyUnicodeType, self).result_processor(
                    dialect, coltype
                ) or (lambda value: value)

                def process(value):
                    return impl_processor(value) + "BIND_OUT"

                return process

            def copy(self):
                return MyUnicodeType(self.impl.length)

        # TypeDecorator whose impl is itself a TypeDecorator
        class MyDecOfDec(types.TypeDecorator):
            impl = MyNewIntType
            cache_ok = True

        Table(
            "users",
            metadata,
            Column("user_id", Integer, primary_key=True),
            # totally custom type
            Column("goofy", MyType, nullable=False),
            # decorated type with an argument, so it's a String
            Column("goofy2", MyDecoratedType(50), nullable=False),
            Column("goofy4", MyUnicodeType(50), nullable=False),
            Column("goofy7", MyNewUnicodeType(50), nullable=False),
            Column("goofy8", MyNewIntType, nullable=False),
            Column("goofy9", MyNewIntSubClass, nullable=False),
            Column("goofy10", MyDecOfDec, nullable=False),
        )
class UserDefinedRoundTripTest(_UserDefinedTypeFixture, fixtures.TablesTest):
    """Round-trip the user-defined type fixture through a real backend and
    assert that bind/result processors fire for plain and expanding INs."""

    __backend__ = True

    def _data_fixture(self, connection):
        # insert four rows, the last one entirely NULL, via executemany-style
        # parameter dictionaries (contrast the insert().values() variant in
        # BindProcessorInsertValuesTest)
        users = self.tables.users
        connection.execute(
            users.insert(),
            dict(
                user_id=2,
                goofy="jack",
                goofy2="jack",
                goofy4="jack",
                goofy7="jack",
                goofy8=12,
                goofy9=12,
                goofy10=12,
            ),
        )
        connection.execute(
            users.insert(),
            dict(
                user_id=3,
                goofy="lala",
                goofy2="lala",
                goofy4="lala",
                goofy7="lala",
                goofy8=15,
                goofy9=15,
                goofy10=15,
            ),
        )
        connection.execute(
            users.insert(),
            dict(
                user_id=4,
                goofy="fred",
                goofy2="fred",
                goofy4="fred",
                goofy7="fred",
                goofy8=9,
                goofy9=9,
                goofy10=9,
            ),
        )
        connection.execute(
            users.insert(),
            dict(
                user_id=5,
                goofy=None,
                goofy2=None,
                goofy4=None,
                goofy7=None,
                goofy8=None,
                goofy9=None,
                goofy10=None,
            ),
        )

    def test_processing(self, connection):
        users = self.tables.users
        self._data_fixture(connection)

        result = connection.execute(
            users.select().order_by(users.c.user_id)
        ).fetchall()
        # string columns gain BIND_IN on the way in, BIND_OUT on the way out;
        # int columns are multiplied by 10 in, 10 (or 15 for goofy9) out;
        # None binds as "<null value>" / 29 respectively
        eq_(
            result,
            [
                (
                    2,
                    "BIND_INjackBIND_OUT",
                    "BIND_INjackBIND_OUT",
                    "BIND_INjackBIND_OUT",
                    "BIND_INjackBIND_OUT",
                    1200,
                    1800,
                    1200,
                ),
                (
                    3,
                    "BIND_INlalaBIND_OUT",
                    "BIND_INlalaBIND_OUT",
                    "BIND_INlalaBIND_OUT",
                    "BIND_INlalaBIND_OUT",
                    1500,
                    2250,
                    1500,
                ),
                (
                    4,
                    "BIND_INfredBIND_OUT",
                    "BIND_INfredBIND_OUT",
                    "BIND_INfredBIND_OUT",
                    "BIND_INfredBIND_OUT",
                    900,
                    1350,
                    900,
                ),
                (
                    5,
                    "BIND_IN<null value>BIND_OUT",
                    "BIND_IN<null value>BIND_OUT",
                    "BIND_IN<null value>BIND_OUT",
                    "BIND_IN<null value>BIND_OUT",
                    2900,
                    4350,
                    2900,
                ),
            ],
        )

    def test_plain_in_typedec(self, connection):
        users = self.tables.users
        self._data_fixture(connection)

        stmt = (
            select(users.c.user_id, users.c.goofy8)
            .where(users.c.goofy8.in_([15, 9]))
            .order_by(users.c.user_id)
        )
        result = connection.execute(stmt, {"goofy": [15, 9]})
        eq_(result.fetchall(), [(3, 1500), (4, 900)])

    def test_plain_in_typedec_of_typedec(self, connection):
        users = self.tables.users
        self._data_fixture(connection)

        stmt = (
            select(users.c.user_id, users.c.goofy10)
            .where(users.c.goofy10.in_([15, 9]))
            .order_by(users.c.user_id)
        )
        result = connection.execute(stmt, {"goofy": [15, 9]})
        eq_(result.fetchall(), [(3, 1500), (4, 900)])

    def test_expanding_in_typedec(self, connection):
        users = self.tables.users
        self._data_fixture(connection)

        stmt = (
            select(users.c.user_id, users.c.goofy8)
            .where(users.c.goofy8.in_(bindparam("goofy", expanding=True)))
            .order_by(users.c.user_id)
        )
        result = connection.execute(stmt, {"goofy": [15, 9]})
        eq_(result.fetchall(), [(3, 1500), (4, 900)])

    def test_expanding_in_typedec_of_typedec(self, connection):
        users = self.tables.users
        self._data_fixture(connection)

        stmt = (
            select(users.c.user_id, users.c.goofy10)
            .where(users.c.goofy10.in_(bindparam("goofy", expanding=True)))
            .order_by(users.c.user_id)
        )
        result = connection.execute(stmt, {"goofy": [15, 9]})
        eq_(result.fetchall(), [(3, 1500), (4, 900)])
class BindProcessorInsertValuesTest(UserDefinedRoundTripTest):
    """related to #6770, test that insert().values() applies to
    bound parameter handlers including the None value."""

    __backend__ = True

    def _data_fixture(self, connection):
        # same data as the parent fixture, but supplied through
        # insert().values() rather than execute() parameters, so the
        # inherited tests verify both code paths
        users = self.tables.users
        connection.execute(
            users.insert().values(
                user_id=2,
                goofy="jack",
                goofy2="jack",
                goofy4="jack",
                goofy7="jack",
                goofy8=12,
                goofy9=12,
                goofy10=12,
            ),
        )
        connection.execute(
            users.insert().values(
                user_id=3,
                goofy="lala",
                goofy2="lala",
                goofy4="lala",
                goofy7="lala",
                goofy8=15,
                goofy9=15,
                goofy10=15,
            ),
        )
        connection.execute(
            users.insert().values(
                user_id=4,
                goofy="fred",
                goofy2="fred",
                goofy4="fred",
                goofy7="fred",
                goofy8=9,
                goofy9=9,
                goofy10=9,
            ),
        )
        connection.execute(
            users.insert().values(
                user_id=5,
                goofy=None,
                goofy2=None,
                goofy4=None,
                goofy7=None,
                goofy8=None,
                goofy9=None,
                goofy10=None,
            ),
        )
class UserDefinedTest(
    _UserDefinedTypeFixture, fixtures.TablesTest, AssertsCompiledSQL
):
    # compile-only tests: no tables created, no data inserted
    run_create_tables = None
    run_inserts = None
    run_deletes = None

    """tests user-defined types."""

    def test_typedecorator_literal_render(self):
        class MyType(types.TypeDecorator):
            impl = String
            cache_ok = True

            def process_literal_param(self, value, dialect):
                return "HI->%s<-THERE" % value

        self.assert_compile(
            select(literal("test", MyType)),
            "SELECT 'HI->test<-THERE' AS anon_1",
            dialect="default",
            literal_binds=True,
        )

    def test_kw_colspec(self):
        # get_col_spec() may optionally accept **kw, receiving the
        # column as "type_expression"
        class MyType(types.UserDefinedType):
            def get_col_spec(self, **kw):
                return "FOOB %s" % kw["type_expression"].name

        class MyOtherType(types.UserDefinedType):
            def get_col_spec(self):
                return "BAR"

        t = Table("t", MetaData(), Column("bar", MyType, nullable=False))
        self.assert_compile(ddl.CreateColumn(t.c.bar), "bar FOOB bar NOT NULL")

        t = Table("t", MetaData(), Column("bar", MyOtherType, nullable=False))
        self.assert_compile(ddl.CreateColumn(t.c.bar), "bar BAR NOT NULL")

    def test_typedecorator_literal_render_fallback_bound(self):
        # fall back to process_bind_param for literal
        # value rendering.
        class MyType(types.TypeDecorator):
            impl = String
            cache_ok = True

            def process_bind_param(self, value, dialect):
                return "HI->%s<-THERE" % value

        self.assert_compile(
            select(literal("test", MyType)),
            "SELECT 'HI->test<-THERE' AS anon_1",
            dialect="default",
            literal_binds=True,
        )

    def test_typedecorator_impl(self):
        # the decorator's dialect impl wraps the raw type's dialect impl
        # and compiles the same, across several impl/kw combinations
        for impl_, exp, kw in [
            (Float, "FLOAT", {}),
            (Float, "FLOAT(2)", {"precision": 2}),
            (Float(2), "FLOAT(2)", {"precision": 4}),
            (Numeric(19, 2), "NUMERIC(19, 2)", {}),
        ]:
            for dialect_ in (
                dialects.postgresql,
                dialects.mssql,
                dialects.mysql,
            ):
                dialect_ = dialect_.dialect()

                raw_impl = types.to_instance(impl_, **kw)

                class MyType(types.TypeDecorator):
                    impl = impl_
                    cache_ok = True

                dec_type = MyType(**kw)

                eq_(dec_type.impl.__class__, raw_impl.__class__)

                raw_dialect_impl = raw_impl.dialect_impl(dialect_)
                dec_dialect_impl = dec_type.dialect_impl(dialect_)
                eq_(dec_dialect_impl.__class__, MyType)
                eq_(
                    raw_dialect_impl.__class__, dec_dialect_impl.impl.__class__
                )

                self.assert_compile(MyType(**kw), exp, dialect=dialect_)

    def test_user_defined_typedec_impl(self):
        class MyType(types.TypeDecorator):
            impl = Float
            cache_ok = True

            def load_dialect_impl(self, dialect):
                if dialect.name == "sqlite":
                    return String(50)
                else:
                    return super(MyType, self).load_dialect_impl(dialect)

        sl = dialects.sqlite.dialect()
        pg = dialects.postgresql.dialect()
        t = MyType()
        self.assert_compile(t, "VARCHAR(50)", dialect=sl)
        self.assert_compile(t, "FLOAT", dialect=pg)
        eq_(
            t.dialect_impl(dialect=sl).impl.__class__,
            String().dialect_impl(dialect=sl).__class__,
        )
        eq_(
            t.dialect_impl(dialect=pg).impl.__class__,
            Float().dialect_impl(pg).__class__,
        )

    @testing.combinations((Boolean,), (Enum,))
    def test_typedecorator_schematype_constraint(self, typ):
        # schema types wrapped in a TypeDecorator still emit their
        # CHECK constraint when create_constraint=True
        class B(TypeDecorator):
            impl = typ
            cache_ok = True

        t1 = Table("t1", MetaData(), Column("q", B(create_constraint=True)))
        eq_(
            len([c for c in t1.constraints if isinstance(c, CheckConstraint)]),
            1,
        )

    def test_type_decorator_repr(self):
        class MyType(TypeDecorator):
            impl = VARCHAR
            cache_ok = True

        eq_(repr(MyType(45)), "MyType(length=45)")

    def test_user_defined_typedec_impl_bind(self):
        class TypeOne(types.TypeEngine):
            def bind_processor(self, dialect):
                def go(value):
                    return value + " ONE"

                return go

        class TypeTwo(types.TypeEngine):
            def bind_processor(self, dialect):
                def go(value):
                    return value + " TWO"

                return go

        class MyType(types.TypeDecorator):
            impl = TypeOne
            cache_ok = True

            def load_dialect_impl(self, dialect):
                if dialect.name == "sqlite":
                    return TypeOne()
                else:
                    return TypeTwo()

            def process_bind_param(self, value, dialect):
                return "MYTYPE " + value

        sl = dialects.sqlite.dialect()
        pg = dialects.postgresql.dialect()
        t = MyType()
        # decorator's processor composes around the dialect impl's processor
        eq_(t._cached_bind_processor(sl)("foo"), "MYTYPE foo ONE")
        eq_(t._cached_bind_processor(pg)("foo"), "MYTYPE foo TWO")

    def test_user_defined_dialect_specific_args(self):
        class MyType(types.UserDefinedType):
            def __init__(self, foo="foo", **kwargs):
                super(MyType, self).__init__()
                self.foo = foo
                self.dialect_specific_args = kwargs

            def adapt(self, cls):
                return cls(foo=self.foo, **self.dialect_specific_args)

        t = MyType(bar="bar")
        a = t.dialect_impl(testing.db.dialect)
        eq_(a.foo, "foo")
        eq_(a.dialect_specific_args["bar"], "bar")
class TypeCoerceCastTest(fixtures.TablesTest):
    """Run each scenario through both cast() and type_coerce() against a
    plain-String column; MyType's processors mark values BIND_IN / BIND_OUT
    so we can see exactly where each coercion applies."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        class MyType(types.TypeDecorator):
            impl = String(50)
            cache_ok = True

            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

        cls.MyType = MyType

        Table("t", metadata, Column("data", String(50)))

    def test_insert_round_trip_cast(self, connection):
        self._test_insert_round_trip(cast, connection)

    def test_insert_round_trip_type_coerce(self, connection):
        self._test_insert_round_trip(type_coerce, connection)

    def _test_insert_round_trip(self, coerce_fn, conn):
        MyType = self.MyType
        t = self.tables.t

        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        eq_(
            conn.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
            [("BIND_INd1BIND_OUT",)],
        )

    def test_coerce_from_nulltype_cast(self, connection):
        self._test_coerce_from_nulltype(cast, connection)

    def test_coerce_from_nulltype_type_coerce(self, connection):
        self._test_coerce_from_nulltype(type_coerce, connection)

    def _test_coerce_from_nulltype(self, coerce_fn, conn):
        MyType = self.MyType

        # test coerce from nulltype - e.g. use an object that
        # doesn't match to a known type
        class MyObj:
            def __str__(self):
                return "THISISMYOBJ"

        t = self.tables.t

        conn.execute(t.insert().values(data=coerce_fn(MyObj(), MyType)))

        eq_(
            conn.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
            [("BIND_INTHISISMYOBJBIND_OUT",)],
        )

    def test_vs_non_coerced_cast(self, connection):
        self._test_vs_non_coerced(cast, connection)

    def test_vs_non_coerced_type_coerce(self, connection):
        self._test_vs_non_coerced(type_coerce, connection)

    def _test_vs_non_coerced(self, coerce_fn, conn):
        MyType = self.MyType
        t = self.tables.t

        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        # un-coerced column shows the raw stored value; coerced one
        # additionally runs the result processor
        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(t.c.data, MyType))
            ).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

    def test_vs_non_coerced_alias_cast(self, connection):
        self._test_vs_non_coerced_alias(cast, connection)

    def test_vs_non_coerced_alias_type_coerce(self, connection):
        self._test_vs_non_coerced_alias(type_coerce, connection)

    def _test_vs_non_coerced_alias(self, coerce_fn, conn):
        # the coerced type survives wrapping in an alias/subquery
        MyType = self.MyType
        t = self.tables.t

        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        eq_(
            conn.execute(
                select(t.c.data.label("x"), coerce_fn(t.c.data, MyType))
                .alias()
                .select()
            ).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

    def test_vs_non_coerced_where_cast(self, connection):
        self._test_vs_non_coerced_where(cast, connection)

    def test_vs_non_coerced_where_type_coerce(self, connection):
        self._test_vs_non_coerced_where(type_coerce, connection)

    def _test_vs_non_coerced_where(self, coerce_fn, conn):
        MyType = self.MyType

        t = self.tables.t
        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        # coerce on left side
        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(t.c.data, MyType)).where(
                    coerce_fn(t.c.data, MyType) == "d1"
                )
            ).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

        # coerce on right side
        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(t.c.data, MyType)).where(
                    t.c.data == coerce_fn("d1", MyType)
                )
            ).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

    def test_coerce_none_cast(self, connection):
        self._test_coerce_none(cast, connection)

    def test_coerce_none_type_coerce(self, connection):
        self._test_coerce_none(type_coerce, connection)

    def _test_coerce_none(self, coerce_fn, conn):
        # a coerced None binds as "BIND_INNone", so neither comparison
        # matches the stored row
        MyType = self.MyType

        t = self.tables.t
        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(t.c.data, MyType)).where(
                    t.c.data == coerce_fn(None, MyType)
                )
            ).fetchall(),
            [],
        )

        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(t.c.data, MyType)).where(
                    coerce_fn(t.c.data, MyType) == None
                )
            ).fetchall(),  # noqa
            [],
        )

    def test_resolve_clause_element_cast(self, connection):
        self._test_resolve_clause_element(cast, connection)

    def test_resolve_clause_element_type_coerce(self, connection):
        self._test_resolve_clause_element(type_coerce, connection)

    def _test_resolve_clause_element(self, coerce_fn, conn):
        # objects providing __clause_element__ are unwrapped before coercion
        MyType = self.MyType

        t = self.tables.t
        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        class MyFoob:
            def __clause_element__(self):
                return t.c.data

        eq_(
            conn.execute(
                select(t.c.data, coerce_fn(MyFoob(), MyType))
            ).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

    def test_cast_replace_col_w_bind(self, connection):
        self._test_replace_col_w_bind(cast, connection)

    def test_type_coerce_replace_col_w_bind(self, connection):
        self._test_replace_col_w_bind(type_coerce, connection)

    def _test_replace_col_w_bind(self, coerce_fn, conn):
        MyType = self.MyType

        t = self.tables.t
        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        stmt = select(t.c.data, coerce_fn(t.c.data, MyType))

        def col_to_bind(col):
            # replacement_traverse callback: swap the column for a bind
            if col is t.c.data:
                return bindparam(None, "x", type_=col.type, unique=True)
            return None

        # ensure we evaluate the expression so that we can see
        # the clone resets this info
        stmt.compile()

        new_stmt = visitors.replacement_traverse(stmt, {}, col_to_bind)

        # original statement
        eq_(
            conn.execute(stmt).fetchall(),
            [("BIND_INd1", "BIND_INd1BIND_OUT")],
        )

        # replaced with binds; CAST can't affect the bound parameter
        # on the way in here
        eq_(
            conn.execute(new_stmt).fetchall(),
            [("x", "BIND_INxBIND_OUT")]
            if coerce_fn is type_coerce
            else [("x", "xBIND_OUT")],
        )

    def test_cast_bind(self, connection):
        self._test_bind(cast, connection)

    def test_type_bind(self, connection):
        self._test_bind(type_coerce, connection)

    def _test_bind(self, coerce_fn, conn):
        MyType = self.MyType

        t = self.tables.t
        conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))

        stmt = select(
            bindparam(None, "x", String(50), unique=True),
            coerce_fn(bindparam(None, "x", String(50), unique=True), MyType),
        )

        eq_(
            conn.execute(stmt).fetchall(),
            [("x", "BIND_INxBIND_OUT")]
            if coerce_fn is type_coerce
            else [("x", "xBIND_OUT")],
        )

    def test_cast_existing_typed(self, connection):
        MyType = self.MyType
        coerce_fn = cast

        # when cast() is given an already typed value,
        # the type does not take effect on the value itself.
        eq_(
            connection.scalar(select(coerce_fn(literal("d1"), MyType))),
            "d1BIND_OUT",
        )

    def test_type_coerce_existing_typed(self, connection):
        MyType = self.MyType
        coerce_fn = type_coerce
        t = self.tables.t

        # type_coerce does upgrade the given expression to the
        # given type.

        connection.execute(
            t.insert().values(data=coerce_fn(literal("d1"), MyType))
        )

        eq_(
            connection.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
            [("BIND_INd1BIND_OUT",)],
        )
class VariantBackendTest(fixtures.TestBase, AssertsCompiledSQL):
    """Backend round-trip and compile tests for with_variant() combined
    with TypeDecorator and UserDefinedType."""

    __backend__ = True

    @testing.fixture
    def variant_roundtrip(self, metadata, connection):
        # returns a callable that creates a one-column table with the
        # given datatype, inserts `data`, and asserts `assert_data` comes
        # back both from a plain SELECT and from an IN comparison
        def run(datatype, data, assert_data):
            t = Table(
                "t",
                metadata,
                Column("data", datatype),
            )
            t.create(connection)
            connection.execute(t.insert(), [{"data": elem} for elem in data])
            eq_(
                connection.execute(select(t).order_by(t.c.data)).all(),
                [(elem,) for elem in assert_data],
            )

            eq_(
                # test an IN, which in 1.4 is an expanding
                connection.execute(
                    select(t).where(t.c.data.in_(data)).order_by(t.c.data)
                ).all(),
                [(elem,) for elem in assert_data],
            )

        return run

    def test_type_decorator_variant_one_roundtrip(self, variant_roundtrip):
        class Foo(TypeDecorator):
            impl = String(50)
            cache_ok = True

        # on postgresql the Integer variant is chosen; elsewhere Foo/String
        if testing.against("postgresql"):
            data = [5, 6, 10]
        else:
            data = ["five", "six", "ten"]
        variant_roundtrip(
            Foo().with_variant(Integer, "postgresql"), data, data
        )

    def test_type_decorator_variant_two(self, variant_roundtrip):
        class UTypeOne(types.UserDefinedType):
            def get_col_spec(self):
                return "VARCHAR(50)"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UONE"

                return process

        class UTypeTwo(types.UserDefinedType):
            def get_col_spec(self):
                return "VARCHAR(50)"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UTWO"

                return process

        variant = UTypeOne()
        for db in ["postgresql", "mysql", "mariadb"]:
            variant = variant.with_variant(UTypeTwo(), db)

        class Foo(TypeDecorator):
            impl = variant
            cache_ok = True

        # three-way expectation: pg takes the Integer variant of Foo;
        # mysql/mariadb take UTypeTwo; everything else UTypeOne
        if testing.against("postgresql"):
            data = assert_data = [5, 6, 10]
        elif testing.against("mysql") or testing.against("mariadb"):
            data = ["five", "six", "ten"]
            assert_data = ["fiveUTWO", "sixUTWO", "tenUTWO"]
        else:
            data = ["five", "six", "ten"]
            assert_data = ["fiveUONE", "sixUONE", "tenUONE"]

        variant_roundtrip(
            Foo().with_variant(Integer, "postgresql"), data, assert_data
        )

    def test_type_decorator_variant_three(self, variant_roundtrip):
        # TypeDecorator used as the variant rather than the base
        class Foo(TypeDecorator):
            impl = String
            cache_ok = True

        if testing.against("postgresql"):
            data = ["five", "six", "ten"]
        else:
            data = [5, 6, 10]

        variant_roundtrip(
            Integer().with_variant(Foo(), "postgresql"), data, data
        )

    def test_type_decorator_compile_variant_one(self):
        class Foo(TypeDecorator):
            impl = String
            cache_ok = True

        self.assert_compile(
            Foo().with_variant(Integer, "sqlite"),
            "INTEGER",
            dialect=dialects.sqlite.dialect(),
        )

        self.assert_compile(
            Foo().with_variant(Integer, "sqlite"),
            "VARCHAR",
            dialect=dialects.postgresql.dialect(),
        )

    def test_type_decorator_compile_variant_two(self):
        class UTypeOne(types.UserDefinedType):
            cache_ok = True

            def get_col_spec(self):
                return "UTYPEONE"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UONE"

                return process

        class UTypeTwo(types.UserDefinedType):
            cache_ok = True

            def get_col_spec(self):
                return "UTYPETWO"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UTWO"

                return process

        variant = UTypeOne().with_variant(UTypeTwo(), "postgresql")

        class Foo(TypeDecorator):
            impl = variant
            cache_ok = True

        self.assert_compile(
            Foo().with_variant(Integer, "sqlite"),
            "INTEGER",
            dialect=dialects.sqlite.dialect(),
        )

        self.assert_compile(
            Foo().with_variant(Integer, "sqlite"),
            "UTYPETWO",
            dialect=dialects.postgresql.dialect(),
        )

    def test_type_decorator_compile_variant_three(self):
        class Foo(TypeDecorator):
            impl = String
            cache_ok = True

        self.assert_compile(
            Integer().with_variant(Foo(), "postgresql"),
            "INTEGER",
            dialect=dialects.sqlite.dialect(),
        )

        self.assert_compile(
            Integer().with_variant(Foo(), "postgresql"),
            "VARCHAR",
            dialect=dialects.postgresql.dialect(),
        )
class VariantTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for ``TypeEngine.with_variant()``: per-dialect selection of
    DDL col specs and bind processors, variant composition, and error
    cases for duplicate / nested variants."""

    def setup_test(self):
        # Three UserDefinedTypes whose col specs and bind processors are
        # distinguishable, so assertions can see which variant was used.
        class UTypeOne(types.UserDefinedType):
            cache_ok = True

            def get_col_spec(self):
                return "UTYPEONE"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UONE"

                return process

        class UTypeTwo(types.UserDefinedType):
            cache_ok = True

            def get_col_spec(self):
                return "UTYPETWO"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UTWO"

                return process

        class UTypeThree(types.UserDefinedType):
            cache_ok = True

            def get_col_spec(self):
                return "UTYPETHREE"

        self.UTypeOne = UTypeOne
        self.UTypeTwo = UTypeTwo
        self.UTypeThree = UTypeThree
        # single variant: UTypeTwo on postgresql, UTypeOne elsewhere
        self.variant = self.UTypeOne().with_variant(
            self.UTypeTwo(), "postgresql"
        )
        # composite: additionally UTypeThree on mysql
        self.composite = self.variant.with_variant(self.UTypeThree(), "mysql")

    def test_illegal_dupe(self):
        """Registering the same dialect twice raises ArgumentError."""
        v = self.UTypeOne().with_variant(self.UTypeTwo(), "postgresql")
        assert_raises_message(
            exc.ArgumentError,
            "Dialect 'postgresql' is already present "
            "in the mapping for this UTypeOne()",
            lambda: v.with_variant(self.UTypeThree(), "postgresql"),
        )

    def test_no_variants_of_variants(self):
        """A type that already has variants can't itself be a variant."""
        t = Integer().with_variant(Float(), "postgresql")
        with expect_raises_message(
            exc.ArgumentError,
            r"can't pass a type that already has variants as a "
            r"dialect-level type to with_variant\(\)",
        ):
            String().with_variant(t, "mysql")

    def test_compile(self):
        """Variant compiles to base type except on the mapped dialect."""
        self.assert_compile(self.variant, "UTYPEONE", use_default_dialect=True)
        self.assert_compile(
            self.variant, "UTYPEONE", dialect=dialects.mysql.dialect()
        )
        self.assert_compile(
            self.variant, "UTYPETWO", dialect=dialects.postgresql.dialect()
        )

    def test_to_instance(self):
        # a type *class* passed to with_variant is instantiated
        self.assert_compile(
            self.UTypeOne().with_variant(self.UTypeTwo, "postgresql"),
            "UTYPETWO",
            dialect=dialects.postgresql.dialect(),
        )

    def test_typedec_gen_dialect_impl(self):
        """test that gen_dialect_impl passes onto a TypeDecorator, as
        TypeDecorator._gen_dialect_impl() itself has special behaviors.

        """

        class MyDialectString(String):
            pass

        class MyString(TypeDecorator):
            impl = String
            cache_ok = True

            def load_dialect_impl(self, dialect):
                return MyDialectString()

        variant = String().with_variant(MyString(), "mysql")
        dialect_impl = variant._gen_dialect_impl(mysql.dialect())
        is_(dialect_impl.impl.__class__, MyDialectString)

    def test_compile_composite(self):
        """Each dialect in a composite mapping gets its own col spec."""
        self.assert_compile(
            self.composite, "UTYPEONE", use_default_dialect=True
        )
        self.assert_compile(
            self.composite, "UTYPETHREE", dialect=dialects.mysql.dialect()
        )
        self.assert_compile(
            self.composite, "UTYPETWO", dialect=dialects.postgresql.dialect()
        )

    def test_bind_process(self):
        """Bind processor follows the variant mapping per dialect."""
        eq_(
            self.variant._cached_bind_processor(dialects.mysql.dialect())(
                "foo"
            ),
            "fooUONE",
        )
        eq_(
            self.variant._cached_bind_processor(default.DefaultDialect())(
                "foo"
            ),
            "fooUONE",
        )
        eq_(
            self.variant._cached_bind_processor(dialects.postgresql.dialect())(
                "foo"
            ),
            "fooUTWO",
        )

    def test_bind_process_composite(self):
        # UTypeThree defines no bind_processor, so mysql gets None
        assert (
            self.composite._cached_bind_processor(dialects.mysql.dialect())
            is None
        )
        eq_(
            self.composite._cached_bind_processor(default.DefaultDialect())(
                "foo"
            ),
            "fooUONE",
        )
        eq_(
            self.composite._cached_bind_processor(
                dialects.postgresql.dialect()
            )("foo"),
            "fooUTWO",
        )

    def test_comparator_variant(self):
        # the right side of a comparison coerces to the variant type itself
        expr = column("x", self.variant) == "bar"
        is_(expr.right.type, self.variant)

    @testing.only_on("sqlite")
    @testing.provide_metadata
    def test_round_trip(self, connection):
        """Variant bind processor is applied on a real sqlite round trip."""
        variant = self.UTypeOne().with_variant(self.UTypeTwo(), "sqlite")
        t = Table("t", self.metadata, Column("x", variant))
        t.create(connection)
        connection.execute(t.insert(), dict(x="foo"))
        eq_(connection.scalar(select(t.c.x).where(t.c.x == "foo")), "fooUTWO")

    @testing.only_on("sqlite")
    @testing.provide_metadata
    def test_round_trip_sqlite_datetime(self, connection):
        """The sqlite DATETIME variant truncates microseconds on both
        bind and result."""
        variant = DateTime().with_variant(
            dialects.sqlite.DATETIME(truncate_microseconds=True), "sqlite"
        )
        t = Table("t", self.metadata, Column("x", variant))
        t.create(connection)
        connection.execute(
            t.insert(),
            dict(x=datetime.datetime(2015, 4, 18, 10, 15, 17, 4839)),
        )
        eq_(
            connection.scalar(
                select(t.c.x).where(
                    t.c.x == datetime.datetime(2015, 4, 18, 10, 15, 17, 1059)
                )
            ),
            datetime.datetime(2015, 4, 18, 10, 15, 17),
        )
class EnumTest(AssertsCompiledSQL, fixtures.TablesTest):
    """Round-trip, validation and DDL tests for the Enum type: plain
    string enums, PEP 435 enum classes, native vs. non-native enums,
    aliases, values_callable, sort keys, and check constraints."""

    __backend__ = True

    # PEP 435 enum with an alias: "four" aliases the "three" member.
    SomeEnum = pep435_enum("SomeEnum")

    one = SomeEnum("one", 1)
    two = SomeEnum("two", 2)
    three = SomeEnum("three", 3, "four")
    a_member = SomeEnum("AMember", "a")
    b_member = SomeEnum("BMember", "b")

    SomeOtherEnum = pep435_enum("SomeOtherEnum")

    other_one = SomeOtherEnum("one", 1)
    other_two = SomeOtherEnum("two", 2)
    other_three = SomeOtherEnum("three", 3)
    other_a_member = SomeOtherEnum("AMember", "a")
    other_b_member = SomeOtherEnum("BMember", "b")

    @staticmethod
    def get_enum_string_values(some_enum):
        # values_callable target: persist str(member.value) rather than
        # the member name
        return [str(v.value) for v in some_enum.__members__.values()]

    @classmethod
    def define_tables(cls, metadata):
        # note create_constraint has changed in 1.4 as of #5367
        Table(
            "enum_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "someenum",
                Enum(
                    "one",
                    "two",
                    "three",
                    name="myenum",
                    create_constraint=True,
                ),
            ),
        )

        Table(
            "non_native_enum_table",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column(
                "someenum",
                Enum(
                    "one",
                    "two",
                    "three",
                    native_enum=False,
                    create_constraint=True,
                ),
            ),
            Column(
                "someotherenum",
                Enum(
                    "one",
                    "two",
                    "three",
                    native_enum=False,
                    validate_strings=True,
                ),
            ),
        )

        Table(
            "stdlib_enum_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "someenum",
                Enum(cls.SomeEnum, create_constraint=True, omit_aliases=False),
            ),
        )

        Table(
            "stdlib_enum_table_no_alias",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "someenum",
                Enum(
                    cls.SomeEnum,
                    create_constraint=True,
                    omit_aliases=True,
                    name="someenum_no_alias",
                ),
            ),
        )

        Table(
            "stdlib_enum_table2",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "someotherenum",
                Enum(
                    cls.SomeOtherEnum,
                    values_callable=EnumTest.get_enum_string_values,
                    create_constraint=True,
                ),
            ),
        )

    def test_python_type(self):
        """python_type of a PEP 435 Enum is the enum class itself."""
        eq_(types.Enum(self.SomeOtherEnum).python_type, self.SomeOtherEnum)

    def test_pickle_types(self):
        """Enum columns (plain and PEP 435) survive pickling."""
        global SomeEnum
        SomeEnum = self.SomeEnum
        for loads, dumps in picklers():
            column_types = [
                Column("Enu", Enum("x", "y", "z", name="somename")),
                Column("En2", Enum(self.SomeEnum, omit_aliases=False)),
            ]
            for column_type in column_types:
                meta = MetaData()
                Table("foo", meta, column_type)
                loads(dumps(column_type))
                loads(dumps(meta))

    def test_validators_pep435(self):
        """Bind/result/literal processors validate against PEP 435
        members; non-validating bind passes unknown strings through."""
        type_ = Enum(self.SomeEnum, omit_aliases=False)
        validate_type = Enum(
            self.SomeEnum, validate_strings=True, omit_aliases=False
        )

        bind_processor = type_.bind_processor(testing.db.dialect)
        bind_processor_validates = validate_type.bind_processor(
            testing.db.dialect
        )
        eq_(bind_processor("one"), "one")
        eq_(bind_processor(self.one), "one")
        # without validate_strings, unknown strings pass through
        eq_(bind_processor("foo"), "foo")
        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: someenum. "
            "Possible values: one, two, three, ..., BMember",
            bind_processor,
            5,
        )

        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: someenum. "
            "Possible values: one, two, three, ..., BMember",
            bind_processor_validates,
            "foo",
        )

        result_processor = type_.result_processor(testing.db.dialect, None)

        eq_(result_processor("one"), self.one)
        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: someenum. "
            "Possible values: one, two, three, ..., BMember",
            result_processor,
            "foo",
        )

        literal_processor = type_.literal_processor(testing.db.dialect)
        validate_literal_processor = validate_type.literal_processor(
            testing.db.dialect
        )
        eq_(literal_processor("one"), "'one'")

        eq_(literal_processor("foo"), "'foo'")

        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: someenum. "
            "Possible values: one, two, three, ..., BMember",
            literal_processor,
            5,
        )

        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: someenum. "
            "Possible values: one, two, three, ..., BMember",
            validate_literal_processor,
            "foo",
        )

    def test_validators_plain(self):
        """Same validation matrix as above for a plain string Enum."""
        type_ = Enum("one", "two")
        validate_type = Enum("one", "two", validate_strings=True)

        bind_processor = type_.bind_processor(testing.db.dialect)
        bind_processor_validates = validate_type.bind_processor(
            testing.db.dialect
        )
        eq_(bind_processor("one"), "one")
        eq_(bind_processor("foo"), "foo")
        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: None. "
            "Possible values: one, two",
            bind_processor,
            5,
        )

        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: None. "
            "Possible values: one, two",
            bind_processor_validates,
            "foo",
        )

        result_processor = type_.result_processor(testing.db.dialect, None)

        eq_(result_processor("one"), "one")
        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: None. "
            "Possible values: one, two",
            result_processor,
            "foo",
        )

        literal_processor = type_.literal_processor(testing.db.dialect)
        validate_literal_processor = validate_type.literal_processor(
            testing.db.dialect
        )
        eq_(literal_processor("one"), "'one'")
        eq_(literal_processor("foo"), "'foo'")
        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: None. "
            "Possible values: one, two",
            literal_processor,
            5,
        )

        assert_raises_message(
            LookupError,
            "'foo' is not among the defined enum values. Enum name: None. "
            "Possible values: one, two",
            validate_literal_processor,
            "foo",
        )

    def test_enum_raise_lookup_ellipses(self):
        """Long member names are elided with '..' in the error message."""
        type_ = Enum("one", "twothreefourfivesix", "seven", "eight")
        bind_processor = type_.bind_processor(testing.db.dialect)

        eq_(bind_processor("one"), "one")
        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: None. "
            "Possible values: one, twothreefou.., seven, eight",
            bind_processor,
            5,
        )

    def test_enum_raise_lookup_none(self):
        """An empty Enum reports 'Possible values: None'."""
        type_ = Enum()
        bind_processor = type_.bind_processor(testing.db.dialect)

        assert_raises_message(
            LookupError,
            "'5' is not among the defined enum values. Enum name: None. "
            "Possible values: None",
            bind_processor,
            5,
        )

    def test_validators_not_in_like_roundtrip(self, connection):
        """LIKE comparisons against an enum column bypass validation."""
        enum_table = self.tables["non_native_enum_table"]

        connection.execute(
            enum_table.insert(),
            [
                {"id": 1, "someenum": "two"},
                {"id": 2, "someenum": "two"},
                {"id": 3, "someenum": "one"},
            ],
        )

        eq_(
            connection.execute(
                enum_table.select()
                .where(enum_table.c.someenum.like("%wo%"))
                .order_by(enum_table.c.id)
            ).fetchall(),
            [(1, "two", None), (2, "two", None)],
        )

    def test_validators_not_in_concatenate_roundtrip(self, connection):
        """String concatenation with an enum column bypasses validation."""
        enum_table = self.tables["non_native_enum_table"]

        connection.execute(
            enum_table.insert(),
            [
                {"id": 1, "someenum": "two"},
                {"id": 2, "someenum": "two"},
                {"id": 3, "someenum": "one"},
            ],
        )

        eq_(
            connection.execute(
                select("foo" + enum_table.c.someenum).order_by(enum_table.c.id)
            ).fetchall(),
            [("footwo",), ("footwo",), ("fooone",)],
        )

    def test_round_trip(self, connection):
        """Basic insert/select round trip for a native enum column."""
        enum_table = self.tables["enum_table"]

        connection.execute(
            enum_table.insert(),
            [
                {"id": 1, "someenum": "two"},
                {"id": 2, "someenum": "two"},
                {"id": 3, "someenum": "one"},
            ],
        )

        eq_(
            connection.execute(
                enum_table.select().order_by(enum_table.c.id)
            ).fetchall(),
            [(1, "two"), (2, "two"), (3, "one")],
        )

    def test_null_round_trip(self, connection):
        """NULL survives both native and non-native enum columns."""
        enum_table = self.tables.enum_table
        non_native_enum_table = self.tables.non_native_enum_table

        connection.execute(enum_table.insert(), {"id": 1, "someenum": None})
        eq_(connection.scalar(select(enum_table.c.someenum)), None)

        connection.execute(
            non_native_enum_table.insert(), {"id": 1, "someenum": None}
        )
        eq_(connection.scalar(select(non_native_enum_table.c.someenum)), None)

    @testing.requires.enforces_check_constraints
    def test_check_constraint(self, connection):
        """The generated CHECK constraint rejects out-of-set values."""
        assert_raises(
            (
                exc.IntegrityError,
                exc.ProgrammingError,
                exc.OperationalError,
                # PyMySQL raising InternalError until
                # https://github.com/PyMySQL/PyMySQL/issues/607 is resolved
                exc.InternalError,
            ),
            connection.exec_driver_sql,
            "insert into non_native_enum_table "
            "(id, someenum) values(1, 'four')",
        )

    @testing.requires.enforces_check_constraints
    def test_variant_default_is_not_schematype(self, metadata):
        """A non-SchemaType default with a SchemaType variant still
        produces the variant's CHECK constraint."""
        t = Table(
            "my_table",
            metadata,
            Column(
                "data",
                String(50).with_variant(
                    Enum(
                        "four",
                        "five",
                        "six",
                        native_enum=False,
                        name="e2",
                        create_constraint=True,
                    ),
                    testing.db.dialect.name,
                ),
            ),
        )

        # the base String() didn't create a constraint or even do any
        # events.  But Column looked for SchemaType in _variant_mapping
        # and found our type anyway.
        eq_(
            len([c for c in t.constraints if isinstance(c, CheckConstraint)]),
            1,
        )

        metadata.create_all(testing.db)

        # not using the connection fixture because we need to rollback and
        # start again in the middle
        with testing.db.connect() as connection:
            # postgresql needs this in order to continue after the exception
            trans = connection.begin()
            assert_raises(
                (exc.DBAPIError,),
                connection.exec_driver_sql,
                "insert into my_table (data) values('two')",
            )
            trans.rollback()

            with connection.begin():
                connection.exec_driver_sql(
                    "insert into my_table (data) values ('four')"
                )
                eq_(connection.execute(select(t.c.data)).scalar(), "four")

    @testing.requires.enforces_check_constraints
    def test_variant_we_are_default(self, metadata):
        # test that the "variant" does not create a constraint
        t = Table(
            "my_table",
            metadata,
            Column(
                "data",
                Enum(
                    "one",
                    "two",
                    "three",
                    native_enum=False,
                    name="e1",
                    create_constraint=True,
                ).with_variant(
                    Enum(
                        "four",
                        "five",
                        "six",
                        native_enum=False,
                        name="e2",
                        create_constraint=True,
                    ),
                    "some_other_db",
                ),
            ),
            mysql_engine="InnoDB",
        )

        # both the base enum and the variant attach a CheckConstraint
        eq_(
            len([c for c in t.constraints if isinstance(c, CheckConstraint)]),
            2,
        )

        metadata.create_all(testing.db)

        # not using the connection fixture because we need to rollback and
        # start again in the middle
        with testing.db.connect() as connection:
            # postgresql needs this in order to continue after the exception
            trans = connection.begin()
            # the base enum's constraint is active ("four" is rejected)
            assert_raises(
                (exc.DBAPIError,),
                connection.exec_driver_sql,
                "insert into my_table " "(data) values('four')",
            )
            trans.rollback()

            with connection.begin():
                connection.exec_driver_sql(
                    "insert into my_table (data) values ('two')"
                )
                eq_(connection.execute(select(t.c.data)).scalar(), "two")

    @testing.requires.enforces_check_constraints
    def test_variant_we_are_not_default(self, metadata):
        # test that the "variant" does not create a constraint
        t = Table(
            "my_table",
            metadata,
            Column(
                "data",
                Enum(
                    "one",
                    "two",
                    "three",
                    native_enum=False,
                    name="e1",
                    create_constraint=True,
                ).with_variant(
                    Enum(
                        "four",
                        "five",
                        "six",
                        native_enum=False,
                        name="e2",
                        create_constraint=True,
                    ),
                    testing.db.dialect.name,
                ),
            ),
        )

        # ensure Variant isn't exploding the constraints
        eq_(
            len([c for c in t.constraints if isinstance(c, CheckConstraint)]),
            2,
        )

        metadata.create_all(testing.db)

        # not using the connection fixture because we need to rollback and
        # start again in the middle
        with testing.db.connect() as connection:
            # postgresql needs this in order to continue after the exception
            trans = connection.begin()
            # the variant's constraint is the active one ("two" rejected)
            assert_raises(
                (exc.DBAPIError,),
                connection.exec_driver_sql,
                "insert into my_table (data) values('two')",
            )
            trans.rollback()

            with connection.begin():
                connection.exec_driver_sql(
                    "insert into my_table (data) values ('four')"
                )
                eq_(connection.execute(select(t.c.data)).scalar(), "four")

    def test_skip_check_constraint(self, connection):
        """Without create_constraint the DB accepts any value; the
        result processor is what raises on fetch."""
        connection.exec_driver_sql(
            "insert into non_native_enum_table "
            "(id, someotherenum) values(1, 'four')"
        )
        eq_(
            connection.exec_driver_sql(
                "select someotherenum from non_native_enum_table"
            ).scalar(),
            "four",
        )
        assert_raises_message(
            LookupError,
            "'four' is not among the defined enum values. "
            "Enum name: None. Possible values: one, two, three",
            connection.scalar,
            select(self.tables.non_native_enum_table.c.someotherenum),
        )

    def test_non_native_round_trip(self, connection):
        """Insert/select round trip for a non-native (VARCHAR) enum."""
        non_native_enum_table = self.tables["non_native_enum_table"]

        connection.execute(
            non_native_enum_table.insert(),
            [
                {"id": 1, "someenum": "two"},
                {"id": 2, "someenum": "two"},
                {"id": 3, "someenum": "one"},
            ],
        )

        eq_(
            connection.execute(
                select(
                    non_native_enum_table.c.id,
                    non_native_enum_table.c.someenum,
                ).order_by(non_native_enum_table.c.id)
            ).fetchall(),
            [(1, "two"), (2, "two"), (3, "one")],
        )

    def test_pep435_default_sort_key(self):
        """Default sort key orders members by their DB value."""
        one, two, a_member, b_member = (
            self.one,
            self.two,
            self.a_member,
            self.b_member,
        )
        typ = Enum(self.SomeEnum, omit_aliases=False)

        is_(typ.sort_key_function.__func__, typ._db_value_for_elem.__func__)

        eq_(
            sorted([two, one, a_member, b_member], key=typ.sort_key_function),
            [a_member, b_member, one, two],
        )

    def test_pep435_custom_sort_key(self):
        """A user-supplied sort_key_function is used as-is."""
        one, two, a_member, b_member = (
            self.one,
            self.two,
            self.a_member,
            self.b_member,
        )

        def sort_enum_key_value(value):
            return str(value.value)

        typ = Enum(
            self.SomeEnum,
            sort_key_function=sort_enum_key_value,
            omit_aliases=False,
        )
        is_(typ.sort_key_function, sort_enum_key_value)

        eq_(
            sorted([two, one, a_member, b_member], key=typ.sort_key_function),
            [one, two, a_member, b_member],
        )

    def test_pep435_no_sort_key(self):
        """sort_key_function=None disables sorting support."""
        typ = Enum(self.SomeEnum, sort_key_function=None, omit_aliases=False)
        is_(typ.sort_key_function, None)

    def test_pep435_enum_round_trip(self, connection):
        """Aliased values ('four') come back as the canonical member."""
        stdlib_enum_table = self.tables["stdlib_enum_table"]

        connection.execute(
            stdlib_enum_table.insert(),
            [
                {"id": 1, "someenum": self.SomeEnum.two},
                {"id": 2, "someenum": self.SomeEnum.two},
                {"id": 3, "someenum": self.SomeEnum.one},
                {"id": 4, "someenum": self.SomeEnum.three},
                {"id": 5, "someenum": self.SomeEnum.four},
                {"id": 6, "someenum": "three"},
                {"id": 7, "someenum": "four"},
            ],
        )

        eq_(
            connection.execute(
                stdlib_enum_table.select().order_by(stdlib_enum_table.c.id)
            ).fetchall(),
            [
                (1, self.SomeEnum.two),
                (2, self.SomeEnum.two),
                (3, self.SomeEnum.one),
                (4, self.SomeEnum.three),
                (5, self.SomeEnum.three),
                (6, self.SomeEnum.three),
                (7, self.SomeEnum.three),
            ],
        )

    def test_pep435_enum_values_callable_round_trip(self, connection):
        """values_callable stores str(value); members come back intact."""
        stdlib_enum_table_custom_values = self.tables["stdlib_enum_table2"]

        connection.execute(
            stdlib_enum_table_custom_values.insert(),
            [
                {"id": 1, "someotherenum": self.SomeOtherEnum.AMember},
                {"id": 2, "someotherenum": self.SomeOtherEnum.BMember},
                {"id": 3, "someotherenum": self.SomeOtherEnum.AMember},
            ],
        )

        eq_(
            connection.execute(
                stdlib_enum_table_custom_values.select().order_by(
                    stdlib_enum_table_custom_values.c.id
                )
            ).fetchall(),
            [
                (1, self.SomeOtherEnum.AMember),
                (2, self.SomeOtherEnum.BMember),
                (3, self.SomeOtherEnum.AMember),
            ],
        )

    def test_pep435_enum_expanding_in(self, connection):
        """PEP 435 members work inside an expanding IN bindparam."""
        stdlib_enum_table_custom_values = self.tables["stdlib_enum_table2"]

        connection.execute(
            stdlib_enum_table_custom_values.insert(),
            [
                {"id": 1, "someotherenum": self.SomeOtherEnum.one},
                {"id": 2, "someotherenum": self.SomeOtherEnum.two},
                {"id": 3, "someotherenum": self.SomeOtherEnum.three},
            ],
        )

        stmt = (
            stdlib_enum_table_custom_values.select()
            .where(
                stdlib_enum_table_custom_values.c.someotherenum.in_(
                    bindparam("member", expanding=True)
                )
            )
            .order_by(stdlib_enum_table_custom_values.c.id)
        )
        eq_(
            connection.execute(
                stmt,
                {"member": [self.SomeOtherEnum.one, self.SomeOtherEnum.three]},
            ).fetchall(),
            [(1, self.SomeOtherEnum.one), (3, self.SomeOtherEnum.three)],
        )

    def test_adapt(self):
        """adapt() preserves native_enum, name, schema, metadata and the
        full (alias-expanded) member list."""
        from sqlalchemy.dialects.postgresql import ENUM

        e1 = Enum("one", "two", "three", native_enum=False)

        false_adapt = e1.adapt(ENUM)
        eq_(false_adapt.native_enum, False)
        # a non-native enum never becomes a PG ENUM
        assert not isinstance(false_adapt, ENUM)

        e1 = Enum("one", "two", "three", native_enum=True)
        true_adapt = e1.adapt(ENUM)
        eq_(true_adapt.native_enum, True)
        assert isinstance(true_adapt, ENUM)

        e1 = Enum(
            "one",
            "two",
            "three",
            name="foo",
            schema="bar",
            metadata=MetaData(),
        )
        eq_(e1.adapt(ENUM).name, "foo")
        eq_(e1.adapt(ENUM).schema, "bar")
        is_(e1.adapt(ENUM).metadata, e1.metadata)
        eq_(e1.adapt(Enum).name, "foo")
        eq_(e1.adapt(Enum).schema, "bar")
        is_(e1.adapt(Enum).metadata, e1.metadata)

        e1 = Enum(self.SomeEnum, omit_aliases=False)
        eq_(e1.adapt(ENUM).name, "someenum")
        eq_(
            e1.adapt(ENUM).enums,
            ["one", "two", "three", "four", "AMember", "BMember"],
        )

        e1_vc = Enum(
            self.SomeOtherEnum, values_callable=EnumTest.get_enum_string_values
        )
        eq_(e1_vc.adapt(ENUM).name, "someotherenum")
        eq_(e1_vc.adapt(ENUM).enums, ["1", "2", "3", "a", "b"])

    def test_adapt_length(self):
        """Explicit and derived VARCHAR lengths survive adapt()."""
        from sqlalchemy.dialects.postgresql import ENUM

        e1 = Enum("one", "two", "three", length=50, native_enum=False)
        eq_(e1.adapt(ENUM).length, 50)
        eq_(e1.adapt(Enum).length, 50)

        e1 = Enum("one", "two", "three")
        # derived length is that of the longest member ("three")
        eq_(e1.length, 5)
        eq_(e1.adapt(ENUM).length, 5)
        eq_(e1.adapt(Enum).length, 5)

    @testing.provide_metadata
    def test_create_metadata_bound_no_crash(self):
        m1 = self.metadata
        Enum("a", "b", "c", metadata=m1, name="ncenum")

        m1.create_all(testing.db)

    def test_non_native_constraint_custom_type(self):
        """A TypeDecorator wrapping a non-native Enum still emits the
        CHECK constraint against the underlying enum values."""

        class Foob:
            def __init__(self, name):
                self.name = name

        class MyEnum(TypeDecorator):
            cache_ok = True

            def __init__(self, values):
                self.impl = Enum(
                    *[v.name for v in values],
                    name="myenum",
                    native_enum=False,
                    create_constraint=True,
                )

            # future method
            def process_literal_param(self, value, dialect):
                return value.name

            def process_bind_param(self, value, dialect):
                return value.name

        m = MetaData()
        t1 = Table("t", m, Column("x", MyEnum([Foob("a"), Foob("b")])))
        const = [c for c in t1.constraints if isinstance(c, CheckConstraint)][
            0
        ]

        self.assert_compile(
            AddConstraint(const),
            "ALTER TABLE t ADD CONSTRAINT myenum CHECK (x IN ('a', 'b'))",
            dialect="default",
        )

    def test_lookup_failure(self, connection):
        """validate_strings rejects unknown strings at statement time."""
        assert_raises(
            exc.StatementError,
            connection.execute,
            self.tables["non_native_enum_table"].insert(),
            {"id": 4, "someotherenum": "four"},
        )

    def test_mock_engine_no_prob(self):
        """ensure no 'checkfirst' queries are run when enums
        are created with checkfirst=False"""

        e = engines.mock_engine()
        t = Table(
            "t1",
            MetaData(),
            Column("x", Enum("x", "y", name="pge", create_constraint=True)),
        )
        t.create(e, checkfirst=False)
        # basically looking for the start of
        # the constraint, or the ENUM def itself,
        # depending on backend.
        assert "('x'," in e.print_sql()

    def test_repr(self):
        """repr() includes only non-default keyword arguments."""
        e = Enum(
            "x",
            "y",
            name="somename",
            quote=True,
            inherit_schema=True,
            native_enum=False,
        )
        eq_(
            repr(e),
            "Enum('x', 'y', name='somename', "
            "inherit_schema=True, native_enum=False)",
        )

    def test_repr_two(self):
        e = Enum("x", "y", name="somename", create_constraint=True)
        eq_(
            repr(e),
            "Enum('x', 'y', name='somename', create_constraint=True)",
        )

    def test_repr_three(self):
        e = Enum("x", "y", native_enum=False, length=255)
        eq_(
            repr(e),
            "Enum('x', 'y', native_enum=False, length=255)",
        )

    def test_repr_four(self):
        with expect_warnings(
            "Enum 'length' argument is currently ignored unless native_enum"
        ):
            e = Enum("x", "y", length=255)
        # length is currently ignored if native_enum is not False
        eq_(
            repr(e),
            "Enum('x', 'y')",
        )

    def test_length_native(self):
        """length is ignored (with a warning) for native enums."""
        with expect_warnings(
            "Enum 'length' argument is currently ignored unless native_enum"
        ):
            e = Enum("x", "y", "long", length=42)

        eq_(e.length, len("long"))

        # no error is raised
        with expect_warnings(
            "Enum 'length' argument is currently ignored unless native_enum"
        ):
            e = Enum("x", "y", "long", length=1)

        eq_(e.length, len("long"))

    def test_length_raises(self):
        """Too-small length for a non-native enum is a ValueError."""
        assert_raises_message(
            ValueError,
            "When provided, length must be larger or equal.*",
            Enum,
            "x",
            "y",
            "long",
            native_enum=False,
            length=1,
        )

    def test_no_length_non_native(self):
        e = Enum("x", "y", "long", native_enum=False)
        eq_(e.length, len("long"))

    def test_length_non_native(self):
        e = Enum("x", "y", "long", native_enum=False, length=42)
        eq_(e.length, 42)

    def test_omit_aliases(self, connection):
        """omit_aliases=True drops 'four' from the DDL member list but
        aliased values still resolve to the canonical member."""
        table0 = self.tables["stdlib_enum_table"]
        type0 = table0.c.someenum.type
        eq_(type0.enums, ["one", "two", "three", "four", "AMember", "BMember"])

        table = self.tables["stdlib_enum_table_no_alias"]

        type_ = table.c.someenum.type
        eq_(type_.enums, ["one", "two", "three", "AMember", "BMember"])

        connection.execute(
            table.insert(),
            [
                {"id": 1, "someenum": self.SomeEnum.three},
                {"id": 2, "someenum": self.SomeEnum.four},
            ],
        )
        eq_(
            connection.execute(table.select().order_by(table.c.id)).fetchall(),
            [(1, self.SomeEnum.three), (2, self.SomeEnum.three)],
        )

    @testing.combinations(
        (True, "native"), (False, "non_native"), id_="ai", argnames="native"
    )
    @testing.combinations(
        (True, "omit_alias"), (False, "with_alias"), id_="ai", argnames="omit"
    )
    @testing.skip_if("mysql < 8")
    def test_duplicate_values_accepted(
        self, metadata, connection, native, omit
    ):
        """Alias values are rejected by the constraint when aliases are
        omitted, and accepted (resolving to the canonical member) when
        aliases are kept."""
        foo_enum = pep435_enum("foo_enum")
        foo_enum("one", 1, "two")
        foo_enum("three", 3, "four")
        tbl = sa.Table(
            "foo_table",
            metadata,
            sa.Column("id", sa.Integer),
            sa.Column(
                "data",
                sa.Enum(
                    foo_enum,
                    native_enum=native,
                    omit_aliases=omit,
                    create_constraint=True,
                ),
            ),
        )
        t = sa.table("foo_table", sa.column("id"), sa.column("data"))

        metadata.create_all(connection)
        if omit:
            with expect_raises(
                (
                    exc.IntegrityError,
                    exc.DataError,
                    exc.OperationalError,
                    exc.DBAPIError,
                )
            ):
                connection.execute(
                    t.insert(),
                    [
                        {"id": 1, "data": "four"},
                        {"id": 2, "data": "three"},
                    ],
                )
        else:
            connection.execute(
                t.insert(),
                [{"id": 1, "data": "four"}, {"id": 2, "data": "three"}],
            )

            eq_(
                connection.execute(t.select().order_by(t.c.id)).fetchall(),
                [(1, "four"), (2, "three")],
            )
            eq_(
                connection.execute(tbl.select().order_by(tbl.c.id)).fetchall(),
                [(1, foo_enum.three), (2, foo_enum.three)],
            )
# Module-level placeholder; BinaryTest.define_tables rebinds this (via
# ``global``) to a TypeDecorator subclass so tests can reference it by name.
MyPickleType = None
class BinaryTest(fixtures.TablesTest, AssertsExecutionResults):
    """Round-trip tests for LargeBinary and PickleType, including a
    TypeDecorator over PickleType that mutates the object on both bind
    and result."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        global MyPickleType

        class MyPickleType(types.TypeDecorator):
            impl = PickleType
            cache_ok = True

            def process_bind_param(self, value, dialect):
                # mutate on the way in so round trips are observable
                if value:
                    value.stuff = "this is modified stuff"
                return value

            def process_result_value(self, value, dialect):
                # mutate on the way out; tests assert this exact string
                if value:
                    value.stuff = "this is the right stuff"
                return value

        Table(
            "binary_table",
            metadata,
            Column(
                "primary_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("data", LargeBinary),
            Column("data_slice", LargeBinary(100)),
            Column("misc", String(30)),
            Column("pickled", PickleType),
            Column("mypickle", MyPickleType),
        )

    @testing.requires.non_broken_binary
    def test_round_trip(self, connection):
        """Binary blobs, pickled objects and NULLs survive a round trip,
        both via Core select and via text() with explicit column types."""
        binary_table = self.tables.binary_table

        testobj1 = pickleable.Foo("im foo 1")
        testobj2 = pickleable.Foo("im foo 2")
        testobj3 = pickleable.Foo("im foo 3")

        stream1 = self.load_stream("binary_data_one.dat")
        stream2 = self.load_stream("binary_data_two.dat")
        connection.execute(
            binary_table.insert(),
            dict(
                primary_id=1,
                misc="binary_data_one.dat",
                data=stream1,
                data_slice=stream1[0:100],
                pickled=testobj1,
                mypickle=testobj3,
            ),
        )
        connection.execute(
            binary_table.insert(),
            dict(
                primary_id=2,
                misc="binary_data_two.dat",
                data=stream2,
                data_slice=stream2[0:99],
                pickled=testobj2,
            ),
        )
        connection.execute(
            binary_table.insert(),
            dict(
                primary_id=3,
                misc="binary_data_two.dat",
                data=None,
                data_slice=stream2[0:99],
                pickled=None,
            ),
        )

        for stmt in (
            binary_table.select().order_by(binary_table.c.primary_id),
            text(
                "select * from binary_table order by binary_table.primary_id",
            ).columns(
                **{
                    "pickled": PickleType,
                    "mypickle": MyPickleType,
                    "data": LargeBinary,
                    "data_slice": LargeBinary,
                }
            ),
        ):
            result = connection.execute(stmt).fetchall()
            eq_(stream1, result[0]._mapping["data"])
            eq_(stream1[0:100], result[0]._mapping["data_slice"])
            eq_(stream2, result[1]._mapping["data"])
            eq_(testobj1, result[0]._mapping["pickled"])
            eq_(testobj2, result[1]._mapping["pickled"])
            eq_(testobj3.moredata, result[0]._mapping["mypickle"].moredata)
            eq_(
                result[0]._mapping["mypickle"].stuff, "this is the right stuff"
            )

    @testing.requires.binary_comparisons
    def test_comparison(self, connection):
        """test that type coercion occurs on comparison for binary"""
        binary_table = self.tables.binary_table

        expr = binary_table.c.data == "foo"
        assert isinstance(expr.right.type, LargeBinary)

        data = os.urandom(32)
        connection.execute(binary_table.insert(), dict(data=data))
        eq_(
            connection.scalar(
                select(func.count("*"))
                .select_from(binary_table)
                .where(binary_table.c.data == data)
            ),
            1,
        )

    @testing.requires.binary_literals
    def test_literal_roundtrip(self, connection):
        """Binary literals render and execute with literal_binds."""
        compiled = select(cast(literal(util.b("foo")), LargeBinary)).compile(
            dialect=testing.db.dialect, compile_kwargs={"literal_binds": True}
        )
        result = connection.execute(compiled)
        eq_(result.scalar(), util.b("foo"))

    def test_bind_processor_no_dbapi(self):
        # without a DBAPI there is no Binary() wrapper to apply
        b = LargeBinary()
        eq_(b.bind_processor(default.DefaultDialect()), None)

    def load_stream(self, name):
        """Read a binary fixture file from the test suite directory."""
        f = os.path.join(os.path.dirname(__file__), "..", name)
        with open(f, mode="rb") as o:
            return o.read()
class JSONTest(fixtures.TestBase):
    """Unit tests for the generic JSON type: bind/result serialization,
    JSON-null vs SQL-NULL handling, and per-dialect processing of index
    (``col[...]``) expressions.

    Fix: the ``literal_processor`` methods in ``_dialect_index_fixture``
    misspelled their parameter as ``diaect``; renamed to ``dialect`` to
    match the TypeEngine.literal_processor(dialect) signature.
    """

    def setup_test(self):
        metadata = MetaData()
        self.test_table = Table(
            "test_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("test_column", JSON),
        )
        self.jsoncol = self.test_table.c.test_column

        self.dialect = default.DefaultDialect()
        # force the stock json.dumps / json.loads behavior
        self.dialect._json_serializer = None
        self.dialect._json_deserializer = None

    def test_bind_serialize_default(self):
        """Python structures serialize to a JSON string on bind."""
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            self.dialect
        )
        eq_(
            proc({"A": [1, 2, 3, True, False]}),
            '{"A": [1, 2, 3, true, false]}',
        )

    def test_bind_serialize_None(self):
        # by default Python None becomes the JSON "null" string
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            self.dialect
        )
        eq_(proc(None), "null")

    def test_bind_serialize_none_as_null(self):
        # with none_as_null=True, both None and null() become SQL NULL
        proc = JSON(none_as_null=True)._cached_bind_processor(self.dialect)
        eq_(proc(None), None)
        eq_(proc(null()), None)

    def test_bind_serialize_null(self):
        # explicit null() always renders as SQL NULL
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            self.dialect
        )
        eq_(proc(null()), None)

    def test_result_deserialize_default(self):
        """JSON strings deserialize back to Python structures."""
        proc = self.test_table.c.test_column.type._cached_result_processor(
            self.dialect, None
        )
        eq_(
            proc('{"A": [1, 2, 3, true, false]}'),
            {"A": [1, 2, 3, True, False]},
        )

    def test_result_deserialize_null(self):
        proc = self.test_table.c.test_column.type._cached_result_processor(
            self.dialect, None
        )
        eq_(proc("null"), None)

    def test_result_deserialize_None(self):
        proc = self.test_table.c.test_column.type._cached_result_processor(
            self.dialect, None
        )
        eq_(proc(None), None)

    def _dialect_index_fixture(self, int_processor, str_processor):
        """Build a dialect whose colspecs optionally override Integer /
        String with types whose processors leave visible markers."""

        class MyInt(Integer):
            def bind_processor(self, dialect):
                return lambda value: value + 10

            def literal_processor(self, dialect):
                return lambda value: str(value + 15)

        class MyString(String):
            def bind_processor(self, dialect):
                return lambda value: value + "10"

            def literal_processor(self, dialect):
                return lambda value: value + "15"

        class MyDialect(default.DefaultDialect):
            colspecs = {}
            if int_processor:
                colspecs[Integer] = MyInt
            if str_processor:
                colspecs[String] = MyString

        return MyDialect()

    def test_index_bind_proc_int(self):
        """Integer index values use the dialect's Integer override."""
        expr = self.test_table.c.test_column[5]

        int_dialect = self._dialect_index_fixture(True, True)
        non_int_dialect = self._dialect_index_fixture(False, True)

        bindproc = expr.right.type._cached_bind_processor(int_dialect)
        eq_(bindproc(expr.right.value), 15)
        bindproc = expr.right.type._cached_bind_processor(non_int_dialect)
        eq_(bindproc(expr.right.value), 5)

    def test_index_literal_proc_int(self):
        expr = self.test_table.c.test_column[5]

        int_dialect = self._dialect_index_fixture(True, True)
        non_int_dialect = self._dialect_index_fixture(False, True)

        bindproc = expr.right.type._cached_literal_processor(int_dialect)
        eq_(bindproc(expr.right.value), "20")
        bindproc = expr.right.type._cached_literal_processor(non_int_dialect)
        eq_(bindproc(expr.right.value), "5")

    def test_index_bind_proc_str(self):
        """String index values use the dialect's String override."""
        expr = self.test_table.c.test_column["five"]

        str_dialect = self._dialect_index_fixture(True, True)
        non_str_dialect = self._dialect_index_fixture(False, False)

        bindproc = expr.right.type._cached_bind_processor(str_dialect)
        eq_(bindproc(expr.right.value), "five10")
        bindproc = expr.right.type._cached_bind_processor(non_str_dialect)
        eq_(bindproc(expr.right.value), "five")

    def test_index_literal_proc_str(self):
        expr = self.test_table.c.test_column["five"]

        str_dialect = self._dialect_index_fixture(True, True)
        non_str_dialect = self._dialect_index_fixture(False, False)

        bindproc = expr.right.type._cached_literal_processor(str_dialect)
        eq_(bindproc(expr.right.value), "five15")
        bindproc = expr.right.type._cached_literal_processor(non_str_dialect)
        eq_(bindproc(expr.right.value), "'five'")
class ArrayTest(fixtures.TestBase):
    """Unit tests for ARRAY indexing / slicing type behavior."""

    def _myarray_fixture(self):
        class MyArray(ARRAY):
            pass

        return MyArray

    def _two_col_fixture(self, array_cls):
        # shared table with one integer-array and one string-array column
        return Table(
            "arrtable",
            MetaData(),
            Column("intarr", array_cls(Integer)),
            Column("strarr", array_cls(String)),
        )

    def test_array_index_map_dimensions(self):
        """Each integer index peels exactly one dimension off the array."""
        col = column("x", ARRAY(Integer, dimensions=3))

        one_deep = col[5]
        is_(one_deep.type._type_affinity, ARRAY)
        eq_(one_deep.type.dimensions, 2)

        two_deep = one_deep[6]
        is_(two_deep.type._type_affinity, ARRAY)
        eq_(two_deep.type.dimensions, 1)

        is_(two_deep[7].type._type_affinity, Integer)

    def test_array_getitem_single_type(self):
        """Indexing a one-dimensional ARRAY yields the element type."""
        arrtable = self._two_col_fixture(ARRAY)
        is_(arrtable.c.intarr[1].type._type_affinity, Integer)
        is_(arrtable.c.strarr[1].type._type_affinity, String)

    def test_array_getitem_slice_type(self):
        """Slicing an ARRAY yields another ARRAY."""
        arrtable = self._two_col_fixture(ARRAY)
        is_(arrtable.c.intarr[1:3].type._type_affinity, ARRAY)
        is_(arrtable.c.strarr[1:3].type._type_affinity, ARRAY)

    def test_array_getitem_slice_type_dialect_level(self):
        """Slicing preserves a user-defined ARRAY subclass."""
        MyArray = self._myarray_fixture()
        arrtable = self._two_col_fixture(MyArray)
        is_(arrtable.c.intarr[1:3].type._type_affinity, ARRAY)
        is_(arrtable.c.strarr[1:3].type._type_affinity, ARRAY)

        # but the slice returns the actual type
        assert isinstance(arrtable.c.intarr[1:3].type, MyArray)
        assert isinstance(arrtable.c.strarr[1:3].type, MyArray)
MyCustomType = MyTypeDec = None
class ExpressionTest(
    fixtures.TablesTest, AssertsExecutionResults, AssertsCompiledSQL
):
    """Tests for how bind parameters and literals acquire types from the
    surrounding expression (the "type coercion" rules).
    """

    __dialect__ = "default"
    @classmethod
    def define_tables(cls, metadata):
        # rebind the module-level placeholders so test methods outside this
        # classmethod can reference the fixture types
        global MyCustomType, MyTypeDec
        class MyCustomType(types.UserDefinedType):
            cache_ok = True
            def get_col_spec(self):
                return "INT"
            def bind_processor(self, dialect):
                def process(value):
                    return value * 10
                return process
            def result_processor(self, dialect, coltype):
                def process(value):
                    return value / 10
                return process
        class MyOldCustomType(MyCustomType):
            def adapt_operator(self, op):
                return {
                    operators.add: operators.sub,
                    operators.sub: operators.add,
                }.get(op, op)
        class MyTypeDec(types.TypeDecorator):
            impl = String
            cache_ok = True
            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)
            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"
        class MyDecOfDec(types.TypeDecorator):
            # a TypeDecorator whose impl is itself a TypeDecorator
            impl = MyTypeDec
            cache_ok = True
        Table(
            "test",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(30)),
            Column("atimestamp", Date),
            Column("avalue", MyCustomType),
            Column("bvalue", MyTypeDec(50)),
            Column("cvalue", MyDecOfDec(50)),
        )
    @classmethod
    def insert_data(cls, connection):
        test_table = cls.tables.test
        connection.execute(
            test_table.insert(),
            {
                "id": 1,
                "data": "somedata",
                "atimestamp": datetime.date(2007, 10, 15),
                "avalue": 25,
                "bvalue": "foo",
                "cvalue": "foo",
            },
        )
    def test_control(self, connection):
        # sanity check: raw SQL sees the bind-processed value (25 * 10),
        # typed SELECT round-trips the original values
        test_table = self.tables.test
        assert (
            connection.exec_driver_sql("select avalue from test").scalar()
            == 250
        )
        eq_(
            connection.execute(test_table.select()).fetchall(),
            [
                (
                    1,
                    "somedata",
                    datetime.date(2007, 10, 15),
                    25,
                    "BIND_INfooBIND_OUT",
                    "BIND_INfooBIND_OUT",
                )
            ],
        )
    def test_bind_adapt(self, connection):
        # test an untyped bind gets the left side's type
        test_table = self.tables.test
        expr = test_table.c.atimestamp == bindparam("thedate")
        eq_(expr.right.type._type_affinity, Date)
        eq_(
            connection.execute(
                select(
                    test_table.c.id,
                    test_table.c.data,
                    test_table.c.atimestamp,
                ).where(expr),
                {"thedate": datetime.date(2007, 10, 15)},
            ).fetchall(),
            [(1, "somedata", datetime.date(2007, 10, 15))],
        )
        expr = test_table.c.avalue == bindparam("somevalue")
        eq_(expr.right.type._type_affinity, MyCustomType)
        eq_(
            connection.execute(
                test_table.select().where(expr), {"somevalue": 25}
            ).fetchall(),
            [
                (
                    1,
                    "somedata",
                    datetime.date(2007, 10, 15),
                    25,
                    "BIND_INfooBIND_OUT",
                    "BIND_INfooBIND_OUT",
                )
            ],
        )
        expr = test_table.c.bvalue == bindparam("somevalue")
        eq_(expr.right.type._type_affinity, String)
        eq_(
            connection.execute(
                test_table.select().where(expr), {"somevalue": "foo"}
            ).fetchall(),
            [
                (
                    1,
                    "somedata",
                    datetime.date(2007, 10, 15),
                    25,
                    "BIND_INfooBIND_OUT",
                    "BIND_INfooBIND_OUT",
                )
            ],
        )
    def test_grouped_bind_adapt(self):
        # binds wrapped in Grouping (parenthesization) still acquire the
        # comparison target's type, at every nesting level
        test_table = self.tables.test
        expr = test_table.c.atimestamp == elements.Grouping(
            bindparam("thedate")
        )
        eq_(expr.right.type._type_affinity, Date)
        eq_(expr.right.element.type._type_affinity, Date)
        expr = test_table.c.atimestamp == elements.Grouping(
            elements.Grouping(bindparam("thedate"))
        )
        eq_(expr.right.type._type_affinity, Date)
        eq_(expr.right.element.type._type_affinity, Date)
        eq_(expr.right.element.element.type._type_affinity, Date)
    def test_bind_adapt_update(self):
        # the original bindparam stays untyped; the compiled statement's
        # bind picks up the column type
        test_table = self.tables.test
        bp = bindparam("somevalue")
        stmt = test_table.update().values(avalue=bp)
        compiled = stmt.compile()
        eq_(bp.type._type_affinity, types.NullType)
        eq_(compiled.binds["somevalue"].type._type_affinity, MyCustomType)
    def test_bind_adapt_insert(self):
        test_table = self.tables.test
        bp = bindparam("somevalue")
        stmt = test_table.insert().values(avalue=bp)
        compiled = stmt.compile()
        eq_(bp.type._type_affinity, types.NullType)
        eq_(compiled.binds["somevalue"].type._type_affinity, MyCustomType)
    def test_bind_adapt_expression(self):
        test_table = self.tables.test
        bp = bindparam("somevalue")
        stmt = test_table.c.avalue == bp
        eq_(bp.type._type_affinity, types.NullType)
        eq_(stmt.right.type._type_affinity, MyCustomType)
    def test_literal_adapt(self):
        # literals get typed based on the types dictionary, unless
        # compatible with the left side type
        expr = column("foo", String) == 5
        eq_(expr.right.type._type_affinity, Integer)
        expr = column("foo", String) == "asdf"
        eq_(expr.right.type._type_affinity, String)
        expr = column("foo", CHAR) == 5
        eq_(expr.right.type._type_affinity, Integer)
        expr = column("foo", CHAR) == "asdf"
        eq_(expr.right.type.__class__, CHAR)
    @testing.combinations(
        (5, Integer),
        (2.65, Float),
        (True, Boolean),
        (decimal.Decimal("2.65"), Numeric),
        (datetime.date(2015, 7, 20), Date),
        (datetime.time(10, 15, 20), Time),
        (datetime.datetime(2015, 7, 20, 10, 15, 20), DateTime),
        (datetime.timedelta(seconds=5), Interval),
        (None, types.NullType),
    )
    def test_actual_literal_adapters(self, data, expected):
        # literal() maps each Python type to the expected SQL type
        is_(literal(data).type.__class__, expected)
    def test_typedec_operator_adapt(self, connection):
        test_table = self.tables.test
        expr = test_table.c.bvalue + "hi"
        assert expr.type.__class__ is MyTypeDec
        assert expr.right.type.__class__ is MyTypeDec
        eq_(
            connection.execute(select(expr.label("foo"))).scalar(),
            "BIND_INfooBIND_INhiBIND_OUT",
        )
    def test_typedec_is_adapt(self):
        # coerce_to_is_types controls which comparison values compile to
        # IS / native boolean instead of a plain bind parameter
        class CoerceNothing(TypeDecorator):
            coerce_to_is_types = ()
            impl = Integer
            cache_ok = True
        class CoerceBool(TypeDecorator):
            coerce_to_is_types = (bool,)
            impl = Boolean
            cache_ok = True
        class CoerceNone(TypeDecorator):
            coerce_to_is_types = (type(None),)
            impl = Integer
            cache_ok = True
        c1 = column("x", CoerceNothing())
        c2 = column("x", CoerceBool())
        c3 = column("x", CoerceNone())
        self.assert_compile(
            and_(c1 == None, c2 == None, c3 == None),  # noqa
            "x = :x_1 AND x = :x_2 AND x IS NULL",
        )
        self.assert_compile(
            and_(c1 == True, c2 == True, c3 == True),  # noqa
            "x = :x_1 AND x = true AND x = :x_2",
            dialect=default.DefaultDialect(supports_native_boolean=True),
        )
        self.assert_compile(
            and_(c1 == 3, c2 == 3, c3 == 3),
            "x = :x_1 AND x = :x_2 AND x = :x_3",
            dialect=default.DefaultDialect(supports_native_boolean=True),
        )
        self.assert_compile(
            and_(c1.is_(True), c2.is_(True), c3.is_(True)),
            "x IS :x_1 AND x IS true AND x IS :x_2",
            dialect=default.DefaultDialect(supports_native_boolean=True),
        )
    def test_typedec_righthand_coercion(self, connection):
        class MyTypeDec(types.TypeDecorator):
            impl = String
            cache_ok = True
            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)
            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"
        tab = table("test", column("bvalue", MyTypeDec))
        expr = tab.c.bvalue + 6
        self.assert_compile(
            expr, "test.bvalue || :bvalue_1", use_default_dialect=True
        )
        is_(expr.right.type.__class__, MyTypeDec)
        is_(expr.type.__class__, MyTypeDec)
        eq_(
            connection.execute(select(expr.label("foo"))).scalar(),
            "BIND_INfooBIND_IN6BIND_OUT",
        )
    def test_variant_righthand_coercion_honors_wrapped(self):
        my_json_normal = JSON()
        my_json_variant = JSON().with_variant(String(), "sqlite")
        tab = table(
            "test",
            column("avalue", my_json_normal),
            column("bvalue", my_json_variant),
        )
        expr = tab.c.avalue["foo"] == "bar"
        is_(expr.right.type._type_affinity, String)
        is_not(expr.right.type, my_json_normal)
        expr = tab.c.bvalue["foo"] == "bar"
        is_(expr.right.type._type_affinity, String)
        is_not(expr.right.type, my_json_variant)
    def test_variant_righthand_coercion_returns_self(self):
        my_datetime_normal = DateTime()
        my_datetime_variant = DateTime().with_variant(
            dialects.sqlite.DATETIME(truncate_microseconds=False), "sqlite"
        )
        tab = table(
            "test",
            column("avalue", my_datetime_normal),
            column("bvalue", my_datetime_variant),
        )
        expr = tab.c.avalue == datetime.datetime(2015, 10, 14, 15, 17, 18)
        is_(expr.right.type._type_affinity, DateTime)
        is_(expr.right.type, my_datetime_normal)
        expr = tab.c.bvalue == datetime.datetime(2015, 10, 14, 15, 17, 18)
        is_(expr.right.type, my_datetime_variant)
    def test_bind_typing(self):
        from sqlalchemy.sql import column
        class MyFoobarType(types.UserDefinedType):
            pass
        class Foo:
            pass
        # unknown type + integer, right hand bind
        # coerces to given type
        expr = column("foo", MyFoobarType) + 5
        assert expr.right.type._type_affinity is MyFoobarType
        # untyped bind - it gets assigned MyFoobarType
        bp = bindparam("foo")
        expr = column("foo", MyFoobarType) + bp
        assert bp.type._type_affinity is types.NullType  # noqa
        assert expr.right.type._type_affinity is MyFoobarType
        expr = column("foo", MyFoobarType) + bindparam("foo", type_=Integer)
        assert expr.right.type._type_affinity is types.Integer
        # unknown type + unknown, right hand bind
        # coerces to the left
        expr = column("foo", MyFoobarType) + Foo()
        assert expr.right.type._type_affinity is MyFoobarType
        # including for non-commutative ops
        expr = column("foo", MyFoobarType) - Foo()
        assert expr.right.type._type_affinity is MyFoobarType
        expr = column("foo", MyFoobarType) - datetime.date(2010, 8, 25)
        assert expr.right.type._type_affinity is MyFoobarType
    def test_date_coercion(self):
        expr = column("bar", types.NULLTYPE) - column("foo", types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.NullType)
        expr = func.sysdate() - column("foo", types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.Interval)
        expr = func.current_date() - column("foo", types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.Interval)
    def test_interval_coercion(self):
        expr = column("bar", types.Interval) + column("foo", types.Date)
        eq_(expr.type._type_affinity, types.DateTime)
        expr = column("bar", types.Interval) * column("foo", types.Numeric)
        eq_(expr.type._type_affinity, types.Interval)
    @testing.combinations(
        (operator.add,),
        (operator.mul,),
        (operator.truediv,),
        (operator.sub,),
        argnames="op",
        id_="n",
    )
    @testing.combinations(
        (Numeric(10, 2),), (Integer(),), argnames="other", id_="r"
    )
    def test_numerics_coercion(self, op, other):
        # Numeric "wins" over Integer in either operand position
        expr = op(column("bar", types.Numeric(10, 2)), column("foo", other))
        assert isinstance(expr.type, types.Numeric)
        expr = op(column("foo", other), column("bar", types.Numeric(10, 2)))
        assert isinstance(expr.type, types.Numeric)
    def test_asdecimal_int_to_numeric(self):
        expr = column("a", Integer) * column("b", Numeric(asdecimal=False))
        is_(expr.type.asdecimal, False)
        expr = column("a", Integer) * column("b", Numeric())
        is_(expr.type.asdecimal, True)
        expr = column("a", Integer) * column("b", Float())
        is_(expr.type.asdecimal, False)
        assert isinstance(expr.type, Float)
    def test_asdecimal_numeric_to_int(self):
        expr = column("a", Numeric(asdecimal=False)) * column("b", Integer)
        is_(expr.type.asdecimal, False)
        expr = column("a", Numeric()) * column("b", Integer)
        is_(expr.type.asdecimal, True)
        expr = column("a", Float()) * column("b", Integer)
        is_(expr.type.asdecimal, False)
        assert isinstance(expr.type, Float)
    def test_null_comparison(self):
        eq_(
            str(column("a", types.NullType()) + column("b", types.NullType())),
            "a + b",
        )
    def test_expression_typing(self):
        expr = column("bar", Integer) - 3
        eq_(expr.type._type_affinity, Integer)
        expr = bindparam("bar") + bindparam("foo")
        eq_(expr.type, types.NULLTYPE)
    def test_distinct(self, connection):
        test_table = self.tables.test
        s = select(distinct(test_table.c.avalue))
        eq_(connection.execute(s).scalar(), 25)
        s = select(test_table.c.avalue.distinct())
        eq_(connection.execute(s).scalar(), 25)
        assert distinct(test_table.c.data).type == test_table.c.data.type
        assert test_table.c.data.distinct().type == test_table.c.data.type
    def test_detect_coercion_of_builtins(self):
        @inspection._self_inspects
        class SomeSQLAThing:
            def __repr__(self):
                return "some_sqla_thing()"
        class SomeOtherThing:
            pass
        assert_raises_message(
            exc.ArgumentError,
            r"SQL expression element or literal value expected, got "
            r"some_sqla_thing\(\).",
            lambda: column("a", String) == SomeSQLAThing(),
        )
        is_(bindparam("x", SomeOtherThing()).type, types.NULLTYPE)
    def test_detect_coercion_not_fooled_by_mock(self):
        m1 = mock.Mock()
        is_(bindparam("x", m1).type, types.NULLTYPE)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for compiling type objects into their DDL strings."""

    __dialect__ = "default"
    def test_compile_err_formatting(self):
        with expect_raises_message(
            exc.CompileError,
            r"Don't know how to render literal SQL value: \(1, 2, 3\)",
        ):
            func.foo((1, 2, 3)).compile(compile_kwargs={"literal_binds": True})
    def test_strict_bool_err_formatting(self):
        typ = Boolean()
        dialect = default.DefaultDialect()
        with expect_raises_message(
            TypeError,
            r"Not a boolean value: \(5,\)",
        ):
            typ.bind_processor(dialect)((5,))
    @testing.requires.unbounded_varchar
    def test_string_plain(self):
        self.assert_compile(String(), "VARCHAR")
    def test_string_length(self):
        self.assert_compile(String(50), "VARCHAR(50)")
    def test_string_collation(self):
        self.assert_compile(
            String(50, collation="FOO"), 'VARCHAR(50) COLLATE "FOO"'
        )
    def test_char_plain(self):
        self.assert_compile(CHAR(), "CHAR")
    def test_char_length(self):
        self.assert_compile(CHAR(50), "CHAR(50)")
    def test_char_collation(self):
        self.assert_compile(
            CHAR(50, collation="FOO"), 'CHAR(50) COLLATE "FOO"'
        )
    def test_text_plain(self):
        self.assert_compile(Text(), "TEXT")
    def test_text_length(self):
        self.assert_compile(Text(50), "TEXT(50)")
    def test_text_collation(self):
        self.assert_compile(Text(collation="FOO"), 'TEXT COLLATE "FOO"')
    def test_default_compile_pg_inet(self):
        # dialect-specific types fall back to their generic DDL under the
        # default dialect when allow_dialect_select is set
        self.assert_compile(
            dialects.postgresql.INET(), "INET", allow_dialect_select=True
        )
    def test_default_compile_pg_float(self):
        self.assert_compile(
            dialects.postgresql.FLOAT(), "FLOAT", allow_dialect_select=True
        )
    def test_default_compile_double(self):
        self.assert_compile(Double(), "DOUBLE")
    def test_default_compile_mysql_integer(self):
        self.assert_compile(
            dialects.mysql.INTEGER(display_width=5),
            "INTEGER",
            allow_dialect_select=True,
        )
        self.assert_compile(
            dialects.mysql.INTEGER(display_width=5),
            "INTEGER(5)",
            dialect="mysql",
        )
    def test_numeric_plain(self):
        self.assert_compile(types.NUMERIC(), "NUMERIC")
    def test_numeric_precision(self):
        self.assert_compile(types.NUMERIC(2), "NUMERIC(2)")
    def test_numeric_scale(self):
        self.assert_compile(types.NUMERIC(2, 4), "NUMERIC(2, 4)")
    def test_decimal_plain(self):
        self.assert_compile(types.DECIMAL(), "DECIMAL")
    def test_decimal_precision(self):
        self.assert_compile(types.DECIMAL(2), "DECIMAL(2)")
    def test_decimal_scale(self):
        self.assert_compile(types.DECIMAL(2, 4), "DECIMAL(2, 4)")
    def test_kwarg_legacy_typecompiler(self):
        from sqlalchemy.sql import compiler
        class SomeTypeCompiler(compiler.GenericTypeCompiler):
            # transparently decorated w/ kw decorator
            def visit_VARCHAR(self, type_):
                return "MYVARCHAR"
            # not affected
            def visit_INTEGER(self, type_, **kw):
                return "MYINTEGER %s" % kw["type_expression"].name
        dialect = default.DefaultDialect()
        dialect.type_compiler = SomeTypeCompiler(dialect)
        self.assert_compile(
            ddl.CreateColumn(Column("bar", VARCHAR(50))),
            "bar MYVARCHAR",
            dialect=dialect,
        )
        self.assert_compile(
            ddl.CreateColumn(Column("bar", INTEGER)),
            "bar MYINTEGER bar",
            dialect=dialect,
        )
class TestKWArgPassThru(AssertsCompiledSQL, fixtures.TestBase):
    __backend__ = True

    def test_user_defined(self):
        """test that dialects pass the column through on DDL."""

        class MyType(types.UserDefinedType):
            def get_col_spec(self, **kw):
                # the DDL compiler hands the Column in as "type_expression"
                return "FOOB %s" % kw["type_expression"].name

        tbl = Table("t", MetaData(), Column("bar", MyType, nullable=False))
        self.assert_compile(
            ddl.CreateColumn(tbl.c.bar), "bar FOOB bar NOT NULL"
        )
class NumericRawSQLTest(fixtures.TestBase):
    """Test what DBAPIs and dialects return without any typing
    information supplied at the SQLA level.
    """
    __backend__ = True
    def _fixture(self, connection, metadata, type_, data):
        # create a single-column table and insert one typed value
        t = Table("t", metadata, Column("val", type_))
        metadata.create_all(connection)
        connection.execute(t.insert(), dict(val=data))
    @testing.requires.numeric_received_as_decimal_untyped
    @testing.provide_metadata
    def test_decimal_fp(self, connection):
        metadata = self.metadata
        self._fixture(
            connection, metadata, Numeric(10, 5), decimal.Decimal("45.5")
        )
        # raw driver SQL, no SQLAlchemy result typing applied
        val = connection.exec_driver_sql("select val from t").scalar()
        assert isinstance(val, decimal.Decimal)
        eq_(val, decimal.Decimal("45.5"))
    @testing.requires.numeric_received_as_decimal_untyped
    @testing.provide_metadata
    def test_decimal_int(self, connection):
        metadata = self.metadata
        self._fixture(
            connection, metadata, Numeric(10, 5), decimal.Decimal("45")
        )
        val = connection.exec_driver_sql("select val from t").scalar()
        assert isinstance(val, decimal.Decimal)
        eq_(val, decimal.Decimal("45"))
    @testing.provide_metadata
    def test_ints(self, connection):
        metadata = self.metadata
        self._fixture(connection, metadata, Integer, 45)
        val = connection.exec_driver_sql("select val from t").scalar()
        assert isinstance(val, int)
        eq_(val, 45)
    @testing.provide_metadata
    def test_float(self, connection):
        metadata = self.metadata
        self._fixture(connection, metadata, Float, 46.583)
        val = connection.exec_driver_sql("select val from t").scalar()
        assert isinstance(val, float)
        eq_(val, 46.583)
class IntervalTest(fixtures.TablesTest, AssertsExecutionResults):
    """Round-trip tests for the Interval type, native and non-native."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "intervals",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("native_interval", Interval()),
            Column(
                "native_interval_args",
                Interval(day_precision=3, second_precision=6),
            ),
            Column("non_native_interval", Interval(native=False)),
        )

    def test_non_native_adapt(self):
        interval = Interval(native=False)
        adapted = interval.dialect_impl(testing.db.dialect)
        assert isinstance(adapted, Interval)
        assert adapted.native is False
        # non-native intervals are persisted via a DATETIME column
        eq_(str(adapted), "DATETIME")

    def test_roundtrip(self, connection):
        interval_table = self.tables.intervals
        small_delta = datetime.timedelta(days=15, seconds=5874)
        delta = datetime.timedelta(14)
        connection.execute(
            interval_table.insert(),
            dict(
                native_interval=small_delta,
                native_interval_args=delta,
                non_native_interval=delta,
            ),
        )
        row = connection.execute(interval_table.select()).first()
        eq_(row.native_interval, small_delta)
        eq_(row.native_interval_args, delta)
        eq_(row.non_native_interval, delta)

    def test_null(self, connection):
        interval_table = self.tables.intervals
        connection.execute(
            interval_table.insert(),
            dict(
                id=1,
                # fixed typo: this key was spelled "native_inverval", so the
                # column was never explicitly bound to None (the test only
                # passed because the column defaulted to NULL anyway)
                native_interval=None,
                non_native_interval=None,
            ),
        )
        row = connection.execute(interval_table.select()).first()
        eq_(row.native_interval, None)
        eq_(row.native_interval_args, None)
        eq_(row.non_native_interval, None)
class IntegerTest(fixtures.TestBase):
    """Unit tests for the Integer literal processor."""

    __backend__ = True

    def test_integer_literal_processor(self):
        typ = Integer()
        literal_proc = typ._cached_literal_processor(testing.db.dialect)
        # ints render directly as text
        eq_(literal_proc(5), "5")
        # non-integer input is rejected
        assert_raises(ValueError, literal_proc, "notanint")
class BooleanTest(
    fixtures.TablesTest, AssertsExecutionResults, AssertsCompiledSQL
):
    """test edge cases for booleans.  Note that the main boolean test suite
    is now in testing/suite/test_types.py
    the default value of create_constraint was changed to False in
    version 1.4 with #5367.
    """
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "boolean_table",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("value", Boolean(create_constraint=True)),
            Column("unconstrained_value", Boolean()),
        )
    @testing.requires.enforces_check_constraints
    @testing.requires.non_native_boolean_unconstrained
    def test_constraint(self, connection):
        # a CHECK-constrained boolean column rejects out-of-range ints
        assert_raises(
            (
                exc.IntegrityError,
                exc.ProgrammingError,
                exc.OperationalError,
                exc.InternalError,  # older pymysql's do this
            ),
            connection.exec_driver_sql,
            "insert into boolean_table (id, value) values(1, 5)",
        )
    @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
    def test_unconstrained(self, connection):
        connection.exec_driver_sql(
            "insert into boolean_table (id, unconstrained_value)"
            "values (1, 5)"
        )
    def test_non_native_constraint_custom_type(self):
        class Foob:
            def __init__(self, value):
                self.value = value
        class MyBool(TypeDecorator):
            impl = Boolean(create_constraint=True)
            cache_ok = True
            # future method
            def process_literal_param(self, value, dialect):
                return value.value
            def process_bind_param(self, value, dialect):
                return value.value
        m = MetaData()
        t1 = Table("t", m, Column("x", MyBool()))
        const = [c for c in t1.constraints if isinstance(c, CheckConstraint)][
            0
        ]
        self.assert_compile(
            AddConstraint(const),
            "ALTER TABLE t ADD CHECK (x IN (0, 1))",
            dialect="sqlite",
        )
    @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
    def test_nonnative_processor_coerces_to_onezero(self):
        boolean_table = self.tables.boolean_table
        with testing.db.connect() as conn:
            assert_raises_message(
                exc.StatementError,
                "Value 5 is not None, True, or False",
                conn.execute,
                boolean_table.insert(),
                {"id": 1, "unconstrained_value": 5},
            )
    @testing.requires.non_native_boolean_unconstrained
    def test_nonnative_processor_coerces_integer_to_boolean(self, connection):
        boolean_table = self.tables.boolean_table
        # raw driver insert bypasses the bind processor, so 5 is stored
        connection.exec_driver_sql(
            "insert into boolean_table (id, unconstrained_value) "
            "values (1, 5)"
        )
        eq_(
            connection.exec_driver_sql(
                "select unconstrained_value from boolean_table"
            ).scalar(),
            5,
        )
        # ... but the typed SELECT coerces the stored 5 to True
        eq_(
            connection.scalar(select(boolean_table.c.unconstrained_value)),
            True,
        )
    def test_bind_processor_coercion_native_true(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        is_(proc(True), True)
    def test_bind_processor_coercion_native_false(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        is_(proc(False), False)
    def test_bind_processor_coercion_native_none(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        is_(proc(None), None)
    def test_bind_processor_coercion_native_0(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        is_(proc(0), False)
    def test_bind_processor_coercion_native_1(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        is_(proc(1), True)
    def test_bind_processor_coercion_native_str(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        assert_raises_message(
            TypeError, "Not a boolean value: 'foo'", proc, "foo"
        )
    def test_bind_processor_coercion_native_int_out_of_range(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=True)
        )
        assert_raises_message(
            ValueError, "Value 15 is not None, True, or False", proc, 15
        )
    def test_bind_processor_coercion_nonnative_true(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        eq_(proc(True), 1)
    def test_bind_processor_coercion_nonnative_false(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        eq_(proc(False), 0)
    def test_bind_processor_coercion_nonnative_none(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        is_(proc(None), None)
    def test_bind_processor_coercion_nonnative_0(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        eq_(proc(0), 0)
    def test_bind_processor_coercion_nonnative_1(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        eq_(proc(1), 1)
    def test_bind_processor_coercion_nonnative_str(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        assert_raises_message(
            TypeError, "Not a boolean value: 'foo'", proc, "foo"
        )
    def test_bind_processor_coercion_nonnative_int_out_of_range(self):
        proc = Boolean().bind_processor(
            mock.Mock(supports_native_boolean=False)
        )
        assert_raises_message(
            ValueError, "Value 15 is not None, True, or False", proc, 15
        )
    def test_literal_processor_coercion_native_true(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        eq_(proc(True), "true")
    def test_literal_processor_coercion_native_false(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        eq_(proc(False), "false")
    def test_literal_processor_coercion_native_1(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        eq_(proc(1), "true")
    def test_literal_processor_coercion_native_0(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        eq_(proc(0), "false")
    def test_literal_processor_coercion_native_str(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        assert_raises_message(
            TypeError, "Not a boolean value: 'foo'", proc, "foo"
        )
    def test_literal_processor_coercion_native_int_out_of_range(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=True)
        )
        assert_raises_message(
            ValueError, "Value 15 is not None, True, or False", proc, 15
        )
    def test_literal_processor_coercion_nonnative_true(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=False)
        )
        eq_(proc(True), "1")
    def test_literal_processor_coercion_nonnative_false(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=False)
        )
        eq_(proc(False), "0")
    def test_literal_processor_coercion_nonnative_1(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=False)
        )
        eq_(proc(1), "1")
    def test_literal_processor_coercion_nonnative_0(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=False)
        )
        eq_(proc(0), "0")
    def test_literal_processor_coercion_nonnative_str(self):
        proc = Boolean().literal_processor(
            default.DefaultDialect(supports_native_boolean=False)
        )
        assert_raises_message(
            TypeError, "Not a boolean value: 'foo'", proc, "foo"
        )
class PickleTest(fixtures.TestBase):
    """Tests for PickleType value comparison and impl customization."""

    def test_eq_comparison(self):
        pickle_type = PickleType()
        for sample in (
            {"1": "2"},
            pickleable.Bar(5, 6),
            pickleable.OldSchool(10, 11),
        ):
            assert pickle_type.compare_values(
                pickle_type.copy_value(sample), sample
            )
        # objects whose comparison raises propagate the error
        assert_raises(
            NotImplementedError,
            pickle_type.compare_values,
            pickleable.BrokenComparable("foo"),
            pickleable.BrokenComparable("foo"),
        )

    def test_nonmutable_comparison(self):
        pickle_type = PickleType()
        for sample in (
            {"1": "2"},
            pickleable.Bar(5, 6),
            pickleable.OldSchool(10, 11),
        ):
            assert pickle_type.compare_values(
                pickle_type.copy_value(sample), sample
            )

    @testing.combinations(
        None, mysql.LONGBLOB, LargeBinary, mysql.LONGBLOB(), LargeBinary()
    )
    def test_customized_impl(self, impl):
        """test #6646"""
        if impl is None:
            # default impl is LargeBinary
            assert isinstance(PickleType().impl, LargeBinary)
        else:
            pickle_type = PickleType(impl=impl)
            expected_cls = impl if isinstance(impl, type) else type(impl)
            assert isinstance(pickle_type.impl, expected_cls)
class CallableTest(fixtures.TestBase):
    """Callables (e.g. a partial of a type class) work as type arguments."""

    @testing.provide_metadata
    def test_callable_as_arg(self, connection):
        make_unicode = util.partial(Unicode)
        thing_table = Table(
            "thing", self.metadata, Column("name", make_unicode(20))
        )
        assert isinstance(thing_table.c.name.type, Unicode)
        thing_table.create(connection)

    @testing.provide_metadata
    def test_callable_as_kwarg(self, connection):
        make_unicode = util.partial(Unicode)
        thang_table = Table(
            "thang",
            self.metadata,
            Column("name", type_=make_unicode(20), primary_key=True),
        )
        assert isinstance(thang_table.c.name.type, Unicode)
        thang_table.create(connection)
class LiteralTest(fixtures.TestBase):
    """Backends without datetime-literal support must raise when asked to
    render an inline datetime literal.
    """

    __backend__ = True
    @testing.combinations(
        ("datetime", datetime.datetime.now()),
        ("date", datetime.date.today()),
        ("time", datetime.time()),
        argnames="value",
        id_="ia",
    )
    @testing.skip_if(testing.requires.datetime_literals)
    def test_render_datetime(self, value):
        lit = literal(value)
        assert_raises_message(
            NotImplementedError,
            "Don't know how to literal-quote value.*",
            lit.compile,
            dialect=testing.db.dialect,
            compile_kwargs={"literal_binds": True},
        )
class ResolveForLiteralTest(fixtures.TestBase):
    """test suite for literal resolution, includes tests for
    #7537 and #7551
    """
    @testing.combinations(
        (
            datetime.datetime(
                2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc
            ),
            sqltypes.DATETIME_TIMEZONE,
        ),
        (datetime.datetime(2012, 10, 15, 12, 57, 18, 396), sqltypes._DATETIME),
        (
            datetime.time(12, 57, 18, tzinfo=datetime.timezone.utc),
            sqltypes.TIME_TIMEZONE,
        ),
        (datetime.time(12, 57, 18), sqltypes._TIME),
        # strings with non-ASCII characters resolve to the unicode type
        ("réve🐍 illé", sqltypes._UNICODE),
        ("hello", sqltypes._STRING),
        ("réveillé", sqltypes._UNICODE),
    )
    def test_resolve(self, value, expected):
        is_(literal(value).type, expected)
| 31.674634 | 79 | 0.556435 |
acf16571845ec5a3782ddc261584d056e12b730c | 9,160 | py | Python | pybuildtool/core/task.py | dozymoe/PyBuildTool | d938a8d6335b801e102159e82a6e0002dfaa1b1a | [
"MIT"
] | 5 | 2017-02-10T07:54:49.000Z | 2017-07-11T09:14:26.000Z | pybuildtool/core/task.py | dozymoe/PyBuildTool | d938a8d6335b801e102159e82a6e0002dfaa1b1a | [
"MIT"
] | null | null | null | pybuildtool/core/task.py | dozymoe/PyBuildTool | d938a8d6335b801e102159e82a6e0002dfaa1b1a | [
"MIT"
] | 1 | 2017-05-21T20:35:10.000Z | 2017-05-21T20:35:10.000Z | """
Base class for pybuildtools tools.
Options:
* _source_excluded_ : list, None
: Pretend source files (*_in values) don't exist.
* _source_basedir_ : str, None
: Create files in output dir, relative path to source
: base directory.
* _source_grouped_ : bool, None
: Don't create separate tasks for every input files, have
: them as input files of a single task.
: Actually I'm not so sure what this does, something like
: have them all as arguments to shell command?
* _noop_retcodes_ : list, None
: If Task.perform() returns these, pretend nothing
: happened.
* _success_retcodes_ : list, None
: If Task.perform() returns these, pretend as if it
: returns 0 or a success.
* _replace_patterns_ : list, None
: If the output is a directory, you can rename the
: output files based on the source files.
: This is a list of list.
: The list elements consist of two items: python regex
: and replacement.
* _no_io_ : bool, False
: This task doesn't need inputs or outputs.
: Only works if written in build.yml.
"""
import os
from copy import deepcopy
from time import time
from uuid import uuid4
import stringcase
from waflib.Task import Task as BaseTask # pylint:disable=import-error
from ..misc.collections_utils import make_list
from ..misc.path import expand_resource
class Task(BaseTask):
    """Base class for pybuildtool tools; see the module docstring for the
    supported ``_xxx_`` configuration options.
    """

    # Per-instance state, all (re)initialized in __init__.
    args = None        # prepared tool arguments
    # naming convention for generated option names (presumably spinal-case
    # via the ``stringcase`` import) — TODO confirm against subclasses
    args_case = 'spinal'
    conf = None        # tool configuration dict (merged in __init__)
    group = None       # build group this task belongs to
    file_in = None     # real input file paths (see prepare_shadow_jutsu)
    file_out = None    # real output file paths
    name = None        # tool name; prefixes inherited configuration keys
    token_in = None    # token file paths among the inputs
    token_out = None   # token file paths among the outputs
    _id = None         # unique hex id for this task instance
def __init__(self, group, config, *args, **kwargs):
super().__init__(*args, **kwargs)
self._id = uuid4().hex
# Task's configuration can be declared higher in the build tree,
# but it needs to be prefixed with its tool-name.
# Tool-name however can only be defined by the tool's module by
# observing predefined `__name__` variable, which value is the name
# of the tool's module.
if config:
my_config = deepcopy(config)
if self.name:
name = self.name + '_'
for key in config.keys():
if not key.startswith(name):
continue
task_conf = key[len(name):]
if task_conf in config:
continue
my_config[task_conf] = config[key]
else:
my_config = {}
self.args = []
self.conf = my_config
self.group = group
self.file_in = []
self.file_out = []
self.token_in = []
self.token_out = []
def prepare(self):
pass
def prepare_args(self): # pylint:disable=no-self-use
return []
def prepare_shadow_jutsu(self):
source_exclude = []
for f in make_list(self.conf.get('_source_excluded_')):
nodes = expand_resource(self.group, f)
source_exclude += make_list(nodes)
task_uid = self._id
for node in self.inputs:
path = node.abspath()
if node.parent.name == '.tokens':
self.token_in.append(path)
elif getattr(node, 'is_virtual_in_' + task_uid, False):
pass
elif path in source_exclude:
pass
else:
self.file_in.append(path)
for node in self.outputs:
path = node.abspath()
if node.parent.name == '.tokens':
self.token_out.append(path)
elif getattr(node, 'is_virtual_out_' + task_uid, False):
pass
else:
self.file_out.append(path)
def finalize_shadow_jutsu(self, create_only=False):
for filename in self.token_out:
if create_only and os.path.exists(filename):
continue
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, 'w', encoding='utf-8') as f:
f.write(str(time()))
def run(self):
self.prepare_shadow_jutsu()
self.prepare()
ret = self.perform()
create_only = False
if ret in make_list(self.conf.get('_noop_retcodes_')):
create_only = True
ret = None
elif ret in make_list(self.conf.get('_success_retcodes_')):
ret = None
if not ret:
self.finalize_shadow_jutsu(create_only)
return ret
@staticmethod
def is_production():
return os.environ.get('PROJECT_VARIANT_IS_PRODUCTION') == '1'
def stringcase_arg(self, option):
return getattr(stringcase, self.args_case + 'case')(option)
def _add_arg(self, option, value, sep):
if sep == ' ':
self.args.append(option)
self.args.append(value)
else:
self.args.append(option + sep + value)
def add_bool_args(self, *options):
for option in options:
value = self.conf.get(option)
if not value:
continue
option = '--' + self.stringcase_arg(option)
self.args.append(option)
def add_dict_args(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
key_val_sep = kwargs.get('key_val_sep', '=')
for option in options:
if option not in self.conf:
continue
for key, value in self.conf[option].items():
value = value.format(**self.group.get_patterns())
item = key + key_val_sep + value
self._add_arg(option, item, opt_val_sep)
def add_int_args(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
try:
value = int(self.conf.get(option))
except (TypeError, ValueError):
continue
option = '--' + self.stringcase_arg(option)
self._add_arg(option, str(value), opt_val_sep)
def add_list_args_join(self, separator, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
values = make_list(self.conf.get(option))
if not values:
continue
option = '--' + self.stringcase_arg(option)
value = separator.join(x.format(**self.group.get_patterns())\
for x in values)
self._add_arg(option, value, opt_val_sep)
def add_list_args_multi(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
values = make_list(self.conf.get(option))
if not values:
continue
option = '--' + self.stringcase_arg(option)
for value in values:
value = value.format(**self.group.get_patterns())
self._add_arg(option, value, opt_val_sep)
def add_path_args(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
value = self.conf.get(option)
if value is None:
continue
option = '--' + self.stringcase_arg(option)
value = expand_resource(self.group, value)
self._add_arg(option, value, opt_val_sep)
def add_path_list_args_join(self, separator, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
values = make_list(self.conf.get(option))
if not values:
continue
option = '--' + self.stringcase_arg(option)
value = separator.join(expand_resource(self.group, x)\
for x in values)
self._add_arg(option, value, opt_val_sep)
def add_path_list_args_multi(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
values = make_list(self.conf.get(option))
if not values:
continue
option = '--' + self.stringcase_arg(option)
for value in values:
value = expand_resource(self.group, value)
self._add_arg(option, value, opt_val_sep)
def add_str_args(self, *options, **kwargs):
opt_val_sep = kwargs.get('opt_val_sep', '=')
for option in options:
value = self.conf.get(option)
if value is None:
continue
option = '--' + self.stringcase_arg(option)
value = value.format(**self.group.get_patterns())
self._add_arg(option, value, opt_val_sep)
| 30.945946 | 80 | 0.547707 |
acf1657c1f07542763434d775d1e71498efd6861 | 5,767 | py | Python | dataset/gla_vol_time_series/format_datasharing_csvs.py | subond/ww_tvol_study | 6fbcae251015a7cd49220abbb054914266b3b4a1 | [
"MIT"
] | 20 | 2021-04-28T18:11:43.000Z | 2022-03-09T13:15:56.000Z | dataset/gla_vol_time_series/format_datasharing_csvs.py | subond/ww_tvol_study | 6fbcae251015a7cd49220abbb054914266b3b4a1 | [
"MIT"
] | 4 | 2021-04-28T15:51:43.000Z | 2022-01-02T19:10:25.000Z | dataset/gla_vol_time_series/format_datasharing_csvs.py | rhugonnet/ww_tvol_study | f29fc2fca358aa169f6b7cc790e6b6f9f8b55c6f | [
"MIT"
] | 9 | 2021-04-28T17:58:27.000Z | 2021-12-19T05:51:56.000Z | import pandas as pd
import os

# Script to format the final glacier volume-change CSVs for data sharing.
# Each input CSV is rounded, trimmed to a fixed column order, and written
# next to the original with a "_fmt" suffix.


def _format_csv(filenames, round_spec, columns, drop=None, index_col=None,
                write_index=False):
    """Round, reorder and re-export each CSV in *filenames*.

    filenames   -- input CSV paths; each output is "<name>_fmt.csv" beside
                   its input.
    round_spec  -- column -> number of decimals, passed to DataFrame.round
                   (keys absent from the frame are ignored by pandas).
    columns     -- final column selection and order.
    drop        -- optional list of columns removed before rounding.
    index_col   -- forwarded to pandas.read_csv.
    write_index -- keep the DataFrame index in the output (only used for
                   the per-glacier series).
    """
    for fn in filenames:
        df = pd.read_csv(fn, index_col=index_col)
        if drop:
            df = df.drop(columns=drop)
        df = df.round(round_spec)
        df = df[columns]
        fn_out = os.path.join(
            os.path.dirname(fn),
            os.path.splitext(os.path.basename(fn))[0] + '_fmt.csv')
        if write_index:
            df.to_csv(fn_out)
        else:
            df.to_csv(fn_out, index=None)


# Rounding specifications shared between the cumulative-series and
# rate outputs; tile outputs add the tile geometry columns.
_ROUND_CUMUL = {'dh': 3, 'err_dh': 3, 'dvol': 0, 'err_dvol': 0,
                'dm': 4, 'err_dm': 4, 'dt': 1, 'std_dt': 1,
                'perc_area_meas': 3, 'perc_area_res': 3,
                'valid_obs': 2, 'valid_obs_py': 2,
                'area': 0, 'area_nodata': 0}
_ROUND_RATES = {'dhdt': 3, 'err_dhdt': 3, 'dvoldt': 0, 'err_dvoldt': 0,
                'dmdt': 4, 'err_dmdt': 4, 'dmdtda': 3, 'err_dmdtda': 3,
                'perc_area_meas': 3, 'perc_area_res': 3,
                'valid_obs': 2, 'valid_obs_py': 2,
                'area': 0, 'area_nodata': 0}
_ROUND_TILE = {'tile_lonmin': 1, 'tile_latmin': 1, 'tile_size': 1}

# PER GLACIER: cumulative series (keeps the integer index from column 0).
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/dh_06_rgi60_int_base.csv'],
    round_spec={'dh': 3, 'err_dh': 3, 'dt': 1, 'std_dt': 1,
                'perc_area_meas': 3, 'perc_area_res': 3,
                'err_corr_150': 3, 'err_corr_2000': 3, 'err_corr_5000': 3,
                'err_corr_20000': 3, 'err_corr_50000': 3,
                'err_corr_200000': 3, 'valid_obs': 2, 'valid_obs_py': 2,
                'area': 0, 'lon': 4, 'lat': 4, 'perc_err_cont': 3},
    columns=['rgiid', 'time', 'area', 'dh', 'err_dh', 'perc_area_meas',
             'perc_area_res', 'valid_obs', 'valid_obs_py', 'dt', 'std_dt',
             'err_corr_150', 'err_corr_2000', 'err_corr_5000',
             'err_corr_20000', 'err_corr_50000', 'err_corr_200000',
             'lat', 'lon'],
    index_col=0,
    write_index=True)

# RGI O1 REGIONS: regional cumulative series.
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/dh_01_rgi60_int_base_reg.csv'],
    round_spec=_ROUND_CUMUL,
    columns=['reg', 'time', 'area', 'dh', 'err_dh', 'dvol', 'err_dvol',
             'dm', 'err_dm', 'perc_area_meas', 'perc_area_res',
             'valid_obs', 'valid_obs_py', 'area_nodata'],
    drop=['area_valid_obs_py', 'perc_err_cont'])

# RGI O1 REGIONS: regional rates over sub-periods (includes time-varying
# area 'tarea').
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/dh_01_rgi60_int_base_reg_subperiods.csv'],
    round_spec={**_ROUND_RATES, 'tarea': 0},
    columns=['reg', 'period', 'area', 'tarea', 'dhdt', 'err_dhdt',
             'dvoldt', 'err_dvoldt', 'dmdt', 'err_dmdt', 'perc_area_meas',
             'perc_area_res', 'valid_obs', 'valid_obs_py', 'area_nodata'],
    index_col=0)

# TILES: 2-degree tile cumulative series.
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/dh_world_tiles_2deg.csv'],
    round_spec={**_ROUND_CUMUL, **_ROUND_TILE},
    columns=['tile_lonmin', 'tile_latmin', 'tile_size', 'time', 'area',
             'dh', 'err_dh', 'dvol', 'err_dvol', 'dm', 'err_dm',
             'perc_area_meas', 'perc_area_res', 'valid_obs',
             'valid_obs_py', 'area_nodata'],
    drop=['area_valid_obs_py', 'perc_err_cont'])

# TILES: 2-degree tile rates over sub-periods.
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/dh_world_tiles_2deg_subperiods.csv'],
    round_spec={**_ROUND_RATES, **_ROUND_TILE},
    columns=['tile_lonmin', 'tile_latmin', 'tile_size', 'period', 'area',
             'dhdt', 'err_dhdt', 'dvoldt', 'err_dvoldt', 'dmdt',
             'err_dmdt', 'perc_area_meas', 'perc_area_res', 'valid_obs',
             'valid_obs_py', 'area_nodata'],
    index_col=0,
    drop=['tarea'])

# SHP SUBREGIONS (tidewater/non-tidewater sorting): cumulative series.
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/subreg_HIMAP_cumul.csv'],
    round_spec=_ROUND_CUMUL,
    columns=['subreg', 'time', 'area', 'dh', 'err_dh', 'dvol', 'err_dvol',
             'dm', 'err_dm', 'perc_area_meas', 'perc_area_res',
             'valid_obs', 'valid_obs_py', 'area_nodata'])

# SHP SUBREGIONS: rates over sub-periods.
_format_csv(
    ['/home/atom/ongoing/work_worldwide/vol/final/subreg_HIMAP_rates.csv'],
    round_spec=_ROUND_RATES,
    columns=['subreg', 'period', 'area', 'dhdt', 'err_dhdt', 'dvoldt',
             'err_dvoldt', 'dmdt', 'err_dmdt', 'perc_area_meas',
             'perc_area_res', 'valid_obs', 'valid_obs_py', 'area_nodata'],
    index_col=0,
    drop=['tarea'])
| 71.197531 | 202 | 0.702445 |
acf165d7fc65c3219831b4162c2bbb005d87bb5b | 807 | py | Python | divnoising/noisy_img_fetcher.py | ashesh-0/DivNoising | 45a4d3f04041887bcc6a748e15c74520521c003a | [
"BSD-3-Clause"
] | null | null | null | divnoising/noisy_img_fetcher.py | ashesh-0/DivNoising | 45a4d3f04041887bcc6a748e15c74520521c003a | [
"BSD-3-Clause"
] | null | null | null | divnoising/noisy_img_fetcher.py | ashesh-0/DivNoising | 45a4d3f04041887bcc6a748e15c74520521c003a | [
"BSD-3-Clause"
] | null | null | null | import os
from multiprocessing.connection import Client
def get_noisy_img(img, num_angles):
    """Request a noisy version of *img* from the local noise service.

    Connects to a service listening on localhost at the port given by the
    ``NOISY_API_PORT`` environment variable, sends ``img`` and
    ``num_angles``, and returns the ``noisy_img`` field of the reply.

    Raises KeyError if NOISY_API_PORT is unset, ConnectionRefusedError if
    no service is listening.
    """
    port = int(os.environ['NOISY_API_PORT'])
    # Use the connection as a context manager so it is closed even when
    # send()/recv() raises (the original leaked the socket on error).
    with Client(('localhost', port)) as conn:
        conn.send({'img': img, 'num_angles': num_angles})
        data_dict = conn.recv()
    return data_dict['noisy_img']
if __name__ == '__main__':
    # Manual smoke test: fetch a noisy version of a sample image from the
    # noise service and display the original and noisy images side by side.
    # Requires the service to be running and NOISY_API_PORT to be set.
    import numpy as np
    from PIL import Image
    import matplotlib.pyplot as plt
    num_angles = 10
    # NOTE(review): hard-coded local path to a notMNIST sample; the
    # [1:, 1:] crop presumably trims one border row/column to match the
    # size the service expects -- confirm.
    img = np.array(
        Image.open('/home/ashesh/Documents/PhD/data/notMNIST_large/data/A/Q2FzbG9uRml2ZUZvcnR5LVJvbWFuU0Mub3Rm.png'))
    noisy_img = get_noisy_img(img[1:, 1:], num_angles)
    _, ax = plt.subplots(figsize=(10, 5), ncols=2)
    ax[0].imshow(img)
    ax[1].imshow(noisy_img)
    plt.show()
| 26.032258 | 117 | 0.67658 |
acf1666f7b0a00053cd37dc8537ebc9d8d574da7 | 31,149 | py | Python | hrl4in/scripts/train_ppo_base_reaching.py | vk-mittal14/HRL4IN | 692d5d897303bbc2f6cccfa34f86bf7c5396cdf2 | [
"MIT"
] | null | null | null | hrl4in/scripts/train_ppo_base_reaching.py | vk-mittal14/HRL4IN | 692d5d897303bbc2f6cccfa34f86bf7c5396cdf2 | [
"MIT"
] | null | null | null | hrl4in/scripts/train_ppo_base_reaching.py | vk-mittal14/HRL4IN | 692d5d897303bbc2f6cccfa34f86bf7c5396cdf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from time import time
from collections import deque
import random
import numpy as np
import sys
import argparse
import torch
from torch.utils.tensorboard import SummaryWriter
import hrl4in
from hrl4in.utils.logging import logger
from hrl4in.rl.ppo import PPO, Policy, RolloutStorage
from hrl4in.utils.utils import *
from hrl4in.utils.args import *
import gibson2
from gibson2.envs.igibson_env import iGibsonEnv
from gibson2.envs.parallel_env import ParallelNavEnv
# from gibson2.envs.locomotor_env import NavigateEnv, NavigateRandomEnv, InteractiveNavigateEnv
def evaluate(
    envs,
    actor_critic,
    hidden_size,
    num_eval_episodes,
    device,
    writer,
    action_mask,
    update=0,
    count_steps=0,
    eval_only=False,
):
    """Evaluate ``actor_critic`` deterministically on the parallel ``envs``.

    Steps the evaluation environments until at least ``num_eval_episodes``
    episodes have finished (summed across envs), then reports the mean
    reward, success rate, episode length and collision steps: printed when
    ``eval_only`` is True, otherwise logged and written to the tensorboard
    ``writer`` against both ``update`` and ``count_steps`` as x-axes.

    ``action_mask`` is an element-wise 0/1 multiplier applied to the
    sampled actions (e.g. to freeze the arm or the base of the robot).
    ``hidden_size`` is the size of the policy's recurrent hidden state.
    """
    observations = envs.reset()
    batch = batch_obs(observations)
    for sensor in batch:
        batch[sensor] = batch[sensor].to(device)
    # Per-env accumulators: the episode_* sums only advance when an episode
    # ends (mask == 0); current_episode_reward tracks the ongoing episode.
    episode_rewards = torch.zeros(envs._num_envs, 1, device=device)
    episode_success_rates = torch.zeros(envs._num_envs, 1, device=device)
    episode_lengths = torch.zeros(envs._num_envs, 1, device=device)
    episode_collision_steps = torch.zeros(envs._num_envs, 1, device=device)
    episode_counts = torch.zeros(envs._num_envs, 1, device=device)
    current_episode_reward = torch.zeros(envs._num_envs, 1, device=device)
    recurrent_hidden_states = torch.zeros(envs._num_envs, hidden_size, device=device)
    masks = torch.zeros(envs._num_envs, 1, device=device)
    while episode_counts.sum() < num_eval_episodes:
        with torch.no_grad():
            # NOTE(review): act() is called with update=0, pinning any
            # update-dependent behavior (e.g. action std-dev annealing) to
            # its initial value during evaluation -- confirm intended.
            _, actions, _, recurrent_hidden_states = actor_critic.act(
                batch,
                recurrent_hidden_states,
                masks,
                deterministic=True,
                update=0,
            )
        actions_np = actions.cpu().numpy()
        # Zero out masked action dimensions (base-only / arm-only modes).
        actions_np = actions_np*action_mask
        outputs = envs.step(actions_np)
        observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
        batch = batch_obs(observations)
        for sensor in batch:
            batch[sensor] = batch[sensor].to(device)
        rewards = torch.tensor(rewards, dtype=torch.float, device=device)
        rewards = rewards.unsqueeze(1)
        # masks[i] == 0 exactly when env i just finished an episode.
        masks = torch.tensor(
            [[0.0] if done else [1.0] for done in dones],
            dtype=torch.float,
            device=device,
        )
        success_masks = torch.tensor(
            [
                [1.0] if done and "success" in info and info["success"] else [0.0]
                for done, info in zip(dones, infos)
            ],
            dtype=torch.float,
            device=device,
        )
        lengths = torch.tensor(
            [
                [float(info["episode_length"])]
                if done and "episode_length" in info
                else [0.0]
                for done, info in zip(dones, infos)
            ],
            dtype=torch.float,
            device=device,
        )
        collision_steps = torch.tensor(
            [
                [float(info["collision_step"])]
                if done and "collision_step" in info
                else [0.0]
                for done, info in zip(dones, infos)
            ],
            dtype=torch.float,
            device=device,
        )
        # (Commented-out energy-cost / door-stage metrics removed; restore
        # from version control if those env info keys return.)
        current_episode_reward += rewards
        episode_rewards += (1 - masks) * current_episode_reward
        episode_success_rates += success_masks
        episode_lengths += lengths
        episode_collision_steps += collision_steps
        episode_counts += 1 - masks
        current_episode_reward *= masks
    # Means over all finished episodes across all parallel envs.
    episode_reward_mean = (episode_rewards.sum() / episode_counts.sum()).item()
    episode_success_rate_mean = (
        episode_success_rates.sum() / episode_counts.sum()
    ).item()
    episode_length_mean = (episode_lengths.sum() / episode_counts.sum()).item()
    episode_collision_step_mean = (
        episode_collision_steps.sum() / episode_counts.sum()
    ).item()
    if eval_only:
        print(
            "EVAL: num_eval_episodes: {}\treward: {:.3f}\t"
            "success_rate: {:.3f}\tepisode_length: {:.3f}\tcollision_step: {:.3f}\t".format(
                num_eval_episodes,
                episode_reward_mean,
                episode_success_rate_mean,
                episode_length_mean,
                episode_collision_step_mean,
            )
        )
    else:
        logger.info(
            "EVAL: num_eval_episodes: {}\tupdate: {}\t"
            "reward: {:.3f}\tsuccess_rate: {:.3f}\tepisode_length: {:.3f}\tcollision_step: {:.3f}".format(
                num_eval_episodes,
                update,
                episode_reward_mean,
                episode_success_rate_mean,
                episode_length_mean,
                episode_collision_step_mean,
            )
        )
        # Each scalar is logged twice: once against training updates and
        # once against cumulative environment steps.
        writer.add_scalar(
            "eval/updates/reward", episode_reward_mean, global_step=update
        )
        writer.add_scalar(
            "eval/updates/success_rate", episode_success_rate_mean, global_step=update
        )
        writer.add_scalar(
            "eval/updates/episode_length", episode_length_mean, global_step=update
        )
        writer.add_scalar(
            "eval/updates/collision_step",
            episode_collision_step_mean,
            global_step=update,
        )
        writer.add_scalar(
            "eval/env_steps/reward", episode_reward_mean, global_step=count_steps
        )
        writer.add_scalar(
            "eval/env_steps/success_rate",
            episode_success_rate_mean,
            global_step=count_steps,
        )
        writer.add_scalar(
            "eval/env_steps/episode_length",
            episode_length_mean,
            global_step=count_steps,
        )
        writer.add_scalar(
            "eval/env_steps/collision_step",
            episode_collision_step_mean,
            global_step=count_steps,
        )
def main():
parser = argparse.ArgumentParser()
add_ppo_args(parser)
add_env_args(parser)
add_common_args(parser)
args = parser.parse_args()
(
ckpt_folder,
ckpt_path,
start_epoch,
start_env_step,
summary_folder,
log_file,
) = set_up_experiment_folder(args.experiment_folder, args.checkpoint_index, args.use_checkpoint)
random.seed(args.seed)
np.random.seed(args.seed)
device = torch.device("cuda:{}".format(args.pth_gpu_id))
logger.add_filehandler(log_file)
if not args.eval_only:
writer = SummaryWriter(log_dir=summary_folder)
else:
writer = None
for p in sorted(list(vars(args))):
logger.info("{}: {}".format(p, getattr(args, p)))
config_file = os.path.join(
os.path.dirname(gibson2.__file__), "../igibson_usage/new_configs", args.config_file
)
assert os.path.isfile(config_file), "config file does not exist: {}".format(
config_file
)
for (k, v) in parse_config(config_file).items():
logger.info("{}: {}".format(k, v))
def load_env(env_mode, device_idx):
return iGibsonEnv(
config_file=config_file,
mode=env_mode,
action_timestep=args.action_timestep,
physics_timestep=args.physics_timestep,
automatic_reset=True,
device_idx=device_idx,
)
sim_gpu_id = [int(gpu_id) for gpu_id in args.sim_gpu_id.split(",")]
env_id_to_which_gpu = np.linspace(
0,
len(sim_gpu_id),
num=args.num_train_processes + args.num_eval_processes,
dtype=np.int,
endpoint=False,
)
train_envs = [
lambda device_idx=sim_gpu_id[env_id_to_which_gpu[env_id]]: load_env(
"headless", device_idx
)
for env_id in range(args.num_train_processes)
]
train_envs = ParallelNavEnv(train_envs, blocking=False)
eval_envs = [
lambda device_idx=sim_gpu_id[env_id_to_which_gpu[env_id]]: load_env(
"headless", device_idx
)
for env_id in range(
args.num_train_processes,
args.num_train_processes + args.num_eval_processes - 1,
)
]
eval_envs += [lambda: load_env(args.env_mode, sim_gpu_id[env_id_to_which_gpu[-1]])]
eval_envs = ParallelNavEnv(eval_envs, blocking=False)
print(train_envs.observation_space, train_envs.action_space)
cnn_layers_params = [(32, 8, 4, 0), (64, 4, 2, 0), (64, 3, 1, 0)]
action_dim = train_envs.action_space.shape[0]
action_mask = np.ones(action_dim)
if args.use_base_only and (train_envs._envs[0].config["robot"] == "Tiago_Single"):
action_mask[2:] = 0
if args.use_arm_only and (train_envs._envs[0].config["robot"] == "Tiago_Single"):
action_mask[:2] = 0
actor_critic = Policy(
observation_space=train_envs.observation_space,
action_space=train_envs.action_space,
hidden_size=args.hidden_size,
cnn_layers_params=cnn_layers_params,
initial_stddev=args.action_init_std_dev,
min_stddev=args.action_min_std_dev,
stddev_anneal_schedule=args.action_std_dev_anneal_schedule,
stddev_transform=torch.nn.functional.softplus,
)
actor_critic.to(device)
agent = PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm,
use_clipped_value_loss=True,
)
if ckpt_path is not None:
ckpt = torch.load(ckpt_path, map_location=device)
agent.load_state_dict(ckpt["state_dict"])
logger.info("loaded checkpoing: {}".format(ckpt_path))
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in agent.parameters())
)
)
if args.eval_only:
evaluate(
eval_envs,
actor_critic,
args.hidden_size,
args.num_eval_episodes,
device,
writer,
action_mask,
update=0,
count_steps=0,
eval_only=True,
)
return
observations = train_envs.reset()
batch = batch_obs(observations)
rollouts = RolloutStorage(
args.num_steps,
train_envs._num_envs,
train_envs.observation_space,
train_envs.action_space,
args.hidden_size,
)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
rollouts.to(device)
episode_rewards = torch.zeros(train_envs._num_envs, 1)
episode_success_rates = torch.zeros(train_envs._num_envs, 1)
episode_lengths = torch.zeros(train_envs._num_envs, 1)
episode_collision_steps = torch.zeros(train_envs._num_envs, 1)
# episode_total_energy_costs = torch.zeros(train_envs._num_envs, 1, device=device)
# episode_avg_energy_costs = torch.zeros(train_envs._num_envs, 1, device=device)
# episode_stage_open_doors = torch.zeros(train_envs._num_envs, 1, device=device)
# episode_stage_to_targets = torch.zeros(train_envs._num_envs, 1, device=device)
episode_counts = torch.zeros(train_envs._num_envs, 1)
current_episode_reward = torch.zeros(train_envs._num_envs, 1)
window_episode_reward = deque()
window_episode_success_rates = deque()
window_episode_lengths = deque()
window_episode_collision_steps = deque()
# window_episode_total_energy_costs = deque()
# window_episode_avg_energy_costs = deque()
# window_episode_stage_open_doors = deque()
# window_episode_stage_to_targets = deque()
window_episode_counts = deque()
t_start = time()
env_time = 0
pth_time = 0
count_steps = start_env_step
for update in range(start_epoch, args.num_updates):
update_lr(
agent.optimizer,
args.lr,
update,
args.num_updates,
args.use_linear_lr_decay,
0,
)
agent.clip_param = args.clip_param * (1 - update / args.num_updates)
# collect num_steps tuples for each environment
for step in range(args.num_steps):
t_sample_action = time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[step] for k, v in rollouts.observations.items()
}
# values: [num_processes, 1]
# actions: [num_processes, 1]
# actions_log_probs: [num_processes, 1]
# recurrent_hidden_states: [num_processes, hidden_size]
(
values,
actions,
actions_log_probs,
recurrent_hidden_states,
) = actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[step],
rollouts.masks[step],
update=update,
)
pth_time += time() - t_sample_action
t_step_env = time()
actions_np = actions.cpu().numpy()
# outputs:
# [
# (observation, reward, done, info),
# ...
# ...
# (observation, reward, done, info),
# ]
# len(outputs) == num_processes
actions_np = actions_np*action_mask
outputs = train_envs.step(actions_np)
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
env_time += time() - t_step_env
t_update_stats = time()
batch = batch_obs(observations)
rewards = torch.tensor(rewards, dtype=torch.float)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float
)
success_masks = torch.tensor(
[
[1.0] if done and "success" in info and info["success"] else [0.0]
for done, info in zip(dones, infos)
],
dtype=torch.float,
)
lengths = torch.tensor(
[
[float(info["episode_length"])]
if done and "episode_length" in info
else [0.0]
for done, info in zip(dones, infos)
],
dtype=torch.float,
)
collision_steps = torch.tensor(
[
[float(info["collision_step"])]
if done and "collision_step" in info
else [0.0]
for done, info in zip(dones, infos)
],
dtype=torch.float,
)
# total_energy_cost = torch.tensor(
# [[float(info["energy_cost"])] if done and "energy_cost" in info else [0.0]
# for done, info in zip(dones, infos)],
# dtype=torch.float,
# device=device
# )
# avg_energy_cost = torch.tensor(
# [[float(info["energy_cost"]) / float(info["episode_length"])]
# if done and "energy_cost" in info and "episode_length" in info
# else [0.0]
# for done, info in zip(dones, infos)],
# dtype=torch.float,
# device=device
# )
# stage_open_door = torch.tensor(
# [[float(info["stage"] >= 1)] if done and "stage" in info else [0.0]
# for done, info in zip(dones, infos)],
# dtype=torch.float,
# device=device
# )
# stage_to_target = torch.tensor(
# [[float(info["stage"] >= 2)] if done and "stage" in info else [0.0]
# for done, info in zip(dones, infos)],
# dtype=torch.float,
# device=device
# )
current_episode_reward += rewards
episode_rewards += (1 - masks) * current_episode_reward
episode_success_rates += success_masks
episode_lengths += lengths
episode_collision_steps += collision_steps
# episode_total_energy_costs += total_energy_cost
# episode_avg_energy_costs += avg_energy_cost
# episode_stage_open_doors += stage_open_door
# episode_stage_to_targets += stage_to_target
episode_counts += 1 - masks
current_episode_reward *= masks
# s_t+1 - batch["rgb"]: [num_processes, 256, 256, 3],
# s_t+1 - batch["depth"]: [num_processes, 256, 256, 1]
# s_t+1 - batch["pointgoal"]: [num_processes, 2]
# h_t+1 - recurrent_hidden_states: [num_processes, hidden_size]
# a_t - actions: [num_processes. 1]
# a_t - action_log_probs: [num_processes. 1]
# V(s_t) - values: [num_processes. 1]
# r_t - rewards: [num_processes. 1]
# mask_t+1 - masks: [[num_processes. 1]
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards,
masks,
)
count_steps += train_envs._num_envs
pth_time += time() - t_update_stats
if len(window_episode_reward) == args.perf_window_size:
window_episode_reward.popleft()
window_episode_success_rates.popleft()
window_episode_lengths.popleft()
window_episode_collision_steps.popleft()
# window_episode_total_energy_costs.popleft()
# window_episode_avg_energy_costs.popleft()
# window_episode_stage_open_doors.popleft()
# window_episode_stage_to_targets.popleft()
window_episode_counts.popleft()
window_episode_reward.append(episode_rewards.clone())
window_episode_success_rates.append(episode_success_rates.clone())
window_episode_lengths.append(episode_lengths.clone())
window_episode_collision_steps.append(episode_collision_steps.clone())
# window_episode_total_energy_costs.append(episode_total_energy_costs.clone())
# window_episode_avg_energy_costs.append(episode_avg_energy_costs.clone())
# window_episode_stage_open_doors.append(episode_stage_open_doors.clone())
# window_episode_stage_to_targets.append(episode_stage_to_targets.clone())
window_episode_counts.append(episode_counts.clone())
t_update_model = time()
with torch.no_grad():
last_observation = {k: v[-1] for k, v in rollouts.observations.items()}
next_value = actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1],
).detach()
# V(s_t+num_steps) - next_value: [num_processes, 1]
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts, update=update)
rollouts.after_update()
pth_time += time() - t_update_model
# log stats
if update > 0 and update % args.log_interval == 0:
logger.info(
"update: {}\tenv_steps: {}\tenv_steps_per_sec: {:.3f}\tenv-time: {:.3f}s\tpth-time: {:.3f}s".format(
update,
count_steps,
count_steps / (time() - t_start),
env_time,
pth_time,
)
)
logger.info(
"update: {}\tenv_steps: {}\tvalue_loss: {:.3f}\taction_loss: {:.3f}\tdist_entropy: {:.3f}".format(
update, count_steps, value_loss, action_loss, dist_entropy
)
)
writer.add_scalar(
"time/env_step_per_second",
count_steps / (time() - t_start),
global_step=update,
)
writer.add_scalar(
"time/env_time_per_update", env_time / update, global_step=update
)
writer.add_scalar(
"time/pth_time_per_update", pth_time / update, global_step=update
)
writer.add_scalar(
"time/env_steps_per_update", count_steps / update, global_step=update
)
writer.add_scalar("losses/value_loss", value_loss, global_step=update)
writer.add_scalar("losses/action_loss", action_loss, global_step=update)
writer.add_scalar("losses/dist_entropy", dist_entropy, global_step=update)
window_rewards = (
window_episode_reward[-1] - window_episode_reward[0]
).sum()
window_success_rates = (
window_episode_success_rates[-1] - window_episode_success_rates[0]
).sum()
window_lengths = (
window_episode_lengths[-1] - window_episode_lengths[0]
).sum()
window_collision_steps = (
window_episode_collision_steps[-1] - window_episode_collision_steps[0]
).sum()
# window_total_energy_costs = (
# window_episode_total_energy_costs[-1]
# - window_episode_total_energy_costs[0]
# ).sum()
# window_avg_energy_costs = (
# window_episode_avg_energy_costs[-1] - window_episode_avg_energy_costs[0]
# ).sum()
# window_stage_open_doors = (
# window_episode_stage_open_doors[-1] - window_episode_stage_open_doors[0]
# ).sum()
# window_stage_to_targets = (
# window_episode_stage_to_targets[-1] - window_episode_stage_to_targets[0]
# ).sum()
window_counts = (window_episode_counts[-1] - window_episode_counts[0]).sum()
if window_counts > 0:
reward_mean = (window_rewards / window_counts).item()
success_rate_mean = (window_success_rates / window_counts).item()
lengths_mean = (window_lengths / window_counts).item()
collision_steps_mean = (window_collision_steps / window_counts).item()
# total_energy_costs_mean = (
# window_total_energy_costs / window_counts
# ).item()
# avg_energy_costs_mean = (window_avg_energy_costs / window_counts).item()
# stage_open_doors_mean = (window_stage_open_doors / window_counts).item()
# stage_to_targets_mean = (window_stage_to_targets / window_counts).item()
logger.info(
"average window size {}\treward: {:.3f}\tsuccess_rate: {:.3f}\tepisode length: {:.3f}\t"
"collision_step: {:.3f}".format(
len(window_episode_reward),
reward_mean,
success_rate_mean,
lengths_mean,
collision_steps_mean,
# total_energy_costs_mean,
# avg_energy_costs_mean,
# stage_open_doors_mean,
# stage_to_targets_mean,
)
)
writer.add_scalar(
"train/updates/reward", reward_mean, global_step=update
)
writer.add_scalar(
"train/updates/success_rate", success_rate_mean, global_step=update
)
writer.add_scalar(
"train/updates/episode_length", lengths_mean, global_step=update
)
writer.add_scalar(
"train/updates/collision_step",
collision_steps_mean,
global_step=update,
)
# writer.add_scalar(
# "train/updates/total_energy_cost",
# total_energy_costs_mean,
# global_step=update,
# )
# writer.add_scalar(
# "train/updates/avg_energy_cost",
# avg_energy_costs_mean,
# global_step=update,
# )
# writer.add_scalar(
# "train/updates/stage_open_door",
# stage_open_doors_mean,
# global_step=update,
# )
# writer.add_scalar(
# "train/updates/stage_to_target",
# stage_to_targets_mean,
# global_step=update,
# )
writer.add_scalar(
"train/env_steps/reward", reward_mean, global_step=count_steps
)
writer.add_scalar(
"train/env_steps/success_rate",
success_rate_mean,
global_step=count_steps,
)
writer.add_scalar(
"train/env_steps/episode_length",
lengths_mean,
global_step=count_steps,
)
writer.add_scalar(
"train/env_steps/collision_step",
collision_steps_mean,
global_step=count_steps,
)
# writer.add_scalar(
# "train/env_steps/total_energy_cost",
# total_energy_costs_mean,
# global_step=count_steps,
# )
# writer.add_scalar(
# "train/env_steps/avg_energy_cost",
# avg_energy_costs_mean,
# global_step=count_steps,
# )
# writer.add_scalar(
# "train/env_steps/stage_open_door",
# stage_open_doors_mean,
# global_step=count_steps,
# )
# writer.add_scalar(
# "train/env_steps/stage_to_target",
# stage_to_targets_mean,
# global_step=count_steps,
# )
else:
logger.info("No episodes finish in current window")
# checkpoint model
if update > 0 and update % args.checkpoint_interval == 0:
checkpoint = {"state_dict": agent.state_dict()}
torch.save(
checkpoint,
os.path.join(
ckpt_folder,
"ckpt.{}.pth".format(update),
),
)
if update > 0 and update % args.eval_interval == 0:
evaluate(
eval_envs,
actor_critic,
args.hidden_size,
args.num_eval_episodes,
device,
writer,
action_mask,
update=update,
count_steps=count_steps,
eval_only=False,
)
# Entry point: run main() only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
| 37.756364 | 116 | 0.564159 |
acf166904250d1c7080974b44ce80a2e0af5b217 | 761 | py | Python | article_reader.py | DevopediaOrg/wikipedia-reader | c90eb5431b36449afe493df650284fc18a526616 | [
"MIT"
] | null | null | null | article_reader.py | DevopediaOrg/wikipedia-reader | c90eb5431b36449afe493df650284fc18a526616 | [
"MIT"
] | 1 | 2021-03-31T19:48:39.000Z | 2021-03-31T19:48:39.000Z | article_reader.py | DevopediaOrg/wikipedia-reader | c90eb5431b36449afe493df650284fc18a526616 | [
"MIT"
] | null | null | null | import re
import sys
from parsers import WikitextParser, HtmlParser
class ArticleReader:
    """Facade over the wikitext and HTML parsers for reading article content."""

    def __init__(self, **kwargs):
        # Keep the raw configuration; both parsers receive it unchanged.
        self.config = kwargs
        self.wtparser = WikitextParser(**kwargs)
        self.hparser = HtmlParser(**kwargs)

    def get_seed_links(self, text, targets=None):
        """Delegate seed-link extraction entirely to the wikitext parser."""
        return self.wtparser.get_seed_links(text, targets)

    def get_links(self, title, text, html):
        """Return (links, transcludes) gathered from wikitext and rendered HTML.

        Transcludes are collected only when enabled in the configuration;
        otherwise an empty set is returned alongside the links.
        """
        article_links = self.wtparser.get_links(title, text)
        transcluded = set()
        if self.config['transcludes']['enabled']:
            # Union of transcludes seen in the raw wikitext and the HTML render.
            transcluded = self.wtparser.get_transcludes(text)
            transcluded |= self.hparser.get_transcludes(html)
        return article_links, transcluded
| 28.185185 | 61 | 0.664915 |
acf1675304309a4690915e53bf5ad7d6ee893c3b | 489 | py | Python | Luke 15/15.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | [
"MIT"
] | null | null | null | Luke 15/15.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | [
"MIT"
] | null | null | null | Luke 15/15.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | [
"MIT"
] | null | null | null | import numpy as np
# Repeatedly shift the array down by its minimum and drop the zeros,
# recording how many elements survive before each pass.  The loop runs
# exactly once per distinct value in the input.
c, a = [], np.array([23, 74, 26, 23, 92, 92, 44, 13, 34, 23, 69, 4, 19, 94, 94, 38, 14, 9, 51, 98, 72, 46, 17, 25, 21, 87, 99, 50, 59, 53, 82, 24, 93, 16, 88, 52, 14, 38, 27, 7, 18, 81, 13, 75, 80, 11, 29, 39, 37, 78, 55, 17, 78, 12, 77, 84, 63, 29, 68, 32, 17, 55, 31, 30, 3, 17, 99, 6, 45, 81, 75, 31, 50, 93, 66, 98, 94, 59, 68, 30, 98, 57, 83, 75, 68, 85, 98, 76, 91, 23, 53, 42, 72, 77])
while len(a) > 0:
    c.append(len(a))
    a = a - np.min(a)  # current minimum becomes 0
    a = a[a > 0]       # keep only strictly positive remainders
# print() call form works under both Python 2 and 3 for a single argument
# (the original `print c` statement is a SyntaxError on Python 3).
print(c)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.