# Repository: AllanDaemon/vscode-python
# File: pythonFiles/jedi/evaluate/imports.py
"""
:mod:`jedi.evaluate.imports` is here to resolve import statements and return
the modules/classes/functions/whatever they stand for. However, no actual
importing is done; this module is about finding modules in the filesystem,
which can be quite tricky sometimes, because Python imports are not always
that simple.
This module uses imp for Python up to 3.2 and importlib for Python 3.3 and
later; the correct implementation is delegated to _compatibility.
This module also supports import autocompletion, i.e. completing statements
like ``from datetim`` (with the cursor at the end, ``datetime`` is returned).
"""
import os
from parso.python import tree
from parso.tree import search_ancestor
from parso import python_bytes_to_unicode
from jedi._compatibility import unicode, ImplicitNSInfo, force_unicode
from jedi import debug
from jedi import settings
from jedi.parser_utils import get_cached_code_lines
from jedi.evaluate import sys_path
from jedi.evaluate import helpers
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.utils import unite, dotted_from_fs_path
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import AbstractNameDefinition
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
class ModuleCache(object):
def __init__(self):
self._path_cache = {}
self._name_cache = {}
def add(self, module, name):
path = module.py__file__()
self._path_cache[path] = module
self._name_cache[name] = module
def iterate_modules_with_names(self):
return self._name_cache.items()
def get(self, name):
return self._name_cache[name]
def get_from_path(self, path):
return self._path_cache[path]
# This memoization is needed, because otherwise we will infinitely loop on
# certain imports.
@evaluator_method_cache(default=NO_CONTEXTS)
def infer_import(context, tree_name, is_goto=False):
module_context = context.get_root_context()
import_node = search_ancestor(tree_name, 'import_name', 'import_from')
import_path = import_node.get_path_for_name(tree_name)
from_import_name = None
evaluator = context.evaluator
try:
from_names = import_node.get_from_names()
except AttributeError:
# Is an import_name
pass
else:
if len(from_names) + 1 == len(import_path):
# We have to fetch the from_names part first and then check
# if from_names exists in the modules.
from_import_name = import_path[-1]
import_path = from_names
importer = Importer(evaluator, tuple(import_path),
module_context, import_node.level)
types = importer.follow()
#if import_node.is_nested() and not self.nested_resolve:
# scopes = [NestedImportModule(module, import_node)]
if not types:
return NO_CONTEXTS
if from_import_name is not None:
types = unite(
t.py__getattribute__(
from_import_name,
name_context=context,
is_goto=is_goto,
analysis_errors=False
)
for t in types
)
if not is_goto:
types = ContextSet.from_set(types)
if not types:
path = import_path + [from_import_name]
importer = Importer(evaluator, tuple(path),
module_context, import_node.level)
types = importer.follow()
# goto only accepts `Name`
if is_goto:
types = set(s.name for s in types)
else:
# goto only accepts `Name`
if is_goto:
types = set(s.name for s in types)
debug.dbg('after import: %s', types)
return types
class NestedImportModule(tree.Module):
"""
TODO while there's no use case for nested import module right now, we might
be able to use them for static analysis checks later on.
"""
def __init__(self, module, nested_import):
self._module = module
self._nested_import = nested_import
def _get_nested_import_name(self):
"""
Generates an Import statement, that can be used to fake nested imports.
"""
i = self._nested_import
# This is not an existing Import statement. Therefore, set position to
# 0 (0 is not a valid line number).
zero = (0, 0)
names = [unicode(name) for name in i.namespace_names[1:]]
name = helpers.FakeName(names, self._nested_import)
new = tree.Import(i._sub_module, zero, zero, name)
new.parent = self._module
debug.dbg('Generated a nested import: %s', new)
return helpers.FakeName(str(i.namespace_names[1]), new)
def __getattr__(self, name):
return getattr(self._module, name)
def __repr__(self):
return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
self._nested_import)
def _add_error(context, name, message=None):
# Should be a name, not a string!
if message is None:
name_str = str(name.value) if isinstance(name, tree.Name) else name
message = 'No module named ' + name_str
if hasattr(name, 'parent'):
analysis.add(context, 'import-error', name, message)
else:
debug.warning('ImportError without origin: ' + message)
class ImportName(AbstractNameDefinition):
start_pos = (1, 0)
_level = 0
def __init__(self, parent_context, string_name):
self.parent_context = parent_context
self.string_name = string_name
def infer(self):
return Importer(
self.parent_context.evaluator,
[self.string_name],
self.parent_context,
level=self._level,
).follow()
def goto(self):
return [m.name for m in self.infer()]
def get_root_context(self):
# Not sure if this is correct.
return self.parent_context.get_root_context()
@property
def api_type(self):
return 'module'
class SubModuleName(ImportName):
_level = 1
class Importer(object):
def __init__(self, evaluator, import_path, module_context, level=0):
"""
An implementation similar to ``__import__``. Use `follow`
to actually follow the imports.
*level* specifies whether to use absolute or relative imports. 0 (the
default) means only perform absolute imports. Positive values for level
indicate the number of parent directories to search relative to the
directory of the module calling ``__import__()`` (see PEP 328 for the
details).
:param import_path: List of namespaces (strings or Names).
"""
debug.speed('import %s' % (import_path,))
self._evaluator = evaluator
self.level = level
self.module_context = module_context
try:
self.file_path = module_context.py__file__()
except AttributeError:
# Can be None for certain compiled modules like 'builtins'.
self.file_path = None
if level:
base = module_context.py__package__().split('.')
if base == [''] or base == ['__main__']:
base = []
if level > len(base):
path = module_context.py__file__()
if path is not None:
import_path = list(import_path)
p = path
for i in range(level):
p = os.path.dirname(p)
dir_name = os.path.basename(p)
# This is not the proper way to do relative imports. However, since
# Jedi cannot be sure about the entry point, we just calculate an
# absolute path here.
if dir_name:
# TODO those sys.modules modifications are getting
# really stupid. this is the 3rd time that we're using
# this. We should probably refactor.
if path.endswith(os.path.sep + 'os.py'):
import_path.insert(0, 'os')
else:
import_path.insert(0, dir_name)
else:
_add_error(
module_context, import_path[-1],
message='Attempted relative import beyond top-level package.'
)
import_path = []
# If no path is defined in the module we have no ideas where we
# are in the file system. Therefore we cannot know what to do.
# In this case we just let the path there and ignore that it's
# a relative path. Not sure if that's a good idea.
else:
# Here we basically rewrite the level to 0.
base = tuple(base)
if level > 1:
base = base[:-level + 1]
import_path = base + tuple(import_path)
self.import_path = import_path
@property
def str_import_path(self):
"""Returns the import path as pure strings instead of `Name`."""
return tuple(
name.value if isinstance(name, tree.Name) else name
for name in self.import_path
)
def sys_path_with_modifications(self):
sys_path_mod = self._evaluator.get_sys_path() \
+ sys_path.check_sys_path_modifications(self.module_context)
if self.import_path and self.file_path is not None \
and self._evaluator.environment.version_info.major == 2:
# Python2 uses an old strange way of importing relative imports.
sys_path_mod.append(force_unicode(os.path.dirname(self.file_path)))
return sys_path_mod
def follow(self):
if not self.import_path:
return NO_CONTEXTS
return self._do_import(self.import_path, self.sys_path_with_modifications())
def _do_import(self, import_path, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
import_parts = [
force_unicode(i.value if isinstance(i, tree.Name) else i)
for i in import_path
]
# Handle "magic" Flask extension imports:
# ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
# New style.
ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
modules = self._do_import(ipath, sys_path)
if modules:
return modules
else:
# Old style
return self._do_import(('flaskext',) + import_path[2:], sys_path)
module_name = '.'.join(import_parts)
try:
return ContextSet(self._evaluator.module_cache.get(module_name))
except KeyError:
pass
if len(import_path) > 1:
# This is a recursive way of importing that works great with
# the module cache.
bases = self._do_import(import_path[:-1], sys_path)
if not bases:
return NO_CONTEXTS
# We can take the first element, because only the os special
# case yields multiple modules, which is not important for
# further imports.
parent_module = list(bases)[0]
# This is a huge exception, we follow a nested import
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
if import_parts == ['os', 'path']:
return parent_module.py__getattribute__('path')
try:
method = parent_module.py__path__
except AttributeError:
# The module is not a package.
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
else:
paths = method()
debug.dbg('search_module %s in paths %s', module_name, paths)
for path in paths:
# At the moment we are only using one path. So this is
# not important to be correct.
if not isinstance(path, list):
path = [path]
code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
string=import_parts[-1],
path=path,
full_name=module_name
)
if module_path is not None:
break
else:
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
else:
debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
# Override the sys.path. It only works well that way.
# Injecting the path directly into `find_module` did not work.
code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
string=import_parts[-1],
full_name=module_name,
sys_path=sys_path,
)
if module_path is None:
# The module is not a package.
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
module = _load_module(
self._evaluator, module_path, code, sys_path,
module_name=module_name,
safe_module_name=True,
)
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
return NO_CONTEXTS
return ContextSet(module)
def _generate_name(self, name, in_module=None):
# Create a pseudo import to be able to follow them.
if in_module is None:
return ImportName(self.module_context, name)
return SubModuleName(in_module, name)
def _get_module_names(self, search_path=None, in_module=None):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
sub = self._evaluator.compiled_subprocess
names = []
# add builtin module names
if search_path is None and in_module is None:
names += [self._generate_name(name) for name in sub.get_builtin_module_names()]
if search_path is None:
search_path = self.sys_path_with_modifications()
for name in sub.list_module_names(search_path):
names.append(self._generate_name(name, in_module=in_module))
return names
def completion_names(self, evaluator, only_modules=False):
"""
:param only_modules: Indicates whether it's possible to import a
definition that is not defined in a module.
"""
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.context.namespace import ImplicitNamespaceContext
names = []
if self.import_path:
# flask
if self.str_import_path == ('flask', 'ext'):
# List Flask extensions like ``flask_foo``
for mod in self._get_module_names():
modname = mod.string_name
if modname.startswith('flask_'):
extname = modname[len('flask_'):]
names.append(self._generate_name(extname))
# Now the old style: ``flaskext.foo``
for dir in self.sys_path_with_modifications():
flaskext = os.path.join(dir, 'flaskext')
if os.path.isdir(flaskext):
names += self._get_module_names([flaskext])
for context in self.follow():
# Non-modules are not completable.
if context.api_type != 'module': # not a module
continue
# namespace packages
if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'):
paths = context.py__path__()
names += self._get_module_names(paths, in_module=context)
# implicit namespace packages
elif isinstance(context, ImplicitNamespaceContext):
paths = context.paths
names += self._get_module_names(paths, in_module=context)
if only_modules:
# In the case of an import like `from x.` we don't need to
# add all the variables.
if ('os',) == self.str_import_path and not self.level:
# os.path is a hardcoded exception, because it's a
# ``sys.modules`` modification.
names.append(self._generate_name('path', context))
continue
for filter in context.get_filters(search_global=False):
names += filter.values()
else:
# Empty import path means completion directly after ``import``.
if not self.level:
names += self._get_module_names()
if self.file_path is not None:
path = os.path.abspath(self.file_path)
for i in range(self.level - 1):
path = os.path.dirname(path)
names += self._get_module_names([path])
return names
def _load_module(evaluator, path=None, code=None, sys_path=None,
module_name=None, safe_module_name=False):
try:
return evaluator.module_cache.get(module_name)
except KeyError:
pass
try:
return evaluator.module_cache.get_from_path(path)
except KeyError:
pass
if isinstance(path, ImplicitNSInfo):
from jedi.evaluate.context.namespace import ImplicitNamespaceContext
module = ImplicitNamespaceContext(
evaluator,
fullname=path.name,
paths=path.paths,
)
else:
if sys_path is None:
sys_path = evaluator.get_sys_path()
dotted_path = path and dotted_from_fs_path(path, sys_path)
if path is not None and path.endswith(('.py', '.zip', '.egg')) \
and dotted_path not in settings.auto_import_modules:
module_node = evaluator.parse(
code=code, path=path, cache=True, diff_cache=True,
cache_path=settings.cache_directory)
from jedi.evaluate.context import ModuleContext
module = ModuleContext(
evaluator, module_node,
path=path,
code_lines=get_cached_code_lines(evaluator.grammar, path),
)
else:
module = compiled.load_module(evaluator, path=path, sys_path=sys_path)
if module is not None and module_name is not None:
add_module_to_cache(evaluator, module_name, module, safe=safe_module_name)
return module
def add_module_to_cache(evaluator, module_name, module, safe=False):
if not safe and '.' not in module_name:
# We cannot add paths with dots, because that would collide with
# the separator dots for nested packages. Therefore we return
# `__main__` in ModuleWrapper.py__name__(), which is similar to
# Python behavior.
return
evaluator.module_cache.add(module, module_name)
def get_modules_containing_name(evaluator, modules, name):
"""
Search a name in the directories of modules.
"""
def check_directories(paths):
for p in paths:
if p is not None:
# We need abspath, because the settings paths might not already
# have been converted to absolute paths.
d = os.path.dirname(os.path.abspath(p))
for file_name in os.listdir(d):
path = os.path.join(d, file_name)
if file_name.endswith('.py'):
yield path
def check_fs(path):
with open(path, 'rb') as f:
code = python_bytes_to_unicode(f.read(), errors='replace')
if name in code:
e_sys_path = evaluator.get_sys_path()
module_name = sys_path.dotted_path_in_sys_path(e_sys_path, path)
module = _load_module(
evaluator, path, code,
sys_path=e_sys_path, module_name=module_name
)
return module
# skip non python modules
used_mod_paths = set()
for m in modules:
try:
path = m.py__file__()
except AttributeError:
pass
else:
used_mod_paths.add(path)
yield m
if not settings.dynamic_params_for_other_modules:
return
additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules)
# Check the directories of used modules.
paths = (additional | set(check_directories(used_mod_paths))) \
- used_mod_paths
# Sort here to make issues less random.
for p in sorted(paths):
# make testing easier, sort it - same results on every interpreter
m = check_fs(p)
if m is not None and not isinstance(m, compiled.CompiledObject):
yield m
# Repository: musiclvme/distant_speech_recognition
#!/usr/bin/python
"""
Test subband acoustic echo cancellation on the single channel data.
.. moduleauthor:: <NAME>, <NAME> <<EMAIL>>
"""
import argparse, json
import os.path
import pickle
import wave
import sys
import numpy
from btk20.common import *
from btk20.stream import *
from btk20.feature import *
from btk20.modulated import *
from btk20.aec import *
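# Processing chain wired up below: audio file reader -> over-sampled DFT
# analysis filter bank -> subband echo canceller -> over-sampled DFT synthesis
# filter bank -> 16-bit mono WAV writer.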
def test_subband_aec(analysis_filter_path,
synthesis_filter_path,
M, m, r,
input_audio_path,
reference_audio_path,
out_path,
aec_conf,
samplerate=16000):
D = M // 2**r # frame shift (integer division; e.g. M=256, r=1 gives D=128)
# Read analysis prototype 'h'
with open(analysis_filter_path, 'rb') as fp: # binary mode so pickle also works on Python 3
h_fb = pickle.load(fp)
# Read synthesis prototype 'g'
with open(synthesis_filter_path, 'rb') as fp: # binary mode so pickle also works on Python 3
g_fb = pickle.load(fp)
# Instantiation of an audio file reader
input_sample_feat = SampleFeaturePtr(block_len = D, shift_len = D, pad_zeros = True)
reference_sample_feat = SampleFeaturePtr(block_len = D, shift_len = D, pad_zeros = True)
# Instantiation of over-sampled DFT analysis filter bank
input_afb = OverSampledDFTAnalysisBankPtr(input_sample_feat, prototype = h_fb, M = M, m = m, r = r, delay_compensation_type=2)
reference_afb = OverSampledDFTAnalysisBankPtr(reference_sample_feat, prototype = h_fb, M = M, m = m, r = r, delay_compensation_type=2)
# Instantiation of subband AEC
if aec_conf['type'].lower() == 'information_filter':
# Information Kalman filter AEC
aec = InformationFilterEchoCancellationFeaturePtr(reference_afb, input_afb,
sample_num = aec_conf.get('filter_length', 2),
beta = aec_conf.get('beta', 0.95),
sigmau2 = aec_conf.get('sigmau2', 10E-4),
sigmak2 = aec_conf.get('sigmak2', 5.0),
snr_threshold = aec_conf.get('snr_threshold', 0.01),
energy_threshold = aec_conf.get('energy_threshold', 100),
smooth = aec_conf.get('smooth', 0.9),
loading = aec_conf.get('loading', 1.0E-02),
amp4play = aec_conf.get('amp4play', 1.0))
elif aec_conf['type'].lower() == 'square_root_information_filter':
# Square root information filter
aec = SquareRootInformationFilterEchoCancellationFeaturePtr(reference_afb, input_afb,
sample_num = aec_conf.get('filter_length', 2),
beta = aec_conf.get('beta', 0.95),
sigmau2 = aec_conf.get('sigmau2', 10E-4),
sigmak2 = aec_conf.get('sigmak2', 5.0),
snr_threshold = aec_conf.get('snr_threshold', 0.01),
energy_threshold = aec_conf.get('energy_threshold', 100),
smooth = aec_conf.get('smooth', 0.9),
loading = aec_conf.get('loading', 1.0E-02),
amp4play = aec_conf.get('amp4play', 1.0))
elif aec_conf['type'].lower() == 'dtd_block_kalman_filter':
# Kalman filtering AEC with double-talk detector (DTD)
aec = DTDBlockKalmanFilterEchoCancellationFeaturePtr(reference_afb, input_afb,
sample_num = aec_conf.get('filter_length', 2),
beta = aec_conf.get('beta', 0.95),
sigmau2 = aec_conf.get('sigmau2', 10E-4),
sigmak2 = aec_conf.get('sigmak2', 5.0),
snr_threshold = aec_conf.get('snr_threshold', 0.01),
energy_threshold = aec_conf.get('energy_threshold', 100),
smooth = aec_conf.get('smooth', 0.9),
amp4play = aec_conf.get('amp4play', 1.0))
elif aec_conf['type'].lower() == 'nlms':
# Normalized least mean square AEC
aec = NLMSAcousticEchoCancellationFeaturePtr(reference_afb, input_afb,
delta = aec_conf.get('delta', 100.0),
epsilon = aec_conf.get('epsilon', 1.0E-04),
threshold = aec_conf.get('energy_threshold', 100.0))
else:
raise KeyError('Invalid AEC type {}'.format(aec_conf['type']))
# Instantiation of over-sampled DFT synthesis filter bank
sfb = OverSampledDFTSynthesisBankPtr(aec, prototype = g_fb, M = M, m = m, r = r, delay_compensation_type=2)
# Read the observed audio file
input_sample_feat.read(input_audio_path, samplerate)
# Read the reference audio file
reference_sample_feat.read(reference_audio_path, samplerate)
if not os.path.exists(os.path.dirname(out_path)):
try:
os.makedirs(os.path.dirname(out_path))
except OSError:
# the directory may already exist or have been created concurrently
pass
wavefile = wave.open(out_path, 'w')
wavefile.setnchannels(1)
wavefile.setsampwidth(2)
wavefile.setframerate(int(samplerate))
# Perform subband AEC through the oversampled DFT-modulated filter bank
for frame_no, b in enumerate(sfb):
if frame_no % 128 == 0:
print('%0.2f sec. processed' %(frame_no * D / samplerate))
storewave = numpy.array(b, numpy.int16)
wavefile.writeframes(storewave.tobytes()) # tostring() is deprecated in NumPy
wavefile.close()
def build_parser():
M = 256
m = 4
r = 1
protoPath = 'prototype.ny'
analysis_filter_path = '%s/h-M%d-m%d-r%d.pickle' %(protoPath, M, m, r)
synthesis_filter_path = '%s/g-M%d-m%d-r%d.pickle' %(protoPath, M, m, r)
parser = argparse.ArgumentParser(description='test subband AEC.')
parser.add_argument('-a', dest='analysis_filter_path',
default=analysis_filter_path,
help='analysis filter prototype file')
parser.add_argument('-s', dest='synthesis_filter_path',
default=synthesis_filter_path,
help='synthesis filter prototype file')
parser.add_argument('-M', dest='M',
default=M, type=int,
help='no. of subbands')
parser.add_argument('-m', dest='m',
default=m, type=int,
help='Prototype filter length factor')
parser.add_argument('-r', dest='r',
default=r, type=int,
help='Decimation factor')
parser.add_argument('-i', dest='input_audio_path',
default='data/speech_and_reverb_lt.wav',
help='observation audio file')
parser.add_argument('-o', dest='out_path',
default='out/aec_output.wav',
help='output audio file')
parser.add_argument('-p', dest='reference_audio_path',
default='data/lt.wav',
help='reference audio file')
parser.add_argument('-c', dest='aec_conf_path',
default=None,
help='JSON path for AEC configuration')
return parser
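# Example invocation (script name illustrative; file defaults come from
# build_parser() above):
#
#   python test_subband_aec.py -M 256 -m 4 -r 1 \
#       -i data/speech_and_reverb_lt.wav -p data/lt.wav -o out/aec_output.wav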
if __name__ == '__main__':
parser = build_parser()
args = parser.parse_args()
if args.aec_conf_path is None:
# Default AEC configuration
aec_conf={'type':'dtd_block_kalman_filter', # 'information_filter' or 'square_root_information_filter'
'filter_length':36, # length of the subband Kalman filter
'loading':10e-4, # diagonal loading added to the information matrix
'sigmau2':10e-6, # initial variance
'sigmak2':5.0, # initial Kalman gain
'beta':0.95, # forgetting factor for recursive observation noise variance estimation
'snr_threshold':0.01,
'energy_threshold':1.0E+01,
'smooth':0.95,
'amp4play':1.0,
}
else:
with open(args.aec_conf_path, 'r') as jsonfp:
aec_conf = json.load(jsonfp)
print('AEC config.')
print(json.dumps(aec_conf, indent=4))
print('')
test_subband_aec(args.analysis_filter_path,
args.synthesis_filter_path,
args.M, args.m, args.r,
args.input_audio_path,
args.reference_audio_path,
args.out_path,
aec_conf,
samplerate=16000)
# Repository: gaceladri/draft
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import functools
import collections
from code.util import cast_like, dropout_with_broadcast_dims, should_generate_summaries, shape_list
from code.util import _generate_relative_positions_embeddings, _relative_attention_inner, _relative_position_to_absolute_position_masked
from code.util import _absolute_position_to_relative_position_masked, attention_bias_lower_triangle, ones_matrix_band_part
from code.util import gather_dilated_memory_blocks, reshape_by_blocks, embedding_to_padding, flatten_all_but_last
from code.util import add_name_scope, add_var_scope
# NOTE (assumption): these helpers are expected to live in the same util module;
# sparse_dot_product_attention and multihead_attention_sparse_dot_prod below
# reference them, but the original file never imported them.
from code.util import LshGating, map_fn_switch, dot_product_single_head, multihead_attention
def dot_product_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
save_weights_to=None,
dropout_broadcast_dims=None):
"""dot-product attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
A Tensor.
"""
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
if bias is not None:
bias = cast_like(bias, logits)
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if save_weights_to is not None:
save_weights_to[scope.name] = weights
save_weights_to[scope.name + "/logits"] = logits
# dropping out the attention links for each of the heads
weights = dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return tf.matmul(weights, v)
def dot_product_attention_relative(q,
k,
v,
bias,
max_relative_position,
dropout_rate=0.0,
image_shapes=None,
name=None):
"""Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations from the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer specifying the maximum distance between
inputs that unique position embeddings should be learned for.
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
Returns:
A Tensor.
Raises:
ValueError: if max_relative_position is not > 0.
"""
if not max_relative_position:
raise ValueError("Max relative position (%s) should be > 0 when using "
"relative self attention." % (max_relative_position))
with tf.variable_scope(
name, default_name="dot_product_attention_relative", values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
# Use separate embeddings suitable for keys and values.
depth = q.get_shape().as_list()[3]
length = shape_list(q)[2]
relations_keys = _generate_relative_positions_embeddings(
length, depth, max_relative_position, "relative_positions_keys")
relations_values = _generate_relative_positions_embeddings(
length, depth, max_relative_position, "relative_positions_values")
# Compute self attention considering the relative position embeddings.
logits = _relative_attention_inner(q, k, relations_keys, True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
weights = tf.nn.dropout(weights, 1.0 - dropout_rate)
return _relative_attention_inner(weights, v, relations_values, False)
def dot_product_self_attention_relative_v2(q,
k,
v,
bias,
max_length=None,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
dropout_broadcast_dims=None):
"""Calculate relative position-aware dot-product self-attention.
Only works for masked self-attention (no looking forward).
TODO(noam): extend to unmasked self-attention
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_length: an integer - changing this invalidates checkpoints
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
A Tensor.
"""
with tf.variable_scope(
name,
default_name="dot_product_self_attention_relative_v2",
values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
# Use separate embeddings suitable for keys and values.
length = shape_list(q)[2]
assert max_length is not None
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
# now add relative logits
# [batch, num_heads, query_length, max_length]
rel_logits = tf.layers.dense(q, max_length, name="rel0")
# [batch, num_heads, query_length, max_length]
rel_logits = tf.slice(rel_logits, [0, 0, 0, max_length - length],
[-1, -1, -1, -1])
rel_logits = _relative_position_to_absolute_position_masked(rel_logits)
logits += rel_logits
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# dropping out the attention links for each of the heads
weights = dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
ret = tf.matmul(weights, v)
# [batch, num_heads, query_length, memory_length]
relative_weights = _absolute_position_to_relative_position_masked(
weights)
# [batch, num_heads, query_length, memory_length]
relative_weights = tf.pad(
relative_weights, [[0, 0], [0, 0], [0, 0], [max_length - length, 0]])
relative_weights.set_shape([None, None, None, max_length])
depth_v = shape_list(v)[3]
ret += tf.layers.dense(relative_weights, depth_v, name="rel1")
return ret
BatchInfo = collections.namedtuple("BatchInfo", "coordinates, order")
@add_name_scope()
def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params):
"""Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrix are only
computed each times on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sqrt(depth)).
* The padding should have been removed (so batch size should be 1 but length
contains the elements from all different batches)
* Right now, only self attention is supported so length_q and length_kv
should be identical and the function will add triangular mask.
* If bi.order is not None, the bias is added inside this function to
prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k]
k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k]
v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v]
bi (BatchInfo): Contains the batch coordinates and sequence order
use_map_fn (bool): Use either tf.map_fn or a python for loop to compute the
heads separately
experts_params (dict): Additional params for the local expert
Returns:
tf.Tensor: Approximation of Softmax(Q.K) * V, of shape
[batch, heads, length_q, depth_v]
"""
batch_size, nb_heads, _, depth = shape_list(q)
@add_name_scope()
def flatten_first_dims(x):
"""Reshape such that x is [num_heads, -1, depth]."""
# Case 1: Either constant batch size of size 1 or batch already flattened
if x.get_shape().as_list()[0] == 1:
return tf.squeeze(x, axis=0)
# Case 2: Flatten batch dimension
x = tf.transpose(x, perm=[1, 0, 2, 3])
x = tf.reshape(x, [nb_heads, -1, depth])
return x
def flatten_batch(x):
if x is None:
return x
return flatten_all_but_last(x)
q = flatten_first_dims(q)
k = flatten_first_dims(k)
v = flatten_first_dims(v)
bi = BatchInfo(
coordinates=flatten_batch(bi.coordinates),
order=flatten_batch(bi.order))
# Unstack heads
list_q = tf.unstack(q) # list[tf.Tensor(shape=batch * length, depth)]
list_k = tf.unstack(k)
list_v = tf.unstack(v)
list_gates_q = []
list_gates_k = []
total_loss = 0.0
# There might be a more optimized way to compute all heads at once
for single_q, single_k, _ in zip(list_q, list_k, list_v):
# Each head get its own dispatcher
lsh_gating = LshGating(
depth=single_k.get_shape().as_list()[-1], **experts_params)
list_gates_q.append(lsh_gating.get_gates(single_q))
list_gates_k.append(lsh_gating.get_gates(single_k))
gates_q = tf.stack(list_gates_q)
gates_k = tf.stack(list_gates_k)
# Process each head separately.
v_out = map_fn_switch(
lambda args: dot_product_single_head(bi=bi, *args),
elems=(q, k, v, gates_q, gates_k),
dtype=(tf.float32),
parallel_iterations=2,
use_map_fn=use_map_fn)
# Restore original shape as expected by multihead_attention
if isinstance(batch_size, int) and batch_size == 1:
v_out = tf.expand_dims(v_out, axis=0) # Restore batch_size = 1
else:
v_out = tf.reshape(v_out, [nb_heads, batch_size, -1, depth])
v_out = tf.transpose(v_out, [1, 0, 2, 3])
return v_out, total_loss / nb_heads
multihead_attention_sparse_dot_prod = functools.partial(
multihead_attention, attention_type=sparse_dot_product_attention)
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None):
"""Attention to the source and a neighborhood to the left within a block.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position in the corresponding block
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="within_local_attention_1d", values=[q, k, v]):
v_shape = v.get_shape()
batch, heads, length, _ = shape_list(q)
if isinstance(block_length, tf.Tensor):
const = tf.contrib.util.constant_value(block_length)
if const is not None:
block_length = int(const)
depth_k = shape_list(k)[3]
depth_v = shape_list(v)[3]
original_length = length
padding_size = tf.mod(-length, block_length)
length += padding_size
padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
q = tf.pad(q, padding)
k = tf.pad(k, padding)
v = tf.pad(v, padding)
num_blocks = tf.div(length, block_length)
# compute attention for all subsequent query blocks.
q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
# attention shape: [batch, heads, num_blocks, block_length, block_length]
attention = tf.matmul(q, k, transpose_b=True)
attention += tf.reshape(
attention_bias_lower_triangle(block_length),
[1, 1, 1, block_length, block_length])
attention = tf.nn.softmax(attention)
# initial output shape: [batch, heads, num_blocks, block_length, depth_v]
output = tf.matmul(attention, v)
output = tf.reshape(output, [batch, heads, -1, depth_v])
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_shape)
return output
def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None):
"""strided block local self-attention.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
filter_width: an integer indicating how much to look left.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="local_self_attention_1d", values=[q, k, v]):
v_shape = v.get_shape()
depth_v = shape_list(v)[3]
batch_size = shape_list(q)[0]
num_heads = shape_list(q)[1]
original_length = shape_list(q)[2]
# making sure the length of q is a multiple of block_length
def pad_to_multiple(x, pad_length):
x_length = shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l_and_r(x, pad_length):
return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]])
q = pad_to_multiple(q, block_length)
k = pad_to_multiple(k, block_length)
v = pad_to_multiple(v, block_length)
# Setting up q blocks
new_q_shape = shape_list(q)
# Setting up q blocks
q = tf.reshape(q, [
new_q_shape[0], new_q_shape[1], new_q_shape[2] // block_length,
block_length, new_q_shape[3]
])
# Setting up k and v values
k = pad_l_and_r(k, filter_width)
v = pad_l_and_r(v, filter_width)
length = shape_list(k)[2]
full_filter_width = block_length + 2 * filter_width
# getting gather indices
indices = tf.range(0, length, delta=1, name="index_range")
# making indices [1, length, 1] to apply convs
indices = tf.reshape(indices, [1, -1, 1])
kernel = tf.expand_dims(tf.eye(full_filter_width), axis=1)
gather_indices = tf.nn.conv1d(
tf.cast(indices, tf.float32),
kernel,
block_length,
padding="VALID",
name="gather_conv")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
k_new = tf.gather(k_t, gather_indices)
# [batch, heads, blocks, block_length, dim]
k_new = tf.transpose(k_new, [2, 3, 0, 1, 4])
attention_bias = tf.expand_dims(
embedding_to_padding(k_new) * -1e9, axis=-2)
v_t = tf.transpose(v, [2, 0, 1, 3])
v_new = tf.gather(v_t, gather_indices)
v_new = tf.transpose(v_new, [2, 3, 0, 1, 4])
output = dot_product_attention(
q,
k_new,
v_new,
attention_bias,
dropout_rate=0.,
name="local_1d")
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_shape)
return output
def masked_local_attention_1d(q,
k,
v,
block_length=128,
name=None):
"""Attention to the source position and a neighborhood to the left of it.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
A target position can never see source positions greater than itself.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="local_attention_1d", values=[q, k, v]):
batch = shape_list(q)[0]
heads = shape_list(q)[1]
length = shape_list(q)[2]
if isinstance(block_length, tf.Tensor):
const = tf.contrib.util.constant_value(block_length)
if const is not None:
block_length = int(const)
# If (length < 2 * block_length), then we use only one block.
if isinstance(length, int) and isinstance(block_length, int):
block_length = length if length < block_length * 2 else block_length
else:
block_length = tf.where(
tf.less(length, block_length * 2), length, block_length)
depth_k = shape_list(k)[3]
depth_v = shape_list(v)[3]
original_length = length
padding_size = tf.mod(-length, block_length)
length += padding_size
padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
q = tf.pad(q, padding)
k = tf.pad(k, padding)
v = tf.pad(v, padding)
if isinstance(length, int) and isinstance(block_length, int):
num_blocks = length // block_length
else:
num_blocks = tf.div(length, block_length)
# compute attention for the first query block.
first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1])
first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1])
first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1])
first_output = dot_product_attention(
first_q,
first_k,
first_v,
attention_bias_lower_triangle(block_length),
name="fist_block")
# compute attention for all subsequent query blocks.
q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
def local(x, depth):
"""Create a local version of the keys or values."""
prev_block = tf.slice(x, [0, 0, 0, 0, 0],
[-1, -1, num_blocks - 1, -1, -1])
cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
local_block = tf.concat([prev_block, cur_block], 3)
return tf.reshape(local_block,
[batch, heads, num_blocks - 1, block_length * 2, depth])
local_k = local(k, depth_k)
local_v = local(v, depth_v)
tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
tail_q = tf.reshape(tail_q,
[batch, heads, num_blocks - 1, block_length, depth_k])
local_length = shape_list(local_k)[3]
# [batch, heads, num_blocks - 1, block_length, local_length]
attention = tf.matmul(tail_q, local_k, transpose_b=True)
# make sure source_pos <= target_pos
good_part = ones_matrix_band_part(block_length, local_length,
-1, block_length)
mask = (1.0 - good_part) * -1e9
mask = cast_like(mask, attention)
attention += tf.reshape(mask, [1, 1, 1, block_length, local_length])
attention = tf.nn.softmax(attention)
# TODO(noam): figure out how to show a summary for the remaining blocks.
# The naive way currently causes errors due to empty tensors.
# output: [batch, heads, num_blocks-1, block_length, depth_v]
output = tf.matmul(attention, local_v)
output = tf.reshape(
output, [batch, heads, (num_blocks - 1) * block_length, depth_v])
output = tf.concat([first_output, output], axis=2)
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output = tf.reshape(output, [batch, heads, original_length, depth_v])
return output
def masked_dilated_self_attention_1d(q,
k,
v,
query_block_size=64,
memory_block_size=64,
gap_size=2,
num_memory_blocks=2,
name=None):
"""dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
query_block_size: an integer
memory_block_size: an integer indicating how much to look left.
gap_size: an integer indicating the gap size
num_memory_blocks: how many memory blocks to look at to the left. Each will
be separated by gap_size.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]):
v_list_shape = v.get_shape().as_list()
v_shape = shape_list(v)
depth_v = v_shape[3]
batch_size = v_shape[0]
num_heads = v_shape[1]
original_length = shape_list(q)[2]
# making sure q is a multiple of query block size
def pad_to_multiple(x, pad_length):
x_length = shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l(x, left_pad_length):
return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]])
q = pad_to_multiple(q, query_block_size)
v = pad_to_multiple(v, query_block_size)
k = pad_to_multiple(k, query_block_size)
q.set_shape(v_list_shape)
v.set_shape(v_list_shape)
k.set_shape(v_list_shape)
# Setting up q blocks
new_q_shape = shape_list(q)
# Setting up q blocks
q = reshape_by_blocks(q, new_q_shape, query_block_size)
self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size)
self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size)
# Setting up k and v windows
k_v_padding = (gap_size + memory_block_size) * num_memory_blocks
k = pad_l(k, k_v_padding)
v = pad_l(v, k_v_padding)
# Getting gather indices
index_length = (new_q_shape[2] - query_block_size + memory_block_size)
indices = tf.range(0, index_length, delta=1, name="index_range")
# Making indices [1, length, 1] to apply convs
indices = tf.reshape(indices, [1, -1, 1])
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(
tf.cast(indices, tf.float32),
kernel,
query_block_size,
padding="VALID",
name="gather_conv")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# Get left and right memory blocks for each query
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
v_t = tf.transpose(v, [2, 0, 1, 3])
k_unmasked_windows = gather_dilated_memory_blocks(
k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
v_unmasked_windows = gather_dilated_memory_blocks(
v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
# combine memory windows
block_q_shape = shape_list(q)
masked_attention_bias = tf.tile(
tf.expand_dims(attention_bias_lower_triangle(
query_block_size), axis=0),
[block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1])
padding_attention_bias = tf.expand_dims(
embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2)
padding_attention_bias = tf.tile(padding_attention_bias,
[1, 1, 1, query_block_size, 1])
attention_bias = tf.concat(
[masked_attention_bias, padding_attention_bias], axis=-1)
# Combine memory windows
k_windows = tf.concat([self_k_part, k_unmasked_windows], 3)
v_windows = tf.concat([self_v_part, v_unmasked_windows], 3)
output = dot_product_attention(
q,
k_windows,
v_windows,
attention_bias,
dropout_rate=0.,
name="dilated_1d")
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_list_shape)
return output
def dilated_self_attention_1d(q,
k,
v,
query_block_size=128,
memory_block_size=128,
gap_size=2,
num_memory_blocks=2,
name=None):
"""dilated self-attention.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the size of a memory block.
gap_size: an integer indicating the gap size
num_memory_blocks: how many memory blocks to look at to the left and right.
Each will be separated by gap_size.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="dilated_self_attention_1d", values=[q, k, v]):
v_list_shape = v.get_shape().as_list()
v_shape = shape_list(v)
depth_v = v_shape[3]
batch_size = v_shape[0]
num_heads = v_shape[1]
original_length = shape_list(q)[2]
# making sure q is a multiple of query block size
def pad_to_multiple(x, pad_length):
x_length = shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l_and_r(x, pad_length):
return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]])
q = pad_to_multiple(q, query_block_size)
v = pad_to_multiple(v, query_block_size)
k = pad_to_multiple(k, query_block_size)
q.set_shape(v_list_shape)
v.set_shape(v_list_shape)
k.set_shape(v_list_shape)
# Setting up q blocks
new_q_shape = shape_list(q)
# Setting up q blocks
q = reshape_by_blocks(q, new_q_shape, query_block_size)
self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size)
self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size)
# Setting up k and v windows
k_v_padding = (gap_size + memory_block_size) * num_memory_blocks
k = pad_l_and_r(k, k_v_padding)
v = pad_l_and_r(v, k_v_padding)
# getting gather indices
index_length = (new_q_shape[2] - query_block_size + memory_block_size)
indices = tf.range(0, index_length, delta=1, name="index_range")
# making indices [1, length, 1] to apply convs
indices = tf.reshape(indices, [1, -1, 1])
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(
tf.cast(indices, tf.float32),
kernel,
query_block_size,
padding="VALID",
name="gather_conv")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# get left and right memory blocks for each query
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
v_t = tf.transpose(v, [2, 0, 1, 3])
left_k = gather_dilated_memory_blocks(
k_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size,
query_block_size, memory_block_size, gather_indices)
left_v = gather_dilated_memory_blocks(
v_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size,
query_block_size, memory_block_size, gather_indices)
right_k = gather_dilated_memory_blocks(
k_t[k_v_padding:, :, :, :],
num_memory_blocks,
gap_size,
query_block_size,
memory_block_size,
gather_indices,
direction="right")
right_v = gather_dilated_memory_blocks(
v_t[k_v_padding:, :, :, :],
num_memory_blocks,
gap_size,
query_block_size,
memory_block_size,
gather_indices,
direction="right")
k_windows = tf.concat([left_k, self_k_part, right_k], axis=3)
v_windows = tf.concat([left_v, self_v_part, right_v], axis=3)
attention_bias = tf.expand_dims(
embedding_to_padding(k_windows) * -1e9, axis=-2)
output = dot_product_attention(
q,
k_windows,
v_windows,
attention_bias,
dropout_rate=0.,
name="dilated_1d")
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_list_shape)
return output
# ---- Benchling bulk antibody registration script ----
import json
import click
import requests
import time
class BadRequestException(Exception):
def __init__(self, message, rv):
super(BadRequestException, self).__init__(message)
self.rv = rv
def api_get(domain, api_key, path):
url = "https://{}/api/v2/{}".format(domain, path)
rv = requests.get(url, auth=(api_key, ""))
if rv.status_code >= 400:
raise BadRequestException(
"Server returned status {}. Response:\n{}".format(
rv.status_code, json.dumps(rv.json())
),
rv,
)
return rv.json()
def api_post(domain, api_key, path, body):
url = "https://{}/api/v2/{}".format(domain, path)
rv = requests.post(url, json=body, auth=(api_key, ""))
if rv.status_code >= 400:
raise BadRequestException(
"Server returned status {}. Response:\n{}".format(
rv.status_code, json.dumps(rv.json())
),
rv,
)
return rv.json()
def wait_on_task_response(domain, api_key, task_resource):
task_id = task_resource["taskId"]
while True:
task_response = api_get(domain, api_key, "tasks/{}".format(task_id))
status = task_response["status"]
if status == "RUNNING":
time.sleep(10)
else:
if status == "FAILED":
return "FAILED", task_response["message"], task_response["errors"]
elif status == "SUCCEEDED":
return "SUCCEEDED", task_response["response"]
@click.command()
@click.option(
"--domain",
help="Domain name of your Benchling instance, e.g. example.benchling.com",
required=True,
)
@click.option("--api-key", help="Your API key", required=True)
@click.option(
"--folder-id", help="ID of a folder to create the antibody in", required=True
)
@click.option("--registry-id", help="ID of the Benchling Registry", required=True)
@click.option("--antibody-schema-id", help="ID of the Antibody schema", required=True)
@click.option(
"--chain-schema-id",
help="ID of the Chain schema (Must be an AA-Sequence)",
required=True,
)
@click.argument("json_file_to_import", type=click.File("r"))
def main(
domain,
api_key,
antibody_schema_id,
registry_id,
chain_schema_id,
folder_id,
json_file_to_import,
):
antibodies_obj = json.loads(json_file_to_import.read())
antibody_objs = antibodies_obj["antibodies"]
# Bulk create heavy chains into registry.
task_resource = api_post(
domain,
api_key,
# https://docs.benchling.com/reference#bulk-create-aa-sequences
"aa-sequences:bulk-create",
{
"aaSequences": [
{
"aminoAcids": antibody_obj["Heavy Chain"],
"folderId": folder_id,
"name": "Heavy Chain for {}".format(antibody_obj["name"]),
"schemaId": chain_schema_id,
"registryId": registry_id,
"namingStrategy": "NEW_IDS",
}
for antibody_obj in antibody_objs
]
},
)
task_response = wait_on_task_response(domain, api_key, task_resource)
if task_response[0] == "FAILED":
print(
"Could not register at least one heavy chain. Error response from server:\n{}\n{}".format(
task_response[1],
task_response[2],
)
)
return
bulk_registered_heavy_chain_response_obj = task_response[1]["aaSequences"]
print("Successfully registered heavy chains")
# Bulk create light chains into registry.
task_resource = api_post(
domain,
api_key,
# https://docs.benchling.com/reference#bulk-create-aa-sequences
"aa-sequences:bulk-create",
{
"aaSequences": [
{
"aminoAcids": antibody_obj["Light Chain"],
"folderId": folder_id,
"name": "Light Chain for {}".format(antibody_obj["name"]),
"schemaId": chain_schema_id,
"registryId": registry_id,
"namingStrategy": "NEW_IDS",
}
for antibody_obj in antibody_objs
]
},
)
task_response = wait_on_task_response(domain, api_key, task_resource)
if task_response[0] == "FAILED":
print(
"Could not register at least one light chain. Error response from server:\n{}\n{}".format(
task_response[1],
task_response[2],
)
)
return
bulk_registered_light_chain_response_obj = task_response[1]["aaSequences"]
print("Successfully registered light chains")
# Bulk create antibodies in registry.
task_resource = api_post(
domain,
api_key,
# https://docs.benchling.com/reference#bulk-create-custom-entities
"custom-entities:bulk-create",
{
"customEntities": [
{
"name": antibody_obj["name"],
"schemaId": antibody_schema_id,
"folderId": folder_id,
"registryId": registry_id,
"namingStrategy": "NEW_IDS",
"fields": {
"Heavy Chain": {"value": heavy_chain_obj["entityRegistryId"]},
"Light Chain": {"value": light_chain_obj["entityRegistryId"]},
},
}
for antibody_obj, heavy_chain_obj, light_chain_obj in zip(
antibody_objs,
bulk_registered_heavy_chain_response_obj,
bulk_registered_light_chain_response_obj,
)
]
},
)
task_response = wait_on_task_response(domain, api_key, task_resource)
if task_response[0] == "FAILED":
print(
"Could not register at least one antibody. Error response from server:\n{}\n{}".format(
task_response[1],
task_response[2],
)
)
return
bulk_registered_antibody_response_obj = task_response[1]["customEntities"]
for antibody_obj, heavy_chain_obj, light_chain_obj in zip(
bulk_registered_antibody_response_obj,
bulk_registered_heavy_chain_response_obj,
bulk_registered_light_chain_response_obj,
):
print(
"Registered new Antibody {} with Heavy Chain {} and Light Chain {}".format(
antibody_obj["entityRegistryId"],
heavy_chain_obj["entityRegistryId"],
light_chain_obj["entityRegistryId"],
)
)
if __name__ == "__main__":
main()
# ---- folder_to_lib.py ----
# TODO: Add folder_to_lib functionality
# Friday, February 5, 2020
"""
**folder_to_lib.py**
This module will hold the functions that combine path/to/folder outputs from
cpgfunction into a library.json file.
"""
# import os
# from . import platform_specific
# from . import fileio
# from . import handle_contents
# from . import featurerecognition
# from . import access
#
#
# class RecognizeFeatures:
# # TODO: this should take in the lib_style and a borefield object
# def __init__(self, bf: handle_contents.Borefield, lib_style: str):
# """
# Get information about a field by feature recognition.
#
# Parameters
# ----------
# bf: handle_contents.borefield
# A borefield object
# lib_style: str
# The library style:
# - "rectangle"
# - "L"
# - "U"
# - "Open"
# - "zoned"
# """
# self.bf = bf
# self.lib_style = lib_style
#
# a = 1
#
# @staticmethod
# def uniform_layout(path_to_file: str):
#
# data: dict = fileio.js_r(path_to_file) # read the data into a dictionary
# bf = handle_contents.Borefield(data) # load a borefield object using the dictionary
# x, y = list(zip(*bf.bore_locations)) # separate coordinates into x and y lists
# features = featurerecognition.FeatureRecognition(x, y)
# return features
#
# @staticmethod
# def zoned_rectangle(file_name: str):
# """
# Given a filename.json which is a filename associated with a zoned rectangle,
# return the Nx, Ny, Nix, Niy values.
#
# Returns
# -------
# key_contents: dict
# A dictionary containing the deciphered key with the following primary keys
#
# Nx: int
# Number of boreholes in the x-direction
# Ny: int
# Number of boreholes in the y-direction
# Nix: int
# Number of interior boreholes in the x-direction
# Niy: int
# Number of interior boreholes in the y-direction
# """
# # split the file by period
# split_file_period = file_name.split('.')
# # if the file extension is not provided, and just the string, then handle it
# if split_file_period[-1] == 'json':
# _key = split_file_period[-2]
# else:
# _key = split_file_period[-1]
#
# slash = platform_specific.get_slash_style() # get the \ or / slash
#
# split_key_by_slash = _key.split(slash) # split the path by that slash
#
# key = split_key_by_slash[-1] # the raw key
#
# split_key_by_underscore = key.split('_')
#
# Nx = int(split_key_by_underscore[1])
# Ny = int(split_key_by_underscore[3])
# Nix = int(split_key_by_underscore[4])
# Niy = int(split_key_by_underscore[6])
#
# key_contents = {'Nx': Nx, 'Ny': Ny, 'Nix': Nix, 'Niy': Niy}
#
# return key_contents
#
#
# class FolderToLib(RecognizeFeatures):
# """
#
#
# Parameters
# ----------
# path_to_folder
# lib_type: str
# - zoned
# - uniform (same for the following)
# - rectangle
# - L
# - U
# - Open
# """
# def __init__(self, path_to_folder, lib_type='zoned'):
# # super().__init__()
# self.path_to_folder = path_to_folder
# self.lib_type = lib_type
#
# def create_report(self):
# """
# Each library style stores different information in its report. This method uses the
# recognize_features function to build a report for the given library type.
#
# Returns
# -------
# report_info: dict
# A report containing information associated with the library style
# """
# files = os.listdir(self.path_to_folder) # get a list of the files
# slash = platform_specific.get_slash_style()
# path_to_folder_split = self.path_to_folder.split(slash)
# if path_to_folder_split[-1] == '':
# del path_to_folder_split[-1]
#
# report_info: dict = {'file_path': []} # place to hold the information for the report
#
# for i, file in enumerate(files):
# path_to_file_list = path_to_folder_split + [file]
# path_to_file = slash.join(path_to_file_list)
# report_info['file_path'].append(path_to_file)
# data: dict = fileio.js_r(path_to_file)
# bf = handle_contents.Borefield(data)
# # get the relevant information
# relevant_info = featurerecognition.recognize_features(bf, self.lib_type)
#
# # load the relevant information into the report_info dictionary
# for key in relevant_info:
# if key not in report_info:
# report_info[key] = []
# report_info[key].append(relevant_info[key])
#
# return report_info
#
# def create_lib_file(self):
# """
# This uses the information in the report to create a library file
#
# Returns
# -------
# library_file: dict
# A library file to be used stored for use in the package.
# """
# lib_info = access.LibraryAccess(lib_style=None, display=False)
#
# library_file = {}
#
# report_info = self.create_report()
#
# file_paths: list = report_info['file_path']
# del report_info['file_path']
#
# keys = list(report_info.keys())
# n_rows = len(report_info[keys[0]])
#
# for i in range(n_rows):
# file_path = file_paths[i]
# Nx = report_info['Nx'][i]
# Ny = report_info['Ny'][i]
# # create a key for the library
# key = lib_info.create_key(Nx, Ny)
# # if the key is not in the library file dictionary, add it
# if key not in library_file:
# library_file[key] = {}
#
# if 'zoned' in self.lib_type:
# Nx = report_info['Nx'][i]
# Ny = report_info['Ny'][i]
# Nix = report_info['Nix'][i]
# Niy = report_info['Niy'][i]
# file_path = file_paths[i]
# # the primary key in the zoned rectangle lib file
# primary_key = str(Nx) + '_' + str(Ny)
# if primary_key not in library_file:
# library_file[primary_key] = {}
# # the secondary key in the zoned rectangle lib file
# secondary_key = str(Nix) + '_' + str(Niy)
#
# # read the cpgfunction output into a dictionary
# cpgf_output = fileio.js_r(file_path)
#
# library_file[primary_key][secondary_key] = cpgf_output
# elif 'uniform' in self.lib_type:
# # store these as MXN where M < N
# n = int(report_info['Nx'][i])
# m = int(report_info['Ny'][i])
# if n < m: # swap if necessary
# m, n = n, m
# key = str(m) + '_' + str(n)
#
# # read the cpgfunction output into a dictionary
# file_path = file_paths[i]
# cpgf_output = fileio.js_r(file_path)
#
# library_file[key] = cpgf_output
# elif 'U' in self.lib_type or 'Open' in self.lib_type:
# nested = report_info['nested'][i]
# library_file[key][nested] = fileio.js_r(file_path)
# else:
# raise ValueError('The library type input is not currently handled by this object.')
#
# return library_file
|
<reponame>yanhuaijun/test01
from django.db import models
# Create your models here.
class UserType(models.Model):
name = models.CharField(max_length=32)
class wxuser(models.Model):  # WeChat user table
unionid = models.CharField(max_length=32)
openid = models.CharField(max_length=32)
stats = models.CharField(max_length=32)
userid = models.CharField(max_length=32)
create_time = models.CharField(max_length=32)
update_time = models.CharField(max_length=32)
class UserInfo(models.Model):  # user table
username = models.CharField(max_length=32)
pwd = models.CharField(max_length=32)
email = models.CharField(max_length=32)
user_type = models.ForeignKey('UserType', on_delete=models.CASCADE)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class User(models.Model):  # user table
username = models.CharField(max_length=32)
pwd = models.CharField(max_length=32)
email = models.CharField(max_length=32)
mobile_phone = models.CharField(max_length=32)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class mood(models.Model):  # mood articles table
title = models.CharField(max_length=500)
content = models.CharField(max_length=50000)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class Love(models.Model):  # love articles table
title = models.CharField(max_length=500)
content = models.CharField(max_length=50000)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class Chicken_soup(models.Model):  # inspirational ("chicken soup") articles table
title = models.CharField(max_length=500)
content = models.CharField(max_length=50000)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class Sentimental(models.Model):  # sentimental articles table
title = models.CharField(max_length=500)
content = models.CharField(max_length=50000)
create_by = models.CharField(max_length=32)
create_date = models.CharField(max_length=32)
update_by = models.CharField(max_length=32)
update_date = models.CharField(max_length=32)
class Struggle(models.Model):  # motivational articles table
title = models.CharField(max_length=500)  # title
content = models.CharField(max_length=50000)  # content
create_by = models.CharField(max_length=32)  # created by
create_date = models.CharField(max_length=32)  # creation time
update_by = models.CharField(max_length=32)  # updated by
update_date = models.CharField(max_length=32)  # update time
class music(models.Model):  # music table
title = models.CharField(max_length=500)  # title
author = models.CharField(max_length=500)  # artist
play_url = models.CharField(max_length=50000)  # playback URL
picture_url = models.CharField(max_length=50000)  # cover image URL |
<reponame>FrederichRiver/neutrino<gh_stars>1-10
#!/usr/bin/python3
# from statsmodels.tsa.arima_model import ARIMA
import datetime
import pandas as pd
import numpy as np
import requests
import time
import random
from env import global_header
from libmysql8 import mysqlHeader, mysqlBase
from libstock import wavelet_nr, StockEventBase, str2zero
from libstratagy import StratagyBase
from lxml import etree
from data_feature import ma
from form import formFinanceTemplate, formBalance
from utils import str2number
from utils import RandomHeader, read_url
__version__ = '1.6.8-beta'
# ARIMA
def fetch_finance_info(stock_code):
url = f"http://quotes.money.163.com/f10/zycwzb_{stock_code[2:]}.html#01c02"
table = fetch_html_table(url, attr="[@class='table_bg001 border_box limit_sale scr_table']")
t = pd.read_html(table)[0]
result = t.T
header = mysqlHeader('root', '6414939', 'test')
mysql = mysqlBase(header)
for index, row in result.iterrows():
insert_sql = (
"insert into finance_info (stock_code, report_date, eps, roe)"
f"values ('{stock_code}','{index}',{str2zero(row[0])},{str2zero(row[18])})")
update_sql = (
f"update finance_info set eps={str2zero(row[0])}, roe={str2zero(row[18])} "
f"where (stock_code='{stock_code}' and report_date='{index}')")
try:
mysql.engine.execute(insert_sql)
except Exception as e:
mysql.engine.execute(update_sql)
class Stratagy1(StockEventBase):
def fetch_report_period(self):
"""
Return : period, q1, q2, q3, q4
"""
now = datetime.datetime.now()
year = range(1990, now.year)
q1 = [f"{y}-03-31" for y in year]
q2 = [f"{y}-06-30" for y in year]
q3 = [f"{y}-09-30" for y in year]
q4 = [f"{y}-12-31" for y in year]
period = q1 + q2 + q3 + q4
return period, q1, q2, q3, q4
def fetch_data(self):
period = self.fetch_report_period()[0]
for p in period:
print(p)
try:
sql = (
"SELECT stock_code,r4_net_profit,r3_5_income_tax,r2_5_finance_expense,r1_1_revenue "
f"from income_statement_template where report_period='{p}'"
)
income_statement = self.mysql.engine.execute(sql).fetchall()
income_statement = pd.DataFrame.from_dict(income_statement)
if not income_statement.empty:
income_statement.set_index(0, inplace=True)
income_statement.columns = ['net_profit', 'income_tax', 'finance_expense', 'revenue']
sql2 = (
"SELECT stock_code,r1_assets,r1_1_bank_and_cash,r5_3_accounts_payable "
f"from balance_sheet_template where report_period='{p}'"
)
balance_sheet = self.mysql.engine.execute(sql2).fetchall()
balance_sheet = pd.DataFrame.from_dict(balance_sheet)
if not balance_sheet.empty:
balance_sheet.set_index(0, inplace=True)
balance_sheet.columns = ['asset', 'bank_cash', 'accout_payable']
result = pd.concat([income_statement, balance_sheet], axis=1, join='outer', sort=False)
if not result.empty:
result['EBIT'] = (result['net_profit'] + result['income_tax'] + result['finance_expense'])/result['revenue']
result['NOPLAT'] = result['EBIT']*(1-0.3)
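# NOTE: ROIC is set equal to NOPLAT here; the invested-capital denominator is not applied.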
result['ROIC'] = result['NOPLAT']
result[np.isinf(result)] = np.nan
result.dropna(inplace=True)
print(result.head(5))
for index, row in result.iterrows():
insert_sql = (
"INSERT INTO finance_factor ("
"stock_code, report_period, ebit, roic)"
"VALUES ( "
f"'{index}','{p}',{row['EBIT']},{row['ROIC']})"
)
update_sql = (
f"UPDATE finance_factor set stock_code='{index}',"
f"report_period='{p}', ebit={row['EBIT']},"
f"roic={row['ROIC']} "
f"WHERE stock_code='{index}' and report_period='{p}'"
)
try:
# print('Insert:', index)
self.mysql.engine.execute(insert_sql)
except Exception:
# print('Update', index)
self.mysql.engine.execute(update_sql)
except Exception as e:
print('Error:', e)
class TradeDate(object):
def chinese_holiday(self, year):
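# Fixed-date Chinese public holidays only; lunar-calendar holidays (e.g. Spring Festival, Mid-Autumn) are not covered here.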
holiday = [
(year, 1, 1), (year, 1, 2), (year, 1, 3),
(year, 4, 5), (year, 5, 1), (year, 5, 2),
(year, 5, 3), (year, 10, 1), (year, 10, 2),
(year, 10, 3), (year, 10, 4), (year, 10, 5),
(year, 10, 6), (year, 10, 7)
]
return holiday
|
# Generated by Django 3.2 on 2021-04-20 16:44
from django.db import migrations, models
import django.db.models.deletion
import playlists.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('videos', '0001_initial'),
('categories', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(default=1)),
('title', models.CharField(max_length=220)),
('type', models.CharField(choices=[('MOV', 'Movie'), ('TVS', 'TV Show'), ('SEA', 'Season'), ('PLY', 'Playlist')], default='PLY', max_length=3)),
('description', models.TextField(blank=True, null=True)),
('slug', models.SlugField(blank=True, null=True)),
('active', models.BooleanField(default=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('state', models.CharField(choices=[('PU', 'Publish'), ('DR', 'Draft')], default='DR', max_length=2)),
('publish_timestamp', models.DateTimeField(blank=True, null=True)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='playlists', to='categories.category')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='playlists.playlist')),
],
),
migrations.CreateModel(
name='PlaylistRelated',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(default=1)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('playlist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='playlists.playlist')),
('related', models.ForeignKey(limit_choices_to=playlists.models.pr_limit_choices_to, on_delete=django.db.models.deletion.CASCADE, related_name='related_item', to='playlists.playlist')),
],
),
migrations.CreateModel(
name='PlaylistItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(default=1)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('playlist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='playlists.playlist')),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='videos.video')),
],
options={
'ordering': ['order', '-timestamp'],
},
),
migrations.AddField(
model_name='playlist',
name='related',
field=models.ManyToManyField(blank=True, related_name='_playlists_playlist_related_+', through='playlists.PlaylistRelated', to='playlists.Playlist'),
),
migrations.AddField(
model_name='playlist',
name='video',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='playlist_featured', to='videos.video'),
),
migrations.AddField(
model_name='playlist',
name='videos',
field=models.ManyToManyField(blank=True, related_name='playlist_item', through='playlists.PlaylistItem', to='videos.Video'),
),
migrations.CreateModel(
name='MovieProxy',
fields=[
],
options={
'verbose_name': 'Movie',
'verbose_name_plural': 'Movies',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('playlists.playlist',),
),
migrations.CreateModel(
name='TVShowProxy',
fields=[
],
options={
'verbose_name': 'TV Show',
'verbose_name_plural': 'TV Shows',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('playlists.playlist',),
),
migrations.CreateModel(
name='TVShowSeasonProxy',
fields=[
],
options={
'verbose_name': 'Season',
'verbose_name_plural': 'Seasons',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('playlists.playlist',),
),
]
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
import yaml
from rest_framework import status
from stackdio.api.cloud import models
from stackdio.core.tests.utils import PermissionsMixin, StackdioTestCase
logger = logging.getLogger(__name__)
class CloudProviderTestCase(StackdioTestCase):
"""
Tests for CloudProvider things
"""
def setUp(self):
super(CloudProviderTestCase, self).setUp()
self.client.login(username='test.admin', password='<PASSWORD>')
def test_create_provider(self):
# Creation must not be allowed via the API, whether as an admin or a regular user
response = self.client.post('/api/cloud/providers/', {'title': 'new'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# Try as non-admin
self.client.logout()
self.client.login(username='test.user', password='<PASSWORD>')
response = self.client.post('/api/cloud/providers/', {'title': 'new'})
# Should just be forbidden now
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class CloudAccountTestCase(StackdioTestCase, PermissionsMixin):
"""
Tests for CloudAccount things
"""
permission_tests = {
'model': models.CloudAccount,
'create_data': {
'provider_id': 1,
'title': 'test',
'description': 'test',
'vpc_id': 'vpc-blah',
'region_id': 1,
},
'endpoint': '/api/cloud/accounts/{0}/',
'permission': 'cloud.%s_cloudaccount',
'permission_types': [
{
'perm': 'view', 'method': 'get'
},
{
'perm': 'update', 'method': 'patch', 'data': {'title': 'test2'}
},
{
'perm': 'delete', 'method': 'delete', 'code': status.HTTP_204_NO_CONTENT
},
]
}
def set_up_perms(self):
super(CloudAccountTestCase, self).set_up_perms()
# Generate the yaml and store in the database
yaml_data = {
self.obj.slug: {
'securitygroupid': []
}
}
self.obj.yaml = yaml.safe_dump(yaml_data, default_flow_style=False)
self.obj.save()
# Update the salt cloud providers file
self.obj.update_config()
def test_view_account_as_admin(self):
self.client.login(username='test.admin', password='<PASSWORD>')
response = self.client.get('/api/cloud/accounts/{0}/'.format(self.obj.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CloudImageTestCase(StackdioTestCase, PermissionsMixin):
"""
Tests for CloudImage things
"""
permission_tests = {
'model': models.CloudImage,
'create_data': {
'title': 'test',
'description': 'test',
'image_id': 'blah',
'default_instance_size_id': 1,
'ssh_user': 'root',
},
'endpoint': '/api/cloud/images/{0}/',
'permission': 'cloud.%s_cloudimage',
'permission_types': [
{
'perm': 'view', 'method': 'get'
},
{
'perm': 'update', 'method': 'patch', 'data': {'title': 'test2'}
},
{
'perm': 'delete', 'method': 'delete', 'code': status.HTTP_204_NO_CONTENT
},
]
}
def set_up_perms(self):
account = models.CloudAccount.objects.create(
**CloudAccountTestCase.permission_tests['create_data']
)
self.obj = models.CloudImage.objects.create(account=account,
**self.permission_tests['create_data'])
|
from django.contrib import admin
from rolepermissions.roles import assign_role
from .models import (BtcDepositAddress, Category, Profile, Listing,
BtcAddressChangeHistory, Transaction, BalanceChange,
Fee, AffiliateCommision, Withdrawal, Message, Feedback, BtcPrice)
def make_vendor(modeladmin, request, queryset):
for user in queryset:
assign_role(user.user, 'vendor')
make_vendor.short_description = "Assign selected users vendor status"
@admin.register(BtcDepositAddress)
class BtcDepositAddressAdmin(admin.ModelAdmin):
list_display = ('btc_address', 'used_before', 'in_use')
list_filter = ('used_before', 'in_use')
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
@admin.register(BtcPrice)
class BtcPriceAdmin(admin.ModelAdmin):
list_display = ('current', 'created_time')
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'btc_balance', 'escrow_balance', 'current_deposit_address', 'referrer', 'registration_date')
list_filter = ('btc_balance', 'escrow_balance', 'registration_date')
search_fields = ('current_deposit_address__btc_address',)
actions = [make_vendor]
@admin.register(Listing)
class ListingAdmin(admin.ModelAdmin):
list_display = ('title', 'vendor', 'price_in_pln', 'shipping_cost_pln', 'category', 'number_sold', 'created_at', 'updated_at')
list_filter = ('category', 'created_at', 'updated_at')
search_fields = ('vendor__user__username', 'title')
@admin.register(BtcAddressChangeHistory)
class BtcAddressChangeHistoryAdmin(admin.ModelAdmin):
list_display = ('user', 'btc_address', 'assigned_at')
list_filter = ('assigned_at',)
search_fields = ('user__user__username', 'btc_address__btc_address')
@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
list_display = ('customer', 'vendor', 'btc_value_total', 'btc_fee', 'btc_net_pay', 'listing', 'quantity',
'is_finalized', 'auto_finalize_time', 'finalized_time', 'created_time', 'has_affiliate')
list_filter = ('is_finalized', 'auto_finalize_time', 'finalized_time', 'created_time', 'has_affiliate')
search_fields = ('user__user__username', 'vendor__user__username')
@admin.register(BalanceChange)
class BalanceChangeAdmin(admin.ModelAdmin):
list_display = ('type', 'user', 'amount', 'transaction', 'escrow_balance', 'btc_balance', 'increase', 'decrease', 'success', 'created_at')
list_filter = ('type', 'increase', 'decrease', 'success', 'created_at')
search_fields = ('user__user__username',)
@admin.register(Fee)
class FeeAdminAdmin(admin.ModelAdmin):
pass
@admin.register(AffiliateCommision)
class AffiliateCommisionAdmin(admin.ModelAdmin):
list_display = ('transaction', 'user', 'amount', 'created_at', 'finalized', 'credited_at')
list_filter = ('transaction', 'user', 'amount', 'created_at', 'finalized', 'credited_at')
@admin.register(Withdrawal)
class WithdrawalAdmin(admin.ModelAdmin):
list_display = ('status', 'amount', 'to_address', 'user', 'time_placed', 'pending')
list_filter = ('status', 'time_placed', 'pending')
search_fields = ('user__user__username',)
@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
list_display = ('sender', 'receiver', 'content', 'time_sent')
list_filter = ('time_sent',)
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ('customer', 'vendor', 'content', 'type', 'listing', 'created_at')
list_filter = ('customer', 'vendor', 'content', 'type', 'listing', 'created_at')
|
<gh_stars>0
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from abc import abstractproperty, ABCMeta
from urlparse import urlparse
from pika.exceptions import ChannelClosed
__author__ = '<NAME>'
import StringIO
import uuid
import logging
import pika
from rdflib import Graph, RDF, Literal, BNode, URIRef
from rdflib.namespace import Namespace, FOAF, XSD
from agora.client.wrapper import Agora
import time
from threading import Thread
from datetime import datetime
log = logging.getLogger('sdh.curator.client')
CURATOR = Namespace('http://www.smartdeveloperhub.org/vocabulary/curator#')
TYPES = Namespace('http://www.smartdeveloperhub.org/vocabulary/types#')
AMQP = Namespace('http://www.smartdeveloperhub.org/vocabulary/amqp#')
class RequestGraph(Graph):
__metaclass__ = ABCMeta
def __init__(self):
super(RequestGraph, self).__init__()
self._request_node = BNode()
self._agent_node = BNode()
self._broker_node = BNode()
self._channel_node = BNode()
self._message_id = self._agent_id = self._submitted_on = self._exchange_name = None
self._routing_key = self._broker_host = self._broker_port = self._broker_vh = None
# Node binding
self.add((self.request_node, CURATOR.replyTo, self.channel_node))
self.add((self.request_node, CURATOR.submittedBy, self.agent_node))
self.add((self.channel_node, RDF.type, CURATOR.DeliveryChannel))
self.add((self.broker_node, RDF.type, AMQP.Broker))
self.add((self.channel_node, AMQP.broker, self.broker_node))
self.add((self.agent_node, RDF.type, FOAF.Agent))
# Default graph
self.message_id = uuid.uuid4()
self.submitted_on = datetime.now()
self.agent_id = uuid.uuid4()
self.exchange_name = ""
self.routing_key = ""
self.broker_host = "localhost"
self.broker_port = 5672
self.broker_vh = "/"
self.bind('curator', CURATOR)
self.bind('amqp', AMQP)
self.bind('foaf', FOAF)
self.bind('types', TYPES)
@property
def request_node(self):
return self._request_node
@property
def broker_node(self):
return self._broker_node
@property
def channel_node(self):
return self._channel_node
@property
def agent_node(self):
return self._agent_node
@property
def message_id(self):
return self._message_id
@abstractproperty
def type(self):
pass
@message_id.setter
def message_id(self, value):
self._message_id = Literal(str(value), datatype=TYPES.UUID)
self.set((self._request_node, CURATOR.messageId, self._message_id))
@property
def agent_id(self):
return self._agent_id
@agent_id.setter
def agent_id(self, value):
self._agent_id = Literal(str(value), datatype=TYPES.UUID)
self.set((self._agent_node, CURATOR.agentId, self._agent_id))
@property
def submitted_on(self):
return self._submitted_on
@submitted_on.setter
def submitted_on(self, value):
self._submitted_on = Literal(value)
self.set((self._request_node, CURATOR.submittedOn, self._submitted_on))
@property
def exchange_name(self):
return self._exchange_name
@exchange_name.setter
def exchange_name(self, value):
self._exchange_name = Literal(value, datatype=TYPES.Name)
self.set((self.channel_node, AMQP.exchangeName, self._exchange_name))
@property
def routing_key(self):
return self._routing_key
@routing_key.setter
def routing_key(self, value):
self._routing_key = Literal(value, datatype=TYPES.Name)
self.set((self.channel_node, AMQP.routingKey, self._routing_key))
@property
def broker_host(self):
return self._broker_host
@broker_host.setter
def broker_host(self, value):
self._broker_host = Literal(value, datatype=TYPES.Hostname)
self.set((self.broker_node, AMQP.host, self._broker_host))
@property
def broker_port(self):
return self._broker_port
@broker_port.setter
def broker_port(self, value):
self._broker_port = Literal(value, datatype=TYPES.Port)
self.set((self.broker_node, AMQP.port, self._broker_port))
@property
def broker_vh(self):
return self._broker_vh
@broker_vh.setter
def broker_vh(self, value):
self._broker_vh = Literal(value, datatype=TYPES.Path)
self.set((self.broker_node, AMQP.virtualHost, self._broker_vh))
def transform(self, elem):
return elem
class FragmentRequestGraph(RequestGraph):
__metaclass__ = ABCMeta
@staticmethod
def __is_variable(elm):
return elm.startswith('?')
def __extend_uri(self, short):
"""
Extend a prefixed uri with the help of a specific dictionary of prefixes
:param short: Prefixed uri to be extended
:return:
"""
if short == 'a':
return RDF.type
for prefix in sorted(self.__prefixes, key=lambda x: len(x), reverse=True):
if short.startswith(prefix):
return URIRef(short.replace(prefix + ':', self.__prefixes[prefix]))
return short
def is_uri(self, uri):
if uri.startswith('<') and uri.endswith('>'):
uri = uri.lstrip('<').rstrip('>')
parse = urlparse(uri, allow_fragments=True)
return bool(len(parse.scheme))
elif ':' in uri:
prefix_parts = uri.split(':')
return len(prefix_parts) == 2 and prefix_parts[0] in self.__prefixes
return uri == 'a'
def __init__(self, *args, **kwargs):
super(FragmentRequestGraph, self).__init__()
if not args:
raise AttributeError('A graph pattern must be provided')
self.__prefixes = kwargs.get('prefixes', None)
if self.__prefixes is None:
raise AttributeError('A prefixes list must be provided')
elements = {}
for tp in args:
s, p, o = tuple(tp.strip().split(' '))
if s not in elements:
if self.__is_variable(s):
elements[s] = BNode(s)
self.set((elements[s], RDF.type, CURATOR.Variable))
self.set((elements[s], CURATOR.label, Literal(s, datatype=XSD.string)))
if p not in elements:
if self.is_uri(p):
elements[p] = self.__extend_uri(p)
if o not in elements:
if self.__is_variable(o):
elements[o] = BNode(o)
self.set((elements[o], RDF.type, CURATOR.Variable))
self.set((elements[o], CURATOR.label, Literal(o, datatype=XSD.string)))
elif self.is_uri(o):
elements[o] = self.__extend_uri(o)
else:
elements[o] = Literal(o)
self.add((elements[s], elements[p], elements[o]))
class StreamRequestGraph(FragmentRequestGraph):
def __init__(self, *args, **kwargs):
super(StreamRequestGraph, self).__init__(*args, **kwargs)
self.add((self.request_node, RDF.type, CURATOR.StreamRequest))
@property
def type(self):
return 'stream'
def transform(self, quad):
def __transform(x):
if isinstance(x, (str, unicode)):
if self.is_uri(x):
return URIRef(x.lstrip('<').rstrip('>'))
elif '^^' in x:
(value, ty) = tuple(x.split('^^'))
return Literal(value.replace('"', ''), datatype=URIRef(ty.lstrip('<').rstrip('>')))
elif x.startswith('_:'):
return BNode(x.replace('_:', ''))
else:
return Literal(x.replace('"', ''), datatype=XSD.string)
return x
triple = quad[1:]
return tuple([quad[0]] + map(__transform, triple))
class QueryRequestGraph(FragmentRequestGraph):
def __init__(self, *args, **kwargs):
super(QueryRequestGraph, self).__init__(*args, **kwargs)
self.add((self.request_node, RDF.type, CURATOR.QueryRequest))
@property
def type(self):
return 'query'
class CuratorClient(object):
def __init__(self, broker_host='localhost', broker_port=5672, wait=False, monitoring=None, agora_host='localhost',
agora_port=5002, stop_event=None):
self.agora = Agora(host=agora_host, port=agora_port)
self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=broker_host, port=broker_port))
self.__channel = self.__connection.channel()
self.__listening = False
self.__accept_queue = self.__response_queue = None
self.__monitor = Thread(target=self.__monitor_consume, args=[monitoring]) if monitoring else None
self.__last_consume = datetime.now()
self.__keep_monitoring = True
self.__accepted = False
self.__message = None
self.__wait = wait
self.__stop_event = stop_event
def __monitor_consume(self, t):
log.debug('Curator client monitor started...')
while self.__keep_monitoring:
if (datetime.now() - self.__last_consume).seconds > t:
self.stop()
break
else:
time.sleep(1)
def request(self, message):
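# Two-queue handshake: declare one auto-delete queue for streamed results and another for the
# accept/deny reply, publish the request graph to the 'sdh' exchange, then consume the
# acceptance message followed by the response stream.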
self.__response_queue = self.__channel.queue_declare(auto_delete=True).method.queue
message.routing_key = self.__response_queue
self.__message = message
self.__accept_queue = self.__channel.queue_declare(auto_delete=True).method.queue
self.__channel.queue_bind(exchange='sdh', queue=self.__accept_queue,
routing_key='curator.response.{}'.format(str(message.agent_id)))
self.__channel.basic_publish(exchange='sdh',
routing_key='curator.request.{}'.format(self.__message.type),
body=message.serialize(format='turtle'))
log.info('sent {} request!'.format(self.__message.type))
self.__listening = True
return self.agora.prefixes, self.__consume()
def __consume(self):
def __response_callback(properties, body):
if properties.headers.get('state', None) == 'end':
log.info('End of stream received!')
self.stop()
else:
try:
items = json.loads(body)
except ValueError:
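# Fallback for Python-literal payloads that are not valid JSON; eval is unsafe on untrusted input (ast.literal_eval would be safer).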
items = eval(body)
if not isinstance(items, list):
items = [items]
for item in items:
yield properties.headers, item
log.debug('Waiting for acceptance...')
for message in self.__channel.consume(self.__accept_queue, no_ack=True, inactivity_timeout=1):
if message is not None:
method, properties, body = message
g = Graph()
g.parse(StringIO.StringIO(body), format='turtle')
if len(list(g.subjects(RDF.type, CURATOR.Accepted))) == 1:
log.info('Request accepted!')
self.__accepted = True
else:
log.error('Bad request!')
self.__channel.queue_delete(self.__accept_queue)
self.__channel.cancel()
break
elif self.__stop_event is not None:
if self.__stop_event.isSet():
self.stop()
if not self.__accepted:
log.debug('Request not accepted. Aborting...')
raise StopIteration()
if self.__monitor is not None:
self.__monitor.start()
log.debug('Listening...')
for message in self.__channel.consume(self.__response_queue, no_ack=True, inactivity_timeout=1):
if message is not None:
method, properties, body = message
for headers, item in __response_callback(properties, body):
yield headers, self.__message.transform(item)
elif not self.__wait:
yield None
elif self.__stop_event is not None:
if self.__stop_event.isSet():
self.stop()
raise StopIteration()
else:
log.debug('Inactivity timeout...')
self.__last_consume = datetime.now()
if self.__monitor is not None:
self.__keep_monitoring = False
log.debug('Waiting for client monitor to stop...')
self.__monitor.join()
def stop(self):
try:
self.__channel.queue_delete(self.__accept_queue)
self.__channel.queue_delete(self.__response_queue)
self.__channel.cancel()
self.__channel.close()
self.__listening = False
except ChannelClosed:
pass
log.debug('Stopped curator client!')
@property
def listening(self):
return self.__listening
def get_fragment_generator(*args, **kwargs):
client = CuratorClient(**kwargs)
request = StreamRequestGraph(prefixes=client.agora.prefixes, *args)
request.broker_host = kwargs['broker_host']
return client.request(request)
def get_query_generator(*args, **kwargs):
client = CuratorClient(**kwargs)
request = QueryRequestGraph(prefixes=client.agora.prefixes, *args)
request.broker_host = kwargs['broker_host']
return client.request(request)
|
<filename>NT_UDA/demo_syn_dnn.py
# -*- coding: utf-8 -*-
# A Survey on Negative Transfer
# https://github.com/chamwen/NT-Benchmark
import numpy as np
import argparse
import random
import os
import torch as tr
import torch.nn as nn
import torch.optim as optim
from utils import network, loss, utils
from utils.dataloader import read_syn_src_tar
from utils.utils import fix_random_seed, op_copy, lr_scheduler, data_load_noimg
def train_source_test_target(args):
X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)
dset_loaders = data_load_noimg(X_src, y_src, X_tar, y_tar, args)
netF, netC = network.backbone_net(args, args.bottleneck)
netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))
netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))
base_network = nn.Sequential(netF, netC)
param_group = []
learning_rate = args.lr
for k, v in netF.named_parameters():
param_group += [{'params': v, 'lr': learning_rate * 0.1}]
for k, v in netC.named_parameters():
param_group += [{'params': v, 'lr': learning_rate}]
optimizer = optim.SGD(param_group)
optimizer = op_copy(optimizer)
acc_init = 0
max_iter = args.max_epoch * len(dset_loaders["source_tr"])
interval_iter = max_iter // 10
args.max_iter = args.max_epoch * len(dset_loaders["source_tr"])
iter_num = 0
base_network.train()
while iter_num < max_iter:
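# The source iterator is created on first use (NameError) and re-created whenever an epoch is
# exhausted (StopIteration), so training runs for max_iter steps across epochs.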
try:
inputs_source, labels_source = next(source_loader_iter)
except (NameError, StopIteration):
source_loader_iter = iter(dset_loaders["source_tr"])
inputs_source, labels_source = next(source_loader_iter)
if inputs_source.size(0) == 1:
continue
iter_num += 1
lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)
inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()
features_source, outputs_source = base_network(inputs_source)
# classifier_loss = loss.CELabelSmooth(reduction='none', num_classes=class_num, epsilon=args.smooth)(
# outputs_source, labels_source)
classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
optimizer.zero_grad()
classifier_loss.backward()
optimizer.step()
if iter_num % interval_iter == 0 or iter_num == max_iter:
base_network.eval()
acc_s_te = utils.cal_acc_base(dset_loaders["source_te"], base_network)
acc_t_te = utils.cal_acc_base(dset_loaders["Target"], base_network)
log_str = 'Task: {}, Iter:{}/{}; Val_acc = {:.2f}%; Test_Acc = {:.2f}%'.format(args.task_str, iter_num,
max_iter, acc_s_te, acc_t_te)
print(log_str)
base_network.train()
if acc_s_te >= acc_init:
acc_init = acc_s_te
acc_tar_src_best = acc_t_te
return acc_tar_src_best
if __name__ == '__main__':
data_name = 'moon'
seed = 2022
if data_name == 'moon': num_class = 2
noise_rate = 0
base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']
domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']
file_list = [data_name + i for i in base_name_list]
num_domain = len(domain_list)
args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,
epsilon=1e-05, layer='wn', class_num=num_class, smooth=0,
is_save=False, ins_num=600)
args.method = 'DNN'
args.dset = data_name
args.backbone = 'ShallowNet'
args.batch_size = 32
args.max_epoch = 50
args.input_dim = 2
args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'
args.noise_rate = 0
dset_n = args.dset + '_' + str(args.noise_rate)
os.environ["CUDA_VISIBLE_DEVICES"] = '6'
args.data_env = 'gpu' # 'local'
args.seed = 2022
fix_random_seed(args.seed)
tr.backends.cudnn.deterministic = True
print(dset_n, args.method)
args.root_path = './data_synth/'
args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'
args.result_dir = 'results/target/'
tr.manual_seed(args.seed)
tr.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
acc_all = np.zeros((len(domain_list) - 1))
for s in range(1, num_domain): # source
for t in [0]: # target
itr_idx = s - 1
info_str = '\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])
print(info_str)
args.src, args.tar = file_list[s], file_list[t]
args.task_str = domain_list[s] + '_' + domain_list[t]
print(args)
acc_all[itr_idx] = train_source_test_target(args)
print('done\n')
print('\n\nfinish one repeat')
print('\nAll acc: ', np.round(acc_all, 2))
print('Avg acc: ', np.round(np.mean(acc_all), 2))
|
# -*- coding: utf-8 -*-
import gc
import json
import structlog
from django.db import migrations
log = structlog.get_logger(__name__)
def chunks(queryset, chunksize=1000):
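# Iterate over a queryset in primary-key order, chunksize rows at a time, collecting
# garbage between chunks so large tables do not exhaust memory.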
pk = 0
queryset = queryset.order_by('pk')
last_instance = queryset.last()
if last_instance is not None:
last_pk = last_instance.pk
while pk < last_pk:
for row in queryset.filter(pk__gt=pk)[:chunksize]:
pk = row.pk
yield row
gc.collect()
def forwards_move_repos(apps, schema_editor):
"""Moves OAuth repos."""
db = schema_editor.connection.alias
# Organizations
GithubOrganization = apps.get_model('oauth', 'GithubOrganization')
BitbucketTeam = apps.get_model('oauth', 'BitbucketTeam')
RemoteOrganization = apps.get_model('oauth', 'RemoteOrganization')
for org in chunks(GithubOrganization.objects.all()):
new_org = RemoteOrganization.objects.using(db).create(
pub_date=org.pub_date,
modified_date=org.modified_date,
active=org.active,
slug=org.login,
name=org.name,
email=org.email,
url=org.html_url,
source='github',
)
for user in org.users.iterator():
new_org.users.add(user)
try:
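# The legacy json field appears to hold a Python-literal string rather than JSON, hence eval;
# note that eval is unsafe for untrusted input.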
data = eval(org.json)
new_org.avatar_url = data['avatar_url']
new_org.json = json.dumps(data)
except (SyntaxError, ValueError):
pass
new_org.save()
log.info('Migrated organization.', organization_slug=org.slug)
for org in chunks(BitbucketTeam.objects.all()):
new_org = RemoteOrganization.objects.using(db).create(
pub_date=org.pub_date,
modified_date=org.modified_date,
active=org.active,
slug=org.login,
name=org.name,
email=org.email,
url=org.html_url,
source='bitbucket',
)
for user in org.users.iterator():
new_org.users.add(user)
try:
new_org.json = json.dumps(eval(org.json))
except (SyntaxError, ValueError):
pass
new_org.save()
log.info('Migrated organization.', organization_slug=org.slug)
# Now repositories
GithubProject = apps.get_model('oauth', 'GithubProject')
BitbucketProject = apps.get_model('oauth', 'BitbucketProject')
RemoteRepository = apps.get_model('oauth', 'RemoteRepository')
for project in chunks(GithubProject.objects.all()):
new_repo = RemoteRepository.objects.using(db).create(
pub_date=project.pub_date,
modified_date=project.modified_date,
active=project.active,
name=project.name,
full_name=project.full_name,
description=project.description,
ssh_url=project.ssh_url,
html_url=project.html_url,
vcs='git',
source='github',
)
for user in project.users.iterator():
new_repo.users.add(user)
if project.organization is not None:
new_repo.organization = (RemoteOrganization
.objects
.using(db)
.get(slug=project.organization.login))
try:
data = eval(project.json)
new_repo.avatar_url = data.get('owner', {}).get('avatar_url', None)
new_repo.admin = data.get('permissions', {}).get('admin', False)
new_repo.private = data.get('private', False)
if new_repo.private:
new_repo.clone_url = data.get('ssh_url')
else:
new_repo.clone_url = data.get('clone_url')
new_repo.json = json.dumps(data)
except (SyntaxError, ValueError):
pass
new_repo.save()
log.info('Migrated project.', project_slug=project.slug)
for project in chunks(BitbucketProject.objects.all()):
new_repo = RemoteRepository.objects.using(db).create(
pub_date=project.pub_date,
modified_date=project.modified_date,
active=project.active,
name=project.name,
full_name=project.full_name,
description=project.description,
ssh_url=project.ssh_url,
html_url=project.html_url,
admin=False,
vcs=project.vcs,
source='bitbucket',
)
for user in project.users.iterator():
new_repo.users.add(user)
if project.organization is not None:
new_repo.organization = (RemoteOrganization
.objects
.using(db)
.get(slug=project.organization.login))
try:
data = eval(project.json)
new_repo.avatar_url = (data.get('links', {})
.get('avatar', {})
.get('href', None))
new_repo.private = data.get('is_private', False)
new_repo.json = json.dumps(data)
clone_urls = {location['name']: location['href']
for location
in data.get('links', {}).get('clone', {})}
if new_repo.private:
new_repo.clone_url = clone_urls.get('ssh', project.git_url)
else:
new_repo.clone_url = clone_urls.get('https', project.html_url)
except (SyntaxError, ValueError):
pass
new_repo.save()
log.info('Migrated project.', project_slug=project.slug)
def reverse_move_repos(apps, schema_editor):
"""Drop OAuth repos."""
db = schema_editor.connection.alias
RemoteRepository = apps.get_model('oauth', 'RemoteRepository')
RemoteOrganization = apps.get_model('oauth', 'RemoteOrganization')
RemoteRepository.objects.using(db).delete()
RemoteOrganization.objects.using(db).delete()
class Migration(migrations.Migration):
dependencies = [
('oauth', '0002_combine_services'),
]
operations = [
migrations.RunPython(forwards_move_repos, reverse_move_repos),
]
|
<reponame>goldmanm/atmospheric-sar-comparison
#!/usr/bin/env python
# encoding: utf-8
name = "Atkinson2007"
longDesc = u"""
The reaction site *3 needs a lone pair in order to react. It cannot be 2S or 4S.
"""
entry(
label = "parent",
group =
"""
1 *3 R u1
""",
data = None
)
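# The entries below pair functional-group patterns (or full molecules) with rate data; the
# tree() at the end of the file arranges them from the generic parent (L1) down to specific
# sites (L3), so more specific matches override their ancestors.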
entry(
label = "methyl",
molecule = """
multiplicity 2
1 *3 C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""",
data = 12.9
)
entry(
label = "primary_C",
group =
"""
1 *3 C u1 {2,S} {3,S} {4,S}
2 H u0 {1,S}
3 H u0 {1,S}
4 R!H u0 {1,S}
""",
data = 9.5
)
entry(
label = "ethyl",
molecule =
"""
multiplicity 2
1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}
2 *3 C u1 p0 c0 {1,S} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
""",
data = 9.8
)
entry(
label = "1_propyl",
molecule =
"""
multiplicity 2
1 C u0 p0 c0 {2,S} {6,S} {7,S} {8,S}
2 C u0 p0 c0 {1,S} {3,S} {9,S} {10,S}
3 *3 C u1 p0 c0 {2,S} {4,S} {5,S}
4 H u0 p0 c0 {3,S}
5 H u0 p0 c0 {3,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {2,S}
""",
data = 9.6
)
entry(
label = "primary_alcohol",
molecule =
"""
multiplicity 2
1 O u0 p2 c0 {2,S} {5,S}
2 *3 C u1 p0 c0 {1,S} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 H u0 p0 c0 {1,S}
""",
data = 6.8
)
entry(
label = "secondary_C",
group =
"""
1 *3 C u1 {2,S} {3,S} {4,S}
2 H u0 {1,S}
3 R!H u0 {1,S}
4 R!H u0 {1,S}
""",
data = 8.2
)
entry(
label = "2_propyl",
molecule =
"""
multiplicity 2
1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}
2 *3 C u1 p0 c0 {1,S} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 C u0 p0 c0 {2,S} {8,S} {9,S} {10,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {4,S}
9 H u0 p0 c0 {4,S}
10 H u0 p0 c0 {4,S}
""",
data = 8.4
)
entry(
label = "secondary_alcohol",
group =
"""
1 *3 C u1 {2,S} {3,S} {4,S}
2 O u0 {1,S} {5,S}
3 R!H u0 {1,S}
4 H u0 {1,S}
5 H u0 {2,S}
""",
data = 5.2
)
entry(
label = "tertiary_C",
group =
"""
1 *3 C u1 {2,S} {3,S} {4,S}
2 R!H u0 {1,S}
3 R!H u0 {1,S}
4 R!H u0 {1,S}
""",
data = 7.1
)
entry(
label = "t_butyl",
molecule =
"""
multiplicity 2
1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}
2 *3 C u1 p0 c0 {1,S} {3,S} {4,S}
3 C u0 p0 c0 {2,S} {8,S} {9,S} {10,S}
4 C u0 p0 c0 {2,S} {11,S} {12,S} {13,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {3,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {4,S}
""",
data = 7.2
)
entry(
label = "tertiary_alcohol",
group =
"""
1 *3 C u1 {2,S} {3,S} {4,S}
2 O u0 {1,S} {5,S}
3 R!H u0 {1,S}
4 R!H u0 {1,S}
5 H u0 {2,S}
""",
data = 4.8
)
entry(
label = "oxy",
group =
"""
1 *3 O u1
""",
data = 7.5
)
entry(
label = "formyl",
molecule =
"""
multiplicity 2
1 *3 C u1 p0 c0 {2,S} {3,D}
2 H u0 p0 c0 {1,S}
3 O u0 p2 c0 {1,D}
""",
data = 10.
)
entry(
label = "carbonyl",
group =
"""
1 *3 C u1 {2,D} {3,S}
2 O u0 {1,D}
3 R!H u0 {1,S}
""",
data = 5.
)
tree(
"""
L1: parent
L2: methyl
L2: primary_C
L3: ethyl
L3: 1_propyl
L3: primary_alcohol
L2: secondary_C
L3: 2_propyl
L3: secondary_alcohol
L2: tertiary_C
L3: t_butyl
L3: tertiary_alcohol
L2: oxy
L2: formyl
L2: carbonyl
"""
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Rimco'
# System imports
import datetime
import logging
import random
import time
import errno
from threading import Lock
BRUTEFORCE_LOCK = Lock()
def del_rw(action, name, exc):
import os
import stat
if os.path.exists(name):
os.chmod(name, stat.S_IWRITE)
if os.path.isfile(name):
os.remove(name)
elif os.path.isdir(name):
os.rmdir(name)
def now():
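# Local wall-clock time as epoch-style seconds: UTC epoch time shifted by the local UTC offset.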
return time.time() + (datetime.datetime.now() - datetime.datetime.utcnow()).total_seconds()
def try_float(val, default=0):
try:
return float(val)
except ValueError:
return default
def datetime_string(timestamp=None):
if timestamp:
if hasattr(timestamp, 'strftime'):
return timestamp.strftime("%Y-%m-%d %H:%M:%S")
else:
return time.strftime("%Y-%m-%d %H:%M:%S", timestamp)
else:
return time.strftime("%Y-%m-%d %H:%M:%S")
def two_digits(n):
return '%02d' % int(n)
def program_delay(program):
today = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
result = (program.start - today).total_seconds()
while result < 0:
result += program.modulo*60
return int(result/24/3600)
def formatTime(t):
from options import options
if options.time_format:
return t
else:
hour = int(t[0:2])
newhour = hour
if hour == 0:
newhour = 12
if hour > 12:
newhour = hour-12
return str(newhour) + t[2:] + (" am" if hour<12 else " pm")
def themes():
import os
return os.listdir(os.path.join('static', 'themes'))
def determine_platform():
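# Identify the host by probing for platform-specific GPIO modules: Raspberry Pi ('pi'),
# BeagleBone ('bo'), Windows ('nt'); an empty string means unknown.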
import os
try:
import RPi.GPIO
return 'pi'
except Exception:
pass
try:
import Adafruit_BBIO.GPIO
return 'bo'
except Exception:
pass
if os.name == 'nt':
return 'nt'
return ''
def get_rpi_revision():
try:
import RPi.GPIO as GPIO
return GPIO.RPI_REVISION
except ImportError:
return 0
def reboot(wait=1, block=False):
if block:
# Stop the web server first:
from ospy import server
server.stop()
from ospy.stations import stations
stations.clear()
time.sleep(wait)
logging.info("Rebooting...")
import subprocess
if determine_platform() == 'nt':
subprocess.Popen('shutdown /r /t 0'.split())
else:
subprocess.Popen(['reboot'])
else:
from threading import Thread
t = Thread(target=reboot, args=(wait, True))
t.daemon = False
t.start()
def poweroff(wait=1, block=False):
if block:
# Stop the web server first:
from ospy import server
server.stop()
from ospy.stations import stations
stations.clear()
time.sleep(wait)
logging.info("Powering off...")
import subprocess
if determine_platform() == 'nt':
subprocess.Popen('shutdown /t 0'.split())
else:
subprocess.Popen(['poweroff'])
else:
from threading import Thread
t = Thread(target=poweroff, args=(wait, True))
t.daemon = False
t.start()
def restart(wait=1, block=False):
if block:
# Stop the web server first:
from ospy import server
server.stop()
from ospy.stations import stations
stations.clear()
time.sleep(wait)
logging.info("Restarting...")
import sys
if determine_platform() == 'nt':
import subprocess
# Use this weird construction to start a separate process that is not killed when we stop the current one
subprocess.Popen(['cmd.exe', '/c', 'start', sys.executable] + sys.argv)
else:
import os
os.execl(sys.executable, sys.executable, *sys.argv)
else:
from threading import Thread
t = Thread(target=restart, args=(wait, True))
t.daemon = False
t.start()
def uptime():
"""Returns UpTime for RPi"""
try:
with open("/proc/uptime") as f:
total_sec = float(f.read().split()[0])
string = str(datetime.timedelta(seconds=total_sec)).split('.')[0]
except Exception:
string = 'Unknown'
return string
def get_ip():
"""Returns the IP address if available."""
try:
import subprocess
arg = 'ip route list'
p = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src') + 1]
return ipaddr
except Exception:
return 'Unknown'
def get_mac():
"""Return MAC from file"""
try:
return str(open('/sys/class/net/eth0/address').read())
except Exception:
return 'Unknown'
def get_meminfo():
"""Return the information in /proc/meminfo as a dictionary"""
try:
meminfo = {}
with open('/proc/meminfo') as f:
for line in f:
meminfo[line.split(':')[0]] = line.split(':')[1].strip()
return meminfo
except Exception:
return {
'MemTotal': 'Unknown',
'MemFree': 'Unknown'
}
def get_netdevs():
"""RX and TX bytes for each of the network devices"""
try:
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data = {}
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = {'rx': float(line[1].split()[0])/(1024.0*1024.0),
'tx': float(line[1].split()[8])/(1024.0*1024.0)}
return device_data
except Exception:
return {}
def get_cpu_temp(unit=None):
"""Returns the temperature of the CPU if available."""
import os
try:
platform = determine_platform()
if platform == 'bo':
res = os.popen('cat /sys/class/hwmon/hwmon0/device/temp1_input').readline()
temp = str(int(float(res) / 1000))
elif platform == 'pi':
res = os.popen('vcgencmd measure_temp').readline()
temp = res.replace("temp=", "").replace("'C\n", "")
else:
temp = str(0)
if unit == 'F':
return str(9.0 / 5.0 * float(temp) + 32)
elif unit is not None:
return str(float(temp))
else:
return temp
except Exception:
return '!!'
def mkdir_p(path):
import os
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def duration_str(total_seconds):
minutes, seconds = divmod(total_seconds, 60)
return '%02d:%02d' % (minutes, seconds)
def timedelta_duration_str(time_delta):
return duration_str(time_delta.total_seconds())
def timedelta_time_str(time_delta, with_seconds=False):
days, remainder = divmod(time_delta.total_seconds(), 24*3600)
hours, remainder = divmod(remainder, 3600)
if hours == 24:
hours = 0
minutes, seconds = divmod(remainder, 60)
return '%02d:%02d' % (hours, minutes) + ((':%02d' % seconds) if with_seconds else '')
def minute_time_str(minute_time, with_seconds=False):
return timedelta_time_str(datetime.timedelta(minutes=minute_time), with_seconds)
def short_day(index):
return ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][index]
def long_day(index):
return ["Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday"][index]
def stop_onrain():
"""Stop stations that do not ignore rain."""
from ospy.stations import stations
for station in stations.get():
if not station.ignore_rain:
station.activated = False
def save_to_options(qdict):
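# Copy posted form values into the options store, coercing each value to the type of the
# option's default and enforcing any min/max bounds; multi-option checkboxes are collected
# into a list of selected names.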
from ospy.options import options
for option in options.OPTIONS:
key = option['key']
multi_enum = option.get('multi_options')
if 'category' in option:
if key in qdict:
value = qdict[key]
if isinstance(option['default'], bool):
options[key] = True if value and value != "off" else False
elif isinstance(option['default'], int) or isinstance(option['default'], float):
if 'min' in option and float(qdict[key]) < option['min']:
continue
if 'max' in option and float(qdict[key]) > option['max']:
continue
options[key] = type(option['default'])(qdict[key])
else:
options[key] = qdict[key]
elif multi_enum:
if hasattr(multi_enum, '__call__'):
multi_enum = multi_enum()
value = []
for name in multi_enum:
v_name = key + '_' + name
if v_name in qdict and qdict[v_name] and qdict[v_name] != "off":
value.append(name)
options[key] = value
else:
if isinstance(option['default'], bool):
options[key] = False
########################
#### Login Handling ####
def password_salt():
return "".join(chr(random.randint(33, 127)) for _ in xrange(64))
def password_hash(password, salt):
import hashlib
m = hashlib.sha1()
m.update(password + salt)
return m.hexdigest()
def test_password(password):
from ospy.options import options
# Brute-force protection:
with BRUTEFORCE_LOCK:
if options.password_time > 0:
time.sleep(options.password_time)
result = options.password_hash == password_hash(password, options.password_salt)
if result:
options.password_time = 0
else:
if options.password_time < 30:
options.password_time += 1
return result
def check_login(redirect=False):
from ospy import server
import web
from ospy.options import options
qdict = web.input()
try:
if options.no_password:
return True
if server.session.validated:
return True
except KeyError:
pass
if 'pw' in qdict:
if test_password(qdict['pw']):
return True
if redirect:
raise web.unauthorized()
return False
if redirect:
raise web.seeother('/login', True)
return False
def get_input(qdict, key, default=None, cast=None):
result = default
if key in qdict:
result = qdict[key]
if cast is not None:
result = cast(result)
return result
def template_globals():
import json
import plugins
import urllib
from web import ctx
from ospy.inputs import inputs
from ospy.log import log
from ospy.options import level_adjustments, options, rain_blocks
from ospy.programs import programs, ProgramType
from ospy.runonce import run_once
from ospy.stations import stations
from ospy import version
from ospy.server import session
result = {
'str': str,
'bool': bool,
'int': int,
'round': round,
'isinstance': isinstance,
'sorted': sorted,
'hasattr': hasattr,
'now': now
}
result.update(globals()) # Everything in the global scope of this file will be available
result.update(locals()) # Everything imported in this function will be available
return result
def help_files_in_directory(docs_dir):
import os
result = []
if os.path.isdir(docs_dir):
for filename in sorted(os.listdir(docs_dir)):
if filename.endswith('.md'):
name = os.path.splitext(os.path.basename(filename))[0]
name = name.replace('.', ' ').replace('_', ' ').title()
filename = os.path.relpath(os.path.join(docs_dir, filename))
result.append((name, filename))
return result
def get_help_files():
import os
result = []
result.append((1, 'OSPy'))
result.append((2, 'Readme', 'README.md'))
for doc in help_files_in_directory(os.path.join('ospy', 'docs')):
result.append((2, doc[0], doc[1]))
result.append((1, 'API'))
result.append((2, 'Readme', os.path.join('api', 'README.md')))
for doc in help_files_in_directory(os.path.join('api', 'docs')):
result.append((2, doc[0], doc[1]))
result.append((1, 'Plug-ins'))
result.append((2, 'Readme', os.path.join('plugins', 'README.md')))
from plugins import plugin_names, plugin_dir, plugin_docs_dir
for module, name in plugin_names().iteritems():
readme_file = os.path.join(os.path.relpath(plugin_dir(module)), 'README.md')
readme_exists = os.path.isfile(readme_file)
docs = help_files_in_directory(plugin_docs_dir(module))
if readme_exists or docs:
if readme_exists:
result.append((2, name, readme_file))
else:
result.append((2, name))
for doc in docs:
result.append((3, doc[0], doc[1]))
return result
def get_help_file(id):
import web
try:
id = int(id)
docs = get_help_files()
if 0 <= id < len(docs):
option = docs[id]
if len(option) > 2:
filename = option[2]
with open(filename) as fh:
import markdown
converted = markdown.markdown(fh.read(), extensions=['partial_gfm', 'markdown.extensions.codehilite'])
return web.template.Template(converted, globals=template_globals())()
except Exception:
pass
return ''
|
"""
dj-stripe Account Tests.
"""
from copy import deepcopy
from unittest.mock import patch
import pytest
from django.test.testcases import TestCase
from djstripe.models import Account
from djstripe.settings import STRIPE_SECRET_KEY
from . import (
FAKE_ACCOUNT,
FAKE_FILEUPLOAD_ICON,
FAKE_FILEUPLOAD_LOGO,
IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
AssertStripeFksMixin,
)
class TestAccount(AssertStripeFksMixin, TestCase):
@patch("stripe.Account.retrieve", autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED)
@patch(
"stripe.FileUpload.retrieve",
side_effect=[deepcopy(FAKE_FILEUPLOAD_ICON), deepcopy(FAKE_FILEUPLOAD_LOGO)],
autospec=True,
)
def test_get_connected_account_from_token(
self, fileupload_retrieve_mock, account_retrieve_mock
):
account_retrieve_mock.return_value = deepcopy(FAKE_ACCOUNT)
account = Account.get_connected_account_from_token("fake_token")
account_retrieve_mock.assert_called_once_with(api_key="fake_token")
self.assert_fks(account, expected_blank_fks={})
@patch("stripe.Account.retrieve", autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED)
@patch(
"stripe.FileUpload.retrieve",
side_effect=[deepcopy(FAKE_FILEUPLOAD_ICON), deepcopy(FAKE_FILEUPLOAD_LOGO)],
autospec=True,
)
def test_get_default_account(self, fileupload_retrieve_mock, account_retrieve_mock):
account_retrieve_mock.return_value = deepcopy(FAKE_ACCOUNT)
account = Account.get_default_account()
account_retrieve_mock.assert_called_once_with(api_key=STRIPE_SECRET_KEY)
self.assertGreater(len(account.business_profile), 0)
self.assertGreater(len(account.settings), 0)
self.assertEqual(account.branding_icon.id, FAKE_FILEUPLOAD_ICON["id"])
self.assertEqual(account.branding_logo.id, FAKE_FILEUPLOAD_LOGO["id"])
self.assertEqual(account.settings["branding"]["icon"], account.branding_icon.id)
self.assertEqual(account.settings["branding"]["logo"], account.branding_logo.id)
with self.assertWarns(DeprecationWarning):
self.assertEqual(account.business_logo.id, account.branding_icon.id)
self.assertNotEqual(account.branding_logo.id, account.branding_icon.id)
self.assert_fks(account, expected_blank_fks={})
@patch("stripe.Account.retrieve", autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED)
@patch(
"stripe.FileUpload.retrieve",
return_value=deepcopy(FAKE_FILEUPLOAD_LOGO),
autospec=True,
)
def test_get_default_account_null_logo(
self, fileupload_retrieve_mock, account_retrieve_mock
):
fake_account = deepcopy(FAKE_ACCOUNT)
fake_account["settings"]["branding"]["icon"] = None
fake_account["settings"]["branding"]["logo"] = None
account_retrieve_mock.return_value = fake_account
account = Account.get_default_account()
account_retrieve_mock.assert_called_once_with(api_key=STRIPE_SECRET_KEY)
self.assert_fks(
account,
expected_blank_fks={
"djstripe.Account.branding_logo",
"djstripe.Account.branding_icon",
},
)
@pytest.mark.parametrize(
"mock_account_id, other_mock_account_id, expected_stripe_account",
(
("acct_fakefakefakefake001", None, "acct_fakefakefakefake001"),
(
"acct_fakefakefakefake001",
"acct_fakefakefakefake002",
"acct_fakefakefakefake002",
),
),
)
@patch(
target="djstripe.models.connect.StripeModel._create_from_stripe_object",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_account__create_from_stripe_object(
mock_super__create_from_stripe_object,
mock_account_id,
other_mock_account_id,
expected_stripe_account,
):
"""Ensure that we are setting the ID value correctly."""
mock_data = {"id": mock_account_id}
Account._create_from_stripe_object(
data=mock_data, stripe_account=other_mock_account_id
)
mock_super__create_from_stripe_object.assert_called_once_with(
data=mock_data,
current_ids=None,
pending_relations=None,
save=True,
stripe_account=expected_stripe_account,
)
|
<reponame>pytaunay/multiwavelength-pyrometry
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
import algorithm.generate_spectrum as gs
from algorithm.pixel_operations import choose_pixels, generate_combinations
from algorithm.temperature_functions import optimum_temperature
from algorithm.kfold import order_selection
'''
File: effect_error_emissivity_model.py
Description: investigate the effect of an erroneous emissivity model on the
error and coefficient of dispersion.
The output data is used to generate Fig. 6 in our RSI journal article.
'''
def select_true_emissivity(chosen_case):
'''
Returns the true emissivity based on the case of interest chosen.
Inputs:
- chosen_case: case of interest
Returns:
- lambda function for the case of interest
'''
### Emissivity functions
if chosen_case == 'tungsten':
# Tungsten 2000 K emissivity and polynomial of order 1 to fit it
# Source: CRC Handbook
w_wl = np.array([300,350,400,500,600,700,800,900])
w_eps_data = np.array([0.474,0.473,0.474,0.462,0.448,0.436,0.419,0.401])
w_m,w_b = np.polyfit(w_wl,w_eps_data,deg=1)
f_eps = lambda wl,T: w_m*wl + w_b
T0 = 2000
elif chosen_case =='black_body':
# Black body
f_eps = lambda wl,T: 1.0 * np.ones(len(wl))
T0 = 1500
elif chosen_case == 'gray_body':
# Gray body
f_eps = lambda wl,T: 0.5 * np.ones(len(wl))
T0 = 1500
elif chosen_case == 'second_order':
# Artificial second order
art_wl = np.array([300,500,1100])
art_eps_data = np.array([1,0.3,1])
art_fac = np.polyfit(art_wl,art_eps_data,deg=2)
a0,a1,a2 = art_fac
f_eps = lambda wl,T: a0*wl**2 + a1*wl + a2
T0 = 3000
else:
# If none of the valid cases match, raise a runtime error.
# This should not happen, but one is never too careful.
raise RuntimeError("Invalid chosen case")
return f_eps, T0
### Controls
## Case of interest. chosen_case can be
# - "black_body"
# - "gray_body"
# - "tungsten"
# - "second_order"
chosen_case = 'gray_body'
## Wavelength range
wl_min = 400
wl_max = 800
## Number of CCD pixels
npix = 3000
### Run
bb_eps = lambda wl,T: 1.0 * np.ones(len(wl))
if chosen_case not in ('black_body', 'gray_body', 'tungsten', 'second_order'):
raise RuntimeError("Invalid chosen case")
f_eps, T0 = select_true_emissivity(chosen_case)
# Vectors of pixels and wavelengths
wl_vec = np.linspace(wl_min,wl_max,int(npix))
pix_vec = np.linspace(0,npix-1,npix,dtype=np.int64)
#### Chosen emissivity function
model_list = []
for it in range(100):
model_list.append(f_eps)
model_list = np.array(model_list)
### Emission lines
#el = np.array([350,400,450,500,600,650,800])
#el = None
### Plots
#f,ax = plt.subplots(len(model_list),2)
### Iterate over multiple models
it = 0
polyvec = []
Tavevec = []
errvec = []
for idx,f_eps in enumerate(model_list):
if np.mod(idx,10) == 0:
print(idx)
### Generate some data
# I_calc -> true radiance
# noisy_data -> perturbed radiance
# filtered_data -> averaged log(noisy_data)
# data_spl -> spline representation of filtered_data (no smoothing)
# pix_sub_vec -> pixels numbers used to address the main wavelength vector
# wl_vec -> main wavelength vector
# wl_sub_vec -> subset of the main wavelength vector
I_calc,noisy_data,filtered_data,data_spl,pix_sub_vec = gs.generate_data(
wl_vec,T0,pix_vec,f_eps)
wl_sub_vec = wl_vec[pix_sub_vec]
### Choose the order of the emissivity w/ k-fold
poly_order = order_selection(data_spl,
pix_sub_vec,wl_vec,
bb_eps)
### Calculate the temperature using the whole dataset
# Pixel operations
chosen_pix = choose_pixels(pix_sub_vec,bin_method='average')
cmb_pix = generate_combinations(chosen_pix,pix_sub_vec)
# Compute the temperature
Tave, Tstd, Tmetric, sol = optimum_temperature(data_spl,cmb_pix,
pix_sub_vec,wl_vec,
poly_order)
### Store data
err = (Tave/T0-1)*100
polyvec.append(poly_order)
Tavevec.append(Tave)
errvec.append(err)
print(idx,err,poly_order,Tave)
print(np.mean(np.abs(errvec)),
np.mean(polyvec),
np.mean(Tavevec))
|
<reponame>jhbez/focus
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_restful import Resource
from flask import request, g, jsonify
from v.tools.v import tuple2list
from v.tools.exception import ExceptionRest
from v.habit.model.historyHabitMdl import HistoryHabitMdl
class HistoryHabitListRst(Resource, HistoryHabitMdl):
def get(self):
_get = jsonify()
try:
data = g.db_conn.execute("select * from %s where user_id='%s'" % (self._table, g.user.id,))
if g.db_conn.count() > 0:
_get = tuple2list(self._fields, data)
_get = jsonify(_get)
_get.status_code = 200
else:
raise ExceptionRest(status_code=404)
except ExceptionRest as e:
_get.status_code = e.status_code
return _get
def post(self):
_post = jsonify()
try:
_data = request.json
_insert = []
qri = "insert into %s (user_id, habit_id, state) values(%s, %s, '%s') returning id;" \
% (self._table, g.user.id, _data.get('habit_id'), _data.get('state'))
g.db_conn.execute(qri)
if g.db_conn.count() > 0:
_insert.append({"id": g.db_conn.one()[0]})
_post = jsonify(_insert)
_post.status_code = 201
else:
raise ExceptionRest(status_code=400)
except ExceptionRest as e:
_post.status_code = e.status_code
return _post
class HistoryHabitRst(Resource, HistoryHabitMdl):
def get(self, history_habit_id):
_get = jsonify()
try:
g.db_conn.execute('select * from %s where user_id =%s and id = %s;'
% (self._table, g.user.id, str(history_habit_id)))
if g.db_conn.count() > 0:
_get = jsonify(tuple2list(self._fields, g.db_conn.fetch()))
_get.status_code = 200
else:
raise ExceptionRest(status_code=404)
except ExceptionRest as e:
_get.status_code = e.status_code
return _get
def delete(self, history_habit_id):
_delete = jsonify()
try:
qrd = "delete from %s where user_id=%s and id=%s" % (self._table, g.user.id, history_habit_id)
g.db_conn.execute(qrd)
if g.db_conn.count() > 0:
_delete.status_code = 204
else:
_delete.status_code = 404
except ExceptionRest as e:
_delete.status_code = e.status_code
return _delete
def put(self, history_habit_id):
_put = jsonify()
try:
# updating is currently disabled: this raise makes the code below unreachable
raise ExceptionRest(status_code=401)
_data = request.json
qru = "update %s set state ='%s' where user_id=%s and id = %s" % \
(self._table, _data.get('state'), g.user.id, history_habit_id)
g.db_conn.execute(qru)
if g.db_conn.count() > 0:
_put.status_code = 201
else:
raise ExceptionRest(status_code=404)
except ExceptionRest as e:
_put.status_code = e.status_code
return _put
|
<reponame>wycivil08/blendocv
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "STL format",
"author": "<NAME> (Guillaum)",
"version": (1, 0),
"blender": (2, 5, 7),
"api": 35622,
"location": "File > Import-Export > Stl",
"description": "Import-Export STL files",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"
"Scripts/Import-Export/STL",
"tracker_url": "https://projects.blender.org/tracker/index.php?"
"func=detail&aid=22837",
"support": 'OFFICIAL',
"category": "Import-Export"}
# @todo write the wiki page
"""
Import-Export STL files (binary or ascii)
- Import automatically remove the doubles.
- Export can export with/without modifiers applied
Issues:
Import:
- Does not handle the normal of the triangles
- Does not handle endien
"""
if "bpy" in locals():
import imp
if "stl_utils" in locals():
imp.reload(stl_utils)
if "blender_utils" in locals():
imp.reload(blender_utils)
import os
import bpy
from bpy.props import StringProperty, BoolProperty, CollectionProperty
from bpy_extras.io_utils import ExportHelper, ImportHelper
class ImportSTL(bpy.types.Operator, ImportHelper):
'''Load STL triangle mesh data'''
bl_idname = "import_mesh.stl"
bl_label = "Import STL"
bl_options = {'UNDO'}
filename_ext = ".stl"
filter_glob = StringProperty(
default="*.stl",
options={'HIDDEN'},
)
files = CollectionProperty(
name="File Path",
type=bpy.types.OperatorFileListElement,
)
directory = StringProperty(
subtype='DIR_PATH',
)
def execute(self, context):
from . import stl_utils
from . import blender_utils
paths = [os.path.join(self.directory, name.name)
for name in self.files]
if not paths:
paths.append(self.filepath)
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
for path in paths:
objName = bpy.path.display_name(os.path.basename(path))
tris, pts = stl_utils.read_stl(path)
blender_utils.create_and_link_mesh(objName, tris, pts)
return {'FINISHED'}
class ExportSTL(bpy.types.Operator, ExportHelper):
'''
Save STL triangle mesh data from the active object
'''
bl_idname = "export_mesh.stl"
bl_label = "Export STL"
filename_ext = ".stl"
ascii = BoolProperty(name="Ascii",
description="Save the file in ASCII file format",
default=False)
apply_modifiers = BoolProperty(name="Apply Modifiers",
description="Apply the modifiers "
"before saving",
default=True)
def execute(self, context):
from . import stl_utils
from . import blender_utils
import itertools
faces = itertools.chain.from_iterable(
blender_utils.faces_from_mesh(ob, self.apply_modifiers)
for ob in context.selected_objects)
stl_utils.write_stl(self.filepath, faces, self.ascii)
return {'FINISHED'}
def menu_import(self, context):
self.layout.operator(ImportSTL.bl_idname,
text="Stl (.stl)").filepath = "*.stl"
def menu_export(self, context):
default_path = os.path.splitext(bpy.data.filepath)[0] + ".stl"
self.layout.operator(ExportSTL.bl_idname,
text="Stl (.stl)").filepath = default_path
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_import)
bpy.types.INFO_MT_file_export.append(menu_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_import)
bpy.types.INFO_MT_file_export.remove(menu_export)
if __name__ == "__main__":
register()
|
<reponame>wuchen-huawei/huaweicloud-sdk-python-v3<filename>huaweicloud-sdk-oms/huaweicloudsdkoms/v2/model/smn_info.py<gh_stars>1-10
# coding: utf-8
import pprint
import re
import six
class SmnInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'notify_result': 'bool',
'notify_error_message': 'str',
'topic_name': 'str'
}
attribute_map = {
'notify_result': 'notify_result',
'notify_error_message': 'notify_error_message',
'topic_name': 'topic_name'
}
def __init__(self, notify_result=None, notify_error_message=None, topic_name=None):
"""SmnInfo - a model defined in huaweicloud sdk"""
self._notify_result = None
self._notify_error_message = None
self._topic_name = None
self.discriminator = None
if notify_result is not None:
self.notify_result = notify_result
if notify_error_message is not None:
self.notify_error_message = notify_error_message
if topic_name is not None:
self.topic_name = topic_name
@property
def notify_result(self):
"""Gets the notify_result of this SmnInfo.
Records whether the SMN message was sent successfully after the migration task finished.
:return: The notify_result of this SmnInfo.
:rtype: bool
"""
return self._notify_result
@notify_result.setter
def notify_result(self, notify_result):
"""Sets the notify_result of this SmnInfo.
Records whether the SMN message was sent successfully after the migration task finished.
:param notify_result: The notify_result of this SmnInfo.
:type: bool
"""
self._notify_result = notify_result
@property
def notify_error_message(self):
"""Gets the notify_error_message of this SmnInfo.
Error code recording why the SMN message failed to send (empty when the migration task succeeds).
:return: The notify_error_message of this SmnInfo.
:rtype: str
"""
return self._notify_error_message
@notify_error_message.setter
def notify_error_message(self, notify_error_message):
"""Sets the notify_error_message of this SmnInfo.
Error code recording why the SMN message failed to send (empty when the migration task succeeds).
:param notify_error_message: The notify_error_message of this SmnInfo.
:type: str
"""
self._notify_error_message = notify_error_message
@property
def topic_name(self):
"""Gets the topic_name of this SmnInfo.
Name of the SMN topic (empty when the SMN message is sent successfully).
:return: The topic_name of this SmnInfo.
:rtype: str
"""
return self._topic_name
@topic_name.setter
def topic_name(self, topic_name):
"""Sets the topic_name of this SmnInfo.
Name of the SMN topic (empty when the SMN message is sent successfully).
:param topic_name: The topic_name of this SmnInfo.
:type: str
"""
self._topic_name = topic_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
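# A minimal usage sketch (values are illustrative; 'SMN.0001' is an assumed
# error code, not taken from the SDK):
#   info = SmnInfo(notify_result=False, notify_error_message='SMN.0001')
#   info.to_dict()
#   # -> {'notify_result': False, 'notify_error_message': 'SMN.0001',
#   #     'topic_name': None}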
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmnInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
"""Test MIROC-ESM fixes."""
import unittest
from cf_units import Unit
from iris.coords import DimCoord
from iris.cube import Cube
from iris.exceptions import CoordinateNotFoundError
from esmvalcore.cmor._fixes.cmip5.miroc_esm import AllVars, Cl, Co2, Tro3
from esmvalcore.cmor._fixes.common import ClFixHybridPressureCoord
from esmvalcore.cmor.fix import Fix
from esmvalcore.cmor.table import get_var_info
def test_get_cl_fix():
"""Test getting of fix."""
fix = Fix.get_fixes('CMIP5', 'MIROC-ESM', 'Amon', 'cl')
assert fix == [Cl(None), AllVars(None)]
def test_cl_fix():
"""Test fix for ``cl``."""
assert Cl is ClFixHybridPressureCoord
class TestCo2(unittest.TestCase):
"""Test c02 fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='co2', units='J')
self.vardef = get_var_info('CMIP5', 'Amon', self.cube.var_name)
self.fix = Co2(self.vardef)
def test_get(self):
"""Test fix get."""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'MIROC-ESM', 'Amon', 'co2'),
[Co2(self.vardef), AllVars(self.vardef)])
def test_fix_metadata(self):
"""Test unit fix."""
cube = self.fix.fix_metadata([self.cube])[0]
self.assertEqual(cube.data[0], 1)
self.assertEqual(cube.units, Unit('1e-6'))
class TestTro3(unittest.TestCase):
"""Test tro3 fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='tro3', units='J')
self.fix = Tro3(None)
def test_get(self):
"""Test fix get."""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'MIROC-ESM', 'Amon', 'tro3'),
[Tro3(None), AllVars(None)])
def test_fix_data(self):
"""Test data fix."""
cube = self.fix.fix_data(self.cube)
self.assertEqual(cube.data[0], 1000)
self.assertEqual(cube.units, Unit('J'))
class TestAll(unittest.TestCase):
"""Test fixes for allvars."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([[1.0, 2.0], [3.0, 4.0]], var_name='co2', units='J')
self.cube.add_dim_coord(
DimCoord([0, 1],
standard_name='time',
units=Unit('days since 0000-01-01 00:00:00',
calendar='gregorian')), 0)
self.cube.add_dim_coord(DimCoord([0, 1], long_name='AR5PL35'), 1)
self.fix = AllVars(None)
def test_get(self):
"""Test fix get."""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'MIROC-ESM', 'Amon', 'tos'),
[AllVars(None)])
def test_fix_metadata_plev(self):
"""Test plev fix."""
time = self.cube.coord('time')
time.units = Unit("days since 1-1-1", time.units.calendar)
cube = self.fix.fix_metadata([self.cube])[0]
cube.coord('air_pressure')
def test_fix_metadata_no_plev(self):
"""Test plev fix wotk with no plev."""
self.cube.remove_coord('AR5PL35')
cube = self.fix.fix_metadata([self.cube])[0]
with self.assertRaises(CoordinateNotFoundError):
cube.coord('air_pressure')
|
import rospy
import numpy as np
from gym import spaces
from openai_ros.robot_envs import modrob_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
import sys
class ModRobHumanEnv(modrob_env.ModRobEnv):
def __init__(self):
"""
This Task Env is designed for having the ModRob in a human working environment.
It will learn how to finish tasks without colliding with the human.
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_abs_env_var = rospy.get_param("/modrob/ros_abs_env_var", None)
try:
ros_ws_abspath = os.environ[ros_abs_env_var]
except (KeyError, TypeError):
print("Please set the environment variable {}".format(ros_abs_env_var))
sys.exit(1)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="start_world_human.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/human_modrob/config",
yaml_file_name="modrob_human.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(ModRobHumanEnv, self).__init__(ros_ws_abspath)
## Load in environment variables
self._get_env_variables()
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-np.inf, np.inf)
## Set action and observation space
# Continuous action space.
# All actions should range from 0 to 1. This improves training.
self.n_actions = len(self.id_action)
self.n_observations = len(self.id_observation)
self.action_space = spaces.Box(low=-1, high=1, shape=(self.n_actions,))
self.observation_space = spaces.Box(low=0, high=1, shape=(self.n_observations,))
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
self.cumulated_steps = 0.0
def _get_env_variables(self):
"""Load in environment variables from yaml.
Relevant variables:
joint_min: Minimal angle for all joints
joint_max: Maximal angle for all joints
joint_max_delta: Max theoretically allowed movement per execution step (in rad)
gripper1_min: Minimal position of gripper part 1
gripper1_max: Maximal position of gripper part 1
gripper2_min: Minimal position of gripper part 2
gripper2_max: Maximal position of gripper part 2
gripper_max_delta: Maximal theoretically allowed movement per execution step (in m)
use_delta_actions: True: Use differential position commands, False: Use absolute position commands
movement_error: Precision maximum for regular movement (can be arbitrarily small)
movement_timeout: Time waited until next movement execution
init_error: Precision for start position
goal_error: Precision for goal reached
init_joint0_position: Initial position for joint0 - TODO: set this dynamically.
init_joint1_position: Initial position for joint1 - TODO: set this dynamically.
init_joint2_position: Initial position for joint2 - TODO: set this dynamically.
init_hand_to_finger1_position: Initial position for gripper part 1 - TODO: set this dynamically.
init_hand_to_finger2_position: Initial position for gripper part 2 - TODO: set this dynamically.
desired_pose: Dummy pose for joint0 - TODO: set this dynamically.
distance_reward: Getting closer to the reward gives positive reward
time_penalty: Time penalty for every step
goal_reward: Points given when reaching the goal
"""
## Determine the normalization constants for all observations and actions.
# action_norm consists of two columns (c, r) and one row per controllable joint.
# To normalize an action a, calculate: a_n = (a-c)/r, r!=0
# To denormalize an normalized action a_n, calculate a = (a_n * r) + c
# Since we are using the tanh as activation for actions, we normalize a to [-1; 1]
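# Worked example (hypothetical joint range [-1.5, 1.5] rad with absolute
# actions): _c = -1.5 and _r = 3.0 give c = 0.0 and r = 1.5, so an action
# a = 0.75 rad normalizes to a_n = (0.75 - 0.0) / 1.5 = 0.5, and a_n = -1.0
# denormalizes back to the lower limit of -1.5 rad.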
self.action_norm = []
self.observation_norm = []
# Additionally, define which id refers to which action and observation.
self.id_action = []
self.id_observation = []
# Max movements
self.joint_max_delta = rospy.get_param('/modrob/joint_max_delta')
self.gripper_max_delta = rospy.get_param('/modrob/gripper_max_delta')
self.use_delta_actions = rospy.get_param('/modrob/use_delta_actions')
# TODO: Read these from urdf file.
self.joint_min = rospy.get_param('/modrob/joint_min')
self.joint_max = rospy.get_param('/modrob/joint_max')
assert self.joint_max-self.joint_min != 0, "Joint difference is zero"
self.gripper1_min = rospy.get_param('/modrob/gripper1_min')
self.gripper1_max = rospy.get_param('/modrob/gripper1_max')
assert self.gripper1_max-self.gripper1_min != 0, "Gripper 1 difference is zero"
self.gripper2_min = rospy.get_param('/modrob/gripper2_min')
self.gripper2_max = rospy.get_param('/modrob/gripper2_max')
assert self.gripper2_max-self.gripper2_min != 0, "Gripper 2 difference is zero"
# First entries are joint positions
for joint_name in self.get_joint_names():
if "joint" in joint_name:
if self.use_delta_actions:
_c = -1*self.joint_max_delta
_r = 2*self.joint_max_delta
else:
_c = self.joint_min
_r = self.joint_max-self.joint_min
# From [0; 1] normalization to [-1; 1]
c = _c + _r/2
r = _r/2
self.action_norm.append([c, r])
self.observation_norm.append([self.joint_min, self.joint_max-self.joint_min])
self.id_action.append(joint_name)
self.id_observation.append(joint_name)
# TEST for dummy reward and goal
if joint_name == "joint0":
self.goal_joint_id = len(self.id_observation)-1
elif "hand_to_finger1" in joint_name:
# Only one action for both grippers (they move together)
if self.use_delta_actions:
self.action_norm.append([-1*self.gripper_max_delta, 2*self.gripper_max_delta])
else:
self.action_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.observation_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.id_action.append(joint_name)
self.id_observation.append(joint_name)
self.action_norm = np.array(self.action_norm)
self.observation_norm = np.array(self.observation_norm)
# Movement settings
self.movement_error = rospy.get_param('/modrob/movement_error')
self.movement_timeout = rospy.get_param('/modrob/movement_timeout')
self.init_error = rospy.get_param('/modrob/init_error')
self.goal_error = rospy.get_param('/modrob/goal_error')
self.init_arm_joint_position = []
if rospy.has_param("/modrob/init_joint_position"):
self.init_arm_joint_position = rospy.get_param("/modrob/init_joint_position")
assert(len(self.init_arm_joint_position) == len(self._arm_joint_names))
# Goal and reward
self.desired_pose = rospy.get_param('/modrob/desired_pose')
self.distance_reward = rospy.get_param('/modrob/distance_reward')
self.time_penalty = rospy.get_param('/modrob/time_penalty')
self.goal_reward = rospy.get_param('/modrob/goal_reward')
def _set_init_pose(self):
"""Sets the Robot in its init pose.
"""
# Move until init position is reached (timeout=0)
self.move_all_joints(self.init_joint_position, error=self.init_error, timeout=0.0)
return True
def _init_env_variables(self):
"""Inits episode specific variables each time we reset at the start of an episode.
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set done to False, because it is calculated asynchronously
self._episode_done = False
self.last_joint_position = self.get_joint_positions()
def _set_action(self, action):
"""Give a control command to the robot.
First, the action is clipped to the action space.
It is possible to assign negative rewards for too high actions.
This function denormalizes the action command and controls the robot.
Args:
action (array): Normalized actions
"""
action = np.clip(action, self.action_space.low, self.action_space.high)
rospy.logdebug("Start Set Action ==>"+str(action))
# Denormalize actions
denormalized_action = self.denormalize_actions(action)
# Build joint position dict
if self.use_delta_actions:
joint_positions = self.create_joint_positions_delta(denormalized_action)
else:
joint_positions = self.create_joint_positions_absolute(denormalized_action)
# Set action as command
self.move_all_joints(joint_positions, error=self.movement_error, timeout=self.movement_timeout)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""Get normalized observation array from robot sensors.
Returns:
observations (array): Normalized observation array
"""
rospy.logdebug("Start Get Observation ==>")
# Get non-normalized observations
observations = self.retrieve_observations()
# Normalize observations
observations = self.normalize_observations(observations)
rospy.logdebug("END Get Observation ==>"+str(observations))
return observations
def _is_done(self, observations):
"""Compute if episode is finished.
Right now this uses only a dummy goal for the first joint angle.
"""
observations = self.denormalize_observations(observations)
phi1 = observations[self.goal_joint_id]
if np.isclose(phi1, self.desired_pose, atol=self.goal_error):
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
"""Compute reward for this step.
Right now this uses only a dummy reward for the first joint angle.
"""
reward = 0
observations = self.denormalize_observations(observations)
phi1 = observations[self.goal_joint_id]
if not done:
# Reward of minus 1 per time step
reward -= self.time_penalty
# Reward for getting closer to desired pos
reward -= self.distance_reward * (self.desired_pose-phi1)
else:
reward += self.goal_reward
rospy.logdebug("This step reward = " + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward = " + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps = " + str(self.cumulated_steps))
return reward
def create_joint_positions_absolute(self, actions):
"""Creates joint_positions from an absolute action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (Dict): key: joint_name, value: desired joint position.
"""
joint_positions = dict()
for i, action_name in enumerate(self.id_action):
if "joint" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.joint_min, self.joint_max)
elif "hand_to_finger" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
# F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * actions[i] + b
return joint_positions
def create_joint_positions_delta(self, actions):
"""Creates absolute joint_positions from an delta action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (Dict): key: joint_name, value: desired absolute joint position.
"""
# Use the last observed joint position (not the current!)
last_joint_positions = self.last_joint_position
joint_positions = dict()
for i, joint_name in enumerate(self.id_action):
if "joint" in joint_name:
# Calculate new desired joint position and keep it in joint ranges
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.joint_min, self.joint_max)
elif "hand_to_finger" in joint_name:
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
# F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * joint_positions[joint_name] + b
return joint_positions
def retrieve_observations(self):
"""Retrieve all observations (not normalized).
Converts joint_positions (Dict): key: joint_name, value: desired joint position, to observation array.
Returns:
observations (np.array): non normalized observations, shape = [n_observations]
"""
self.last_joint_position = self.get_joint_positions()
observations = np.zeros([len(self.id_observation)])
for i, observation_name in enumerate(self.id_observation):
if "joint" in observation_name:
observations[i] = self.last_joint_position[observation_name]
elif "hand_to_finger1" in observation_name:
# Only use one gripper observation
observations[i] = self.last_joint_position[observation_name]
return observations
# Internal TaskEnv Methods
def normalize_actions(self, actions):
"""Normalize an array of actions.
To normalize an action a, calculate: a_n = (a-c)/r, r!=0
Args:
actions: Action array, shape = [n_actions]
Returns:
normalized_actions: Normalized action array, shape = [n_actions]
"""
normalized_actions = []
if len(actions) == 0:
rospy.logerr("No actions to normalize.")
return normalized_actions
normalized_actions = (actions - self.action_norm[:, 0]) / self.action_norm[:, 1]
return normalized_actions
def denormalize_actions(self, normalized_actions):
"""Denormalize an array of actions.
To denormalize an normalized action a_n, calculate a = (a_n * r) + c
Args:
normalized_actions: Normalized action array, shape = [n_actions]
Returns:
actions: Action array, shape = [n_actions]
"""
actions = []
if len(normalized_actions) == 0:
rospy.logerr("No actions to denormalize.")
return actions
actions = (normalized_actions * self.action_norm[:, 1]) + self.action_norm[:, 0]
return actions
def normalize_observations(self, observations):
"""Normalize an array of observations.
To normalize an observation a, calculate: a_n = (a-c)/r, r!=0
Args:
observations: Observation array, shape = [n_observations]
Returns:
normalized_observations: Normalized observation array, shape = [n_observations]
"""
normalized_observations = []
if len(observations) == 0:
rospy.logwarn("No observations to normalize.")
return normalized_observations
normalized_observations = (observations - self.observation_norm[:, 0]) / self.observation_norm[:, 1]
return normalized_observations
def denormalize_observations(self, normalized_observations):
"""Denormalize an array of observations.
To denormalize an normalized observation a_n, calculate a = (a_n * r) + c
Args:
normalized_observations: Normalized observation array, shape = [n_observations]
Returns:
observations: Observation array, shape = [n_observations]
"""
observations = []
if len(normalized_observations) == 0:
rospy.logwarn("No observations to denormalize.")
return observations
observations = (normalized_observations * self.observation_norm[:, 1]) + self.observation_norm[:, 0]
return observations
def discretize_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)//new_ranges
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float('Inf') or np.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif np.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
Returns True if the current position is similar to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param start_point:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = np.array((pstart.x, pstart.y, pstart.z))
b = np.array((p_end.x, p_end.y, p_end.z))
distance = np.linalg.norm(a - b)
return distance
|
<gh_stars>10-100
import numpy , pandas
import sklearn.utils
from sklearn import model_selection
from sklearn import neural_network
#--------------------------------------------------
''' Representation '''
data = pandas.read_csv('sonar.csv')
X = data[data.columns[0:60]]
Y = data[data.columns[60]]
X , Y = sklearn.utils.shuffle(X , Y , random_state = 1)
X_train , X_test , Y_train , Y_test = model_selection.train_test_split(X , Y , random_state = 0)
prediction = [[0.0260,0.0363,0.0136,0.0272,0.0214,0.0338,0.0655,0.1400,0.1843,0.2354,0.2720,0.2442,0.1665,0.0336,0.1302,0.1708,0.2177,0.3175,0.3714,0.4552,0.5700,0.7397,0.8062,0.8837,0.9432,1.0000,0.9375,0.7603,0.7123,0.8358,0.7622,0.4567,0.1715,0.1549,0.1641,0.1869,0.2655,0.1713,0.0959,0.0768,0.0847,0.2076,0.2505,0.1862,0.1439,0.1470,0.0991,0.0041,0.0154,0.0116,0.0181,0.0146,0.0129,0.0047,0.0039,0.0061,0.0040,0.0036,0.0061,0.0115]]
#--------------------------------------------------
''' Neural Network '''
ML = neural_network.MLPClassifier(hidden_layer_sizes = (100 , 100 , 100) , alpha = 3 , random_state = 0).fit(X_train , Y_train)
#ML = neural_network.MLPRegressor(hidden_layer_sizes = (10 , 100 , 1) , random_state = 0).fit(X_train , Y_train)
''' default values
hidden_layer_sizes = (100 , ) #Number of hidden layers and number of units in each layer
activation = 'relu' #Activation function: 'identity' , 'logistic' , 'tanh' , 'relu'
solver = 'adam' #The solver for weight optimization: 'lbfgs' , 'sgd' , 'adam'
alpha = 0.0001 #L2 Regularisation (lower = less regularisation)
batch_size = 'auto' #Size of minibatches for stochastic optimizers. If the solver is 'lbfgs', the classifier will not use minibatch. When set to 'auto', batch_size = min(200 , n_samples)
learning_rate = 'constant' #Learning rate schedule for weight updates 'constant' , 'invscaling' , 'adaptive'
learning_rate_init = 0.001 #The initial learning rate used. It controls the step-size in updating the weights. Only used when solver = 'sgd' or 'adam'
power_t = 0.5 #The exponent for inverse scaling learning rate. It is used in updating effective learning rate when the learning_rate is set to 'invscaling'. Only used when solver = 'sgd'
max_iter = 200 #Maximum number of iterations. The solver iterates until convergence (determined by ‘tol’) or this number of iterations. For stochastic solvers (‘sgd’, ‘adam’), note that this determines the number of epochs (how many times each data point will be used), not the number of gradient steps
shuffle = True #Whether to shuffle samples in each iteration. Only used when solver = 'sgd' or 'adam'
random_state = None #If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random
tol = 0.0001 #Tolerance for the optimization. When the loss or score is not improving by at least tol for two consecutive iterations, unless learning_rate is set to ‘adaptive’, convergence is considered to be reached and training stops
verbose = False #Whether to print progress messages to stdout
warm_start = False #When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution
momentum = 0.9 #Momentum for gradient descent update. Should be between 0 and 1. Only used when solver=’sgd’
nesterovs_momentum = True #Whether to use Nesterov’s momentum. Only used when solver=’sgd’ and momentum > 0
early_stopping = False #Whether to use early stopping to terminate training when validation score is not improving. If set to true, it will automatically set aside 10% of training data as validation and terminate training when validation score is not improving by at least tol for two consecutive epochs. Only effective when solver=’sgd’ or ‘adam’
validation_fraction = 0.1 #The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True
beta_1 = 0.9 #Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1). Only used when solver=’adam’
beta_2 = 0.999 #Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1). Only used when solver=’adam’
epsilon = 0.00000001 #Value for numerical stability in adam. Only used when solver=’adam’
---------------------------------------------------------------------------------------------------------------------------------
.classes_ #Class labels for each output
.loss_ #The current loss computed with the loss function
.coefs_ #The ith element in the list represents the weight matrix corresponding to layer i
.intercepts_ #The ith element in the list represents the bias vector corresponding to layer i + 1
.n_iter_ #The number of iterations the solver has ran
.n_layers_ #Number of layers
.n_outputs_ #Number of outputs
.out_activation_ #Name of the output activation function
---------------------------------------------------------------------------------------------------------------------------------
.fit(X , Y) #Fit the model to data matrix X and target(s) y
.get_params([deep]) #Get parameters for this estimator
.predict(X) #Predict using the multi-layer perceptron classifier
.predict_log_proba(X) #Return the log of probability estimates
.predict_proba(X) #Probability estimates
.score(X , Y) #Returns the mean accuracy on the given test data and labels
.set_params(**params) #Set the parameters of this estimator
'''
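#--------------------------------------------------
''' Early stopping (a minimal sketch using the knobs documented above;
the hyperparameter values are illustrative, not tuned) '''
#ML_es = neural_network.MLPClassifier(hidden_layer_sizes = (100 , 100) , alpha = 3 ,
# early_stopping = True , validation_fraction = 0.1 ,
# random_state = 0).fit(X_train , Y_train)
#print(ML_es.n_iter_) #Epochs actually run before early stopping kicked in
#print(ML_es.predict_proba(prediction)) #Class probability estimates for the sample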
#--------------------------------------------------
''' Evaluate '''
print(ML.score(X_train , Y_train))
print(ML.score(X_test , Y_test))
#--------------------------------------------------
''' Prediction '''
print(ML.predict(prediction))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse,os,sys,re
import multiprocessing as mp
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
outputdir = os.getcwd()+os.sep+"outputtext.txt"
parser = argparse.ArgumentParser(
description='Perform text analysis and manipulation.')
parser.add_argument(
'-i', '--inputfile', type=str, help='Input text file to process', required=True)
parser.add_argument(
'-o', '--outputfile', type=str, help='Output file, default %s' %(outputdir), required=False, default=outputdir)
parser.add_argument(
'-p', '--prefix', type=str, help='Insert a prefix into the content of each line', required=False, default=None)
parser.add_argument(
'-s', '--suffix', type=str, help='Insert a suffix into the content of each line', required=False, default=None)
parser.add_argument(
'-e', '--extractcolumn', nargs="*", dest='cols',help='Enter column number to be extracted (1, 2, 3, etc.) and delimiter that separates columns i.e character, word, segment of html code, etc. Specify parameters in double quotes like "COL1" "DELIMITER".', required=False, default=None)
parser.add_argument(
'-r', '--replacetext', nargs="*", dest='replacer',help='Enter text to be replaced with other text. Regular expressions are supported.', required=False, default=None)
parser.add_argument(
'-d', '--deleteemptylines',nargs='?',const=True, type=str2bool,help='Use this flag if you want to remove all the empty/blank lines from your file.', required=False, default=False)
#parser.add_argument(
#'-c', '--count',nargs='?',const=True, type=str2bool, help='Count your text\'s characters, words, sentences, lines and word frequency.', required=False, default=False)
args = parser.parse_args()
#print(args)
inputfile = args.inputfile
outfile = args.outputfile
#count = args.count
suffix = args.suffix
prefix = args.prefix
excolumn = args.cols
replacer = args.replacer
delete = args.deleteemptylines
return inputfile,outfile,suffix,prefix,excolumn,replacer,delete
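# Example invocation (hypothetical file names; options are this script's own):
#   python textmechanic.py -i input.txt -o out.txt -e ";" "1" -d yes
# extracts the ';'-separated column at index 1 from every line, drops blank
# lines, and writes the result to out.txt.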
class textMechanic():
def __init__(self,inputfile,outputfile,excol=None,replacer=None,suffix=None,prefix=None,delete=None):
self.inputfile = inputfile
self.outputfile = outputfile
self.prefix = prefix
self.suffix = suffix
if delete:
self.delete = delete
if replacer:
self.toreplace = replacer[0]
self.replacer = replacer[1]
else:
self.toreplace = None
self.replacer = None
self.excol = excol
# argparse delivers strings, so detect which argument is the column number
if self.excol and str(self.excol[0]).isdigit():
self.colnumber = int(self.excol[0])
self.delimiter = self.excol[1]
elif self.excol:
self.colnumber = int(self.excol[1])
self.delimiter = self.excol[0]
else:
self.colnumber = None
self.delimiter = None
def start(self):
pool = mp.Pool(10)
jobs = []
for chunkStart,chunkSize in self.chunkify():
jobs.append( pool.apply_async(self.process_wrapper,(chunkStart,chunkSize)) )
for job in jobs:
job.get()
pool.close()
def process(self,line):
# decode the bytes read from the binary file handle instead of
# slicing the bytes object's repr
line = line.decode('utf-8', errors='replace')
line = line.strip()
dlt = False
"""if self.count:
self.lines.value += 1
self.words.value += len(line.split())
self.letters.value += len(line)"""
if self.delete:
dlt = self.isEmpty(line)
if self.replacer and self.toreplace:
line = self.replace(line)
if self.excol:
line = self.separate(line)
if self.prefix:
line = self.add_prefix(line)
if self.suffix:
line = self.add_suffix(line)
if (self.prefix or self.suffix or self.excol or (self.replacer and self.toreplace)) and not dlt:
with open(self.outputfile,'a') as f:
f.write(line+os.linesep)
def process_wrapper(self,chunkStart, chunkSize):
with open(self.inputfile,'rb') as f:
f.seek(chunkStart)
lines = f.read(chunkSize).splitlines()
for line in lines:
self.process(line)
def chunkify(self,size=1024*1024):
fileEnd = os.path.getsize(self.inputfile)
with open(self.inputfile,'rb') as f:
chunkEnd = f.tell()
while True:
chunkStart = chunkEnd
f.seek(size,1)
f.readline()
chunkEnd = f.tell()
yield chunkStart, chunkEnd - chunkStart
if chunkEnd > fileEnd:
break
def separate(self,string):
try:
return string.split(self.delimiter)[self.colnumber]
except:
print("Couldn't extract column at line '%s'. Are you sure that there are %s columns?"%(string,self.colnumber))
def isEmpty(self,string):
if not string:
return True
return False
def add_suffix(self,string):
return string+self.suffix
def add_prefix(self,string):
return self.prefix+string
def replace(self,string):
return re.sub(self.toreplace, self.replacer, string.rstrip())
if __name__ == "__main__":
inputfile,outfile,suffix,prefix,excol,replacer,delete = parse_args()
print(outfile)
if excol:
if len(excol) != 2:
sys.exit('[FATAL] - If you\'re using --e (extractcolumn) you need to specify 2 parameters, the delimiter and the column number you want to extract, example: (-e ";" "1") where ; is the delimiter and 1 the column number.')
if replacer:
if len(replacer) != 2:
sys.exit('[FATAL] - If you\'re using --r (replacetext) you need to specify 2 parameters, the text to be replaced and the text to replace the old one with, example: (-r "old" "new").')
tm = textMechanic(inputfile,outfile,excol,replacer,suffix,prefix,delete)
print(tm.start())
|
<reponame>xyzza/m3-core
# coding: utf-8
u"""Паки и экшены для работы со справочниками."""
from logging import getLogger
from django.conf import settings
from m3.actions import (
ActionPack, Action, PreJsonResult, OperationResult, ACD
)
from m3_django_compat import atomic
from m3_ext.ui.windows.complex import ExtDictionaryWindow
from m3_ext.ui.misc.store import ExtJsonStore
from m3_ext.ui.containers import ExtPagingBar
from m3_ext.ui.results import ExtUIScriptResult
from m3.actions import utils
from m3.actions.interfaces import ISelectablePack
from m3.actions.results import ActionResult
from m3.db import BaseObjectModel, safe_delete
from m3 import RelatedError
from m3_django_compat import get_request_params
logger = getLogger('django')
try:
from m3_audit.manager import AuditManager
except ImportError:
# When building the documentation, the external Django knows nothing about m3_audit
logger.warning('m3_audit import error')
MSG_DOESNOTEXISTS = (
u'Dictionary record with id=%s was not found in the database.<br/>'
u'It may have been deleted. Please refresh the table.'
)
class ObjectNotFound(Exception):
"""
Exception for virtual dictionaries. Analogous to a model's DoesNotExist.
"""
pass
class DictListWindowAction(Action):
"""
Action that returns a window with the list of dictionary items.
"""
url = '/list-window$'
def create_window(self, request, context, mode):
"""
Create and configure the window
"""
base = self.parent
allow_copy = hasattr(base, 'allow_copy') and base.allow_copy
win = base.list_form(mode=mode, title=base.title)
win.allow_copy = allow_copy
win.height, win.width = base.height, base.width
win.min_height, win.min_width = base.height, base.width
win.init_grid_components()
if base.list_paging:
win.grid.bottom_bar = ExtPagingBar(page_size=25)
return win
def create_columns(self, control, columns):
"""
Add the displayed columns. See the description in the base class!
"""
for column in columns:
if isinstance(column, tuple):
column_params = {
'data_index': column[0],
'header': column[1],
'sortable': True}
if len(column) > 2:
column_params['width'] = column[2]
elif isinstance(column, dict):
column_params = column
else:
raise Exception('Incorrect parameter column.')
control.add_column(**column_params)
def configure_list(self, win):
base = self.parent
# Set up the data sources
grid_store = ExtJsonStore(
url=base.rows_action.get_absolute_url(),
auto_load=True,
remote_sort=True)
grid_store.total_property = 'total'
grid_store.root = 'rows'
win.grid.set_store(grid_store)
if not base.list_readonly:
# Three events are available: creating a new item,
# editing or deleting an existing one
win.url_new_grid = base.edit_window_action.get_absolute_url()
win.url_edit_grid = base.edit_window_action.get_absolute_url()
win.url_delete_grid = base.delete_action.get_absolute_url()
# If copying is allowed, one more event is available.
if getattr(base, 'allow_copy', False):
win.url_copy_grid = base.copy_action.get_absolute_url()
def run(self, request, context):
win = self.create_window(request, context, mode=0)
self.create_columns(win.grid, self.parent.list_columns)
self.configure_list(win)
# check the edit permission
if not self.parent.has_sub_permission(
request.user, self.parent.PERM_EDIT, request):
win.make_read_only()
return ExtUIScriptResult(self.parent.get_list_window(win))
class DictSelectWindowAction(DictListWindowAction):
"""
Action returning a window with a form for selecting from the dictionary
"""
url = '/select-window$'
def run(self, request, context):
base = self.parent
win = self.create_window(request, context, mode=1)
win.modal = True
self.create_columns(win.grid, self.parent.list_columns)
self.configure_list(win)
# M prefer 12.12.10 >
# win.column_name_on_select = "name"
#-----:
win.column_name_on_select = base.column_name_on_select
# prefer <
# check the edit permission
if not self.parent.has_sub_permission(
request.user, self.parent.PERM_EDIT, request):
win.make_read_only()
return ExtUIScriptResult(self.parent.get_select_window(win))
class DictEditWindowAction(Action):
"""
Editing a dictionary item
"""
url = '/edit-window$'
def context_declaration(self):
return [ACD(name='id',
default=0,
type=int,
required=True,
verbose_name=u'dictionary item id'),
ACD(name='isGetData',
default=False,
type=bool,
required=True,
verbose_name=u'data load flag')]
def run(self, request, context):
base = self.parent
is_get_data = context.isGetData
# Fetch the object by its id
try:
obj = base.get_row(context.id)
except base._nofound_exception:
return OperationResult.by_message(MSG_DOESNOTEXISTS % context.id)
# The difference between a new object and an existing one is
# that a new one has no id, or its id is empty
create_new = True
if isinstance(obj, dict) and obj.get('id') is not None:
create_new = False
elif hasattr(obj, 'id') and getattr(obj, 'id') is not None:
create_new = False
if create_new and base.add_window:
win = utils.bind_object_from_request_to_form(
request, base.get_row, base.add_window)
else:
win = utils.bind_object_from_request_to_form(
request, base.get_row, base.edit_window)
if not win.title:
win.title = base.title
win.form.url = base.save_action.get_absolute_url()
# set the URL for reading data
win.data_url = base.edit_window_action.get_absolute_url()
# check the edit permission
if not self.parent.has_sub_permission(
request.user, self.parent.PERM_EDIT, request):
exclude_list = ['close_btn', 'cancel_btn']
win.make_read_only(True, exclude_list)
# The window may have an additional configuration procedure
# for a specific dictionary
if (hasattr(win, 'configure_for_dictpack') and
callable(win.configure_for_dictpack)):
win.configure_for_dictpack(action=self, pack=self.parent,
request=request, context=context)
if not is_get_data:
# if data was not requested, return the window
return ExtUIScriptResult(base.get_edit_window(win))
else:
# if data was requested, squeeze it back out of the window into the
# object, since the window may have held other data (not from this object)
data_object = {}
# since we do not know which fields the object should have, create
# all the fields that are present on the form
all_fields = win.form._get_all_fields(win)
for field in all_fields:
data_object[field.name] = None
win.form.to_object(data_object)
return PreJsonResult({'success': True, 'data': data_object})
class DictRowsAction(Action):
"""
Action that returns the list of dictionary records.
Specifically, the list of records that can then be displayed in a grid.
The execution context may specify:
a) a text string with a filter (to perform a search);
b) a start position and an offset of records for paging.
"""
url = '/rows$'
def run(self, request, context):
offset = utils.extract_int(request, 'start')
limit = utils.extract_int(request, 'limit')
request_params = get_request_params(request)
filter = request_params.get('filter')
direction = request_params.get('dir')
user_sort = request_params.get('sort')
if direction == 'DESC':
user_sort = '-' + user_sort
dict_list = []
for item in self.parent.list_columns:
if isinstance(item, (list, tuple)):
dict_list.append(item[0])
elif isinstance(item, dict) and item.get('data_index'):
dict_list.append(item['data_index'])
if (hasattr(self.parent, 'modify_rows_query') and
callable(self.parent.modify_rows_query)):
rows = self.parent.get_rows_modified(
offset, limit, filter, user_sort, request, context)
else:
rows = self.parent.get_rows(offset, limit, filter, user_sort)
return PreJsonResult(
rows, self.parent.secret_json, dict_list=dict_list)
class DictLastUsedAction(Action):
"""
Action that returns the list of the most recently used records
"""
url = '/last-rows$'
def run(self, request, context):
return PreJsonResult(self.parent.get_last_used(self))
class ListGetRowAction(Action):
"""
Action responsible for returning the data of
a single dictionary record
"""
url = '/item$'
def context_declaration(self):
return [ACD(
name='id',
default=0,
type=int,
required=True,
verbose_name=u'dictionary item id')]
def run(self, request, context):
try:
result = self.parent.get_row(context.id)
except self.parent._nofound_exception:
return OperationResult.by_message(MSG_DOESNOTEXISTS % context.id)
return PreJsonResult(result)
class DictSaveAction(Action):
"""
Action that saves a dictionary record.
"""
url = '/save$'
def context_declaration(self):
return [
ACD(
name='id',
default=0,
type=int,
required=True,
                verbose_name=u'dictionary record id')]
def run(self, request, context):
try:
if not context.id and self.parent.add_window:
obj = utils.bind_request_form_to_object(
request, self.parent.get_row, self.parent.add_window)
else:
obj = utils.bind_request_form_to_object(
request, self.parent.get_row, self.parent.edit_window)
except self.parent._nofound_exception:
return OperationResult.by_message(MSG_DOESNOTEXISTS % context.id)
        # Validate the fields of the object being saved
result = self.parent.validate_row(obj, request)
if result:
assert isinstance(result, ActionResult)
return result
result = self.parent.save_row(obj)
if isinstance(result, OperationResult) and result.success is True:
            # Weak spot: once the action workflow is reworked, the name of
            # the parameter holding the record identifier may no longer
            # be "id"
if 'm3_audit' in settings.INSTALLED_APPS:
AuditManager().write(
'dict-changes',
user=request.user,
model_object=obj,
type='new' if not context.id else 'edit')
context.id = obj.id
return result
class ListDeleteRowAction(Action):
url = '/delete_row$'
def run(self, request, context):
"""
        Several objects may be deleted at once. Their keys
        arrive separated by commas.
"""
        ids = utils.extract_int_list(request, 'id')
        objs = []
        try:
            # collect the rows one by one so that the failing id is known
            for row_id in ids:
                objs.append(self.parent.get_row(row_id))
        except self.parent._nofound_exception:
            return OperationResult.by_message(MSG_DOESNOTEXISTS % row_id)
result = self.parent.delete_row(objs)
if (isinstance(result, OperationResult) and
result.success is True and
'm3_audit' in settings.INSTALLED_APPS):
for obj in objs:
AuditManager().write(
'dict-changes',
user=request.user,
model_object=obj,
type='delete')
return result
class DictCopyAction(Action):
"""
    Copy a dictionary record
"""
url = '/copy$'
def context_declaration(self):
return [
ACD(
name='id',
type=int,
required=True,
                verbose_name=u'dictionary record id'
)]
def run(self, request, context):
"""
"""
base = self.parent
win = utils.bind_object_from_request_to_form(
request, base.get_row, base.edit_window, exclusion=['id'])
if not win.title:
win.title = base.title
win.form.url = base.save_action.get_absolute_url()
        # set the URL used to read the data
win.data_url = base.edit_window_action.get_absolute_url()
win.orig_request = request
win.orig_context = context
        # The window may provide an extra configuration hook
        # for a specific dictionary pack
if (hasattr(win, 'configure_for_dictpack') and
callable(win.configure_for_dictpack)):
win.configure_for_dictpack(
action=self, pack=self.parent,
request=request, context=context)
return ExtUIScriptResult(base.get_edit_window(win))
class BaseDictionaryActions(ActionPack, ISelectablePack):
"""
    Action pack with operations specific to working with dictionaries
"""
    # Dictionary window title
    title = ''  # for a record
    # Column list: tuples of (json field name, column name in the window)
list_columns = []
    # Windows for editing a dictionary item:
    add_window = None  # a new item
    edit_window = None  # an existing item
    # Classes responsible for rendering the forms:
    list_form = ExtDictionaryWindow  # list form
    select_form = ExtDictionaryWindow  # selection form
    # Secrecy settings. When True,
    # a secrecy flag is added to the result
secret_json = False
secret_form = False
    # Window width and height
width, height = 510, 400
list_paging = True
list_readonly = False
    # Default column whose value is picked up
    # when a value is selected from the dictionary
column_name_on_select = 'name'
    # Whether copying is enabled
allow_copy = False
    # access permissions for basic dictionaries
PERM_EDIT = 'edit'
    sub_permissions = {PERM_EDIT: u'Dictionary editing'}
def __init__(self):
super(BaseDictionaryActions, self).__init__()
        # Unlike regular packs, the actions here are created
        # by the pack itself rather than by the controller,
        # so that they can conveniently be accessed by name
self.list_window_action = DictListWindowAction()
self.select_window_action = DictSelectWindowAction()
self.edit_window_action = DictEditWindowAction()
self.rows_action = DictRowsAction()
self.last_used_action = DictLastUsedAction()
self.row_action = ListGetRowAction()
self.save_action = DictSaveAction()
self.delete_action = ListDeleteRowAction()
self.copy_action = DictCopyAction()
        # They still need to be registered, though
self.actions = [
self.list_window_action,
self.select_window_action,
self.edit_window_action,
self.rows_action,
self.last_used_action,
self.row_action,
self.save_action,
self.delete_action,
self.copy_action
]
        # Exception caught in the actions when an object is not found
self._nofound_exception = ObjectNotFound
    #===================== URL-RETURNING FUNCTIONS ========================
def get_list_url(self):
"""
        Returns the URL of the dictionary item list form.
        Used to assign URLs in the application code.
"""
return self.list_window_action.get_absolute_url()
#ISelectablePack
def get_select_url(self):
"""
        Returns the URL of the dictionary selection form.
        Used to assign URLs in the application code.
"""
return self.select_window_action.get_absolute_url()
#ISelectablePack
def get_edit_url(self):
"""
        Returns the URL of the dictionary item edit form.
"""
return self.edit_window_action.get_absolute_url()
def get_rows_url(self):
"""
        Returns the URL used to request grid rows
"""
return self.rows_action.get_absolute_url()
#ISelectablePack
def get_autocomplete_url(self):
"""
        Returns the URL used to request items
        matching the text entered into the field
"""
return self.get_rows_url()
    #===================== DATA-RETURNING FUNCTIONS =======================
def get_rows(self, offset, limit, filter, user_sort=''):
"""
        Method that returns the grid rows
        as a plain Python list.
"""
raise NotImplementedError()
def get_row(self, id):
"""
        Method that returns the dictionary record
        with the given identifier.
"""
raise NotImplementedError()
    def get_last_used(self, action):
"""
        Method that returns the dictionary records that were recently
        selected by a particular user.
        The records are returned as a plain Python list.
"""
raise NotImplementedError()
def validate_row(self, obj, request):
"""
        Validates the fields of the object being saved.
        If everything is fine the method returns nothing;
        otherwise the result is returned to the action,
        i.e. any of the objects supported in results.py may be returned.
"""
pass
def save_row(self, obj):
"""
        Method that saves a dictionary record.
        When the method runs, the obj parameter holds exactly
        the record that must be saved.
"""
raise NotImplementedError()
    def delete_row(self, objs):
        """
        Method that deletes dictionary records.
        When the method runs, the objs parameter holds exactly
        the list of records that must be deleted.
        """
        raise NotImplementedError()
#ISelectablePack
def get_display_text(self, key, attr_name=None):
"""
        Get the display value of a record
        (or of its attribute attr_name) by the key
"""
row = self.get_row(key)
if row is not None:
name = attr_name if attr_name else self.column_name_on_select
text = getattr(row, name)
            # getattr may return a method, e.g. verbose_name
if callable(text):
return text()
else:
return text
#ISelectablePack
def get_record(self, key):
"""
        Get the record value by the key
"""
return self.get_row(key)
    #========================= WINDOW HANDLING ============================
def get_list_window(self, win):
"""
        Returns the configured dictionary "list" window
"""
return win
def get_select_window(self, win):
"""
        Returns the configured dictionary selection window
"""
return win
def get_edit_window(self, win):
        ''' Returns the configured dictionary item edit window '''
return win
class BaseDictionaryModelActions(BaseDictionaryActions):
"""
    Class implementing dictionary operations
    whose records are models.
"""
    # Dictionary view settings (set by the application developer)
model = None
    # Model fields that the search will run against
filter_fields = []
    # Sort order of the list items. Works as follows:
    # 1. If the list model's list_columns contain the field
    #    code, ascending sorting on that field is applied;
    # 2. If list_columns do not contain code but do
    #    contain name, ascending sorting on name is applied.
    # Example: list_sort_order = ['code', '-name']
list_sort_order = None
def __init__(self):
super(BaseDictionaryModelActions, self).__init__()
if self.model:
self._nofound_exception = self.model.DoesNotExist
def get_rows_modified(
self, offset, limit, filter,
user_sort='', request=None, context=None):
'''
        Returns the data for the dictionary grid
'''
sort_order = user_sort.split(',') if user_sort else self.list_sort_order
filter_fields = self._default_filter()
query = self.model.objects.all()
query = utils.apply_sort_order(query, self.list_columns, sort_order)
query = utils.apply_search_filter(query, filter, filter_fields)
if (hasattr(self, 'modify_rows_query') and
callable(self.modify_rows_query)):
query = self.modify_rows_query(query, request, context)
total = query.count()
if limit > 0:
query = query[offset: offset + limit]
result = {'rows': list(query), 'total': total}
return result
def get_rows(self, offset, limit, filter, user_sort=''):
sort_order = user_sort.split(',') if user_sort else self.list_sort_order
filter_fields = self._default_filter()
query = utils.apply_sort_order(
self.model.objects, self.list_columns, sort_order)
query = utils.apply_search_filter(query, filter, filter_fields)
total = query.count()
if limit > 0:
query = query[offset: offset + limit]
result = {'rows': list(query.all()), 'total': total}
return result
    # def modify_rows_query(self, query, request, context):
    #     '''
    #     Modifies the query that fetches the data.
    #     This method should be defined
    #     in subclasses.
    #     '''
    #     return query
def get_row(self, id):
assert isinstance(id, int)
        # If there is no id, a new object must be created
if id == 0:
record = self.model()
else:
record = self.model.objects.get(id=id)
return record
@atomic
def save_row(self, obj):
obj.save()
return OperationResult(success=True)
def delete_row(self, objs):
        # This implementation exists because IntegrityError
        # cannot be caught until the transaction completes,
        # so the transaction has to be wrapped.
@atomic
def delete_row_in_transaction(self, objs):
message = ''
if len(objs) == 0:
                message = u'The item does not exist in the database.'
else:
for obj in objs:
if (isinstance(obj, BaseObjectModel) or
(hasattr(obj, 'safe_delete') and
callable(obj.safe_delete))):
try:
obj.safe_delete()
                        except RelatedError as e:
message = e.args[0]
else:
if not safe_delete(obj):
                            message = (
                                u'Failed to delete item %s. '
                                u'It may still be referenced.' % obj.id)
break
return OperationResult.by_message(message)
        # Here we try to catch the error raised inside the transaction.
try:
return delete_row_in_transaction(self, objs)
        except Exception as e:
            # Django's built-in IntegrityError is not raised here;
            # exceptions specific to each database driver
            # are thrown instead.
            # Per the PEP 249 specification, however,
            # they are all named IntegrityError
if e.__class__.__name__ == 'IntegrityError':
                message = (
                    u'Failed to delete the item. '
                    u'It may still be referenced.')
return OperationResult.by_message(message)
else:
                # re-raise all other errors
raise
def _default_filter(self):
"""
        Set the default search fields 'code' and 'name'
        when the model has such fields
"""
filter_fields = self.filter_fields[:]
if not filter_fields:
filter_fields.extend(
[field.attname for field in self.model._meta.local_fields
if field.attname in ('code', 'name')])
return filter_fields
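    # Illustrative example: for a model whose local fields are
    # (id, code, name, parent) and an empty filter_fields list,
    # _default_filter() returns ['code', 'name'], so the text search in
    # get_rows() runs against those two columns by default.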
class BaseEnumerateDictionary(BaseDictionaryActions):
"""
    Base action pack for building dictionaries
    backed by enumerations, i.e.
    predefined immutable sets of values.
"""
    # The enumeration class this dictionary works with
enumerate_class = None
    list_paging = False  # There are usually few values and they fit on one grid page
list_readonly = True
    list_columns = [('code', 'Code', 15),
                    ('name', 'Name')]
def get_rows(self, offset, limit, filter, user_sort=''):
"""
        Returns the data for the dictionary grid
"""
assert self.enumerate_class is not None, (
'Attribute enumerate_class is not defined.')
data = []
for k, v in self.enumerate_class.values.items():
if filter and v.upper().find(filter.upper()) < 0:
continue
else:
data.append({'id': k, 'code': k, 'name': v})
result = {'rows': data, 'total': len(data)}
return result
def get_row(self, id):
"""
        Stub that makes binding work. With enumerations
        the id itself is stored in the DB
"""
assert isinstance(id, int)
        assert id in self.enumerate_class.keys(), (
            'Enumerate key "%s" is not'
            ' defined in %s' % (id, self.enumerate_class))
return id
#ISelectablePack
def get_display_text(self, key, attr_name=None):
"""
        Get the display value of a record
        (or of its attribute attr_name) by the key
"""
row_id = self.get_row(key)
text = self.enumerate_class.values.get(row_id, '')
return text
#ISelectablePack
def get_record(self, key):
return self.get_row(key)
import os.path
import numpy as np
print("Enter your date of birth: \n")
day = input("Day: ")
month = input("Month: ")
year = input("Year: ")
print("Enter your name \n")
name = input()
save_path = ""
# Set the path to save the text files.
fname = name + ".txt"
cname = os.path.join(save_path, fname)
note = open(cname, "w+")
note.write("Date of birth: " + day + " - " + month + " - " + year + '\n')
note.write("Name: " + name + '\n')
note.write('\n')
def single_digit(num):
    # Reduce a number to a single digit, preserving the master numbers 11 and 22.
    num = int(num)  # inputs may arrive as strings (raw input() values)
    while num > 9:
        if num == 11 or num == 22:
            break
        sum_of_digits = 0
        for digit in str(num):
            sum_of_digits += int(digit)
        num = sum_of_digits
    return num
def single_digit_complete(num):
    # Reduce a number all the way to a single digit (no master numbers kept).
    num = int(num)
    while num > 9:
        sum_of_digits = 0
        for digit in str(num):
            sum_of_digits += int(digit)
        num = sum_of_digits
    return num
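# Illustrative checks of the two reducers above (assumed behaviour, not part
# of the original script):
#   single_digit(1994)        -> 5   (1+9+9+4 = 23, then 2+3 = 5)
#   single_digit("29")        -> 11  (master number 11 is preserved)
#   single_digit_complete(29) -> 2   (11 is reduced further: 1+1 = 2)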
def life_path(day, month, year):
dd = single_digit(day)
dm = single_digit(month)
dy = single_digit(year)
LP = int(day) + int(month) + int(year)
lp = dd + dm + dy
lifepath = single_digit(LP)
note.write("Your Life Path is: " + str(lp) + "/" + str(lifepath) + '\n')
LP = lifepath
pin1 = single_digit(dd + dm)
pin2 = single_digit(dd + dy)
pin3 = single_digit(pin1 + pin2)
pin4 = single_digit(dm + dy)
if LP == 11:
sLP = 2
elif LP == 22:
sLP = 4
else:
sLP = LP
p_start = 36 - sLP
note.write("PINNACLES" + '\n')
note.write('\n')
note.write("1st Pinnacle: from " + str(0) + " to " + str(int(p_start)) + " is " + str(pin1) + '\n')
note.write(
"2nd Pinnacle: from " + str(int(p_start)) + " to " + str(int(p_start) + 9) + " is " + str(pin2) + '\n')
note.write("3rd Pinnacle: from " + str(int(p_start) + 9) + " to " + str(int(p_start) + 18) + " is " + str(
pin3) + '\n')
    note.write("4th Pinnacle: from " + str(int(p_start) + 18) + " to " + str(int(p_start) + 27) + " is " + str(
        pin4) + '\n')
chal1 = abs(int(single_digit_complete(dd) - single_digit_complete(dm)))
chal2 = abs(int(single_digit_complete(dd) - single_digit_complete(dy)))
mchal = abs(int(single_digit_complete(chal1) - single_digit_complete(int(chal2))))
note.write('\n')
note.write("CHALLENGES" + '\n')
    note.write("Challenge in the first half: " + str(chal1) + '\n')
    note.write("Challenge in the second half: " + str(chal2) + '\n')
    note.write("Overall challenge: " + str(mchal) + '\n')
return LP
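# Worked example (illustrative): for a life path of 7, p_start = 36 - 7 = 29,
# so the four pinnacles above span ages 0-29, 29-38, 38-47 and 47 onwards;
# the master life paths 11 and 22 use 2 and 4 respectively for this timing.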
lifepath = life_path(day, month, year)
def essence(pname, age, i):
    # Spread each letter's Pythagorean value over as many years as the value
    # itself, starting at year index i (the classic "essence" table).
    letter_values = {
        'a': 1, 'j': 1, 's': 1, 'b': 2, 'k': 2, 't': 2,
        'c': 3, 'l': 3, 'u': 3, 'd': 4, 'm': 4, 'v': 4,
        'e': 5, 'n': 5, 'w': 5, 'f': 6, 'o': 6, 'x': 6,
        'g': 7, 'p': 7, 'y': 7, 'h': 8, 'q': 8, 'z': 8,
        'i': 9, 'r': 9,
    }
    essence_tab = np.full((age, 1), 0)
    for letter in pname.lower():
        value = letter_values.get(letter)
        if value is None:
            continue  # skip anything that is not a mapped letter
        for ll in range(value):
            if i + ll < len(essence_tab):
                essence_tab[i + ll] += value
        i += value
    return essence_tab, i
def name_number(name):
    # Sum the Pythagorean values of all letters (expression number) and of
    # the vowels only (soul urge number).
    letter_values = {
        'a': 1, 'j': 1, 's': 1, 'b': 2, 'k': 2, 't': 2,
        'c': 3, 'l': 3, 'u': 3, 'd': 4, 'm': 4, 'v': 4,
        'e': 5, 'n': 5, 'w': 5, 'f': 6, 'o': 6, 'x': 6,
        'g': 7, 'p': 7, 'y': 7, 'h': 8, 'q': 8, 'z': 8,
        'i': 9, 'r': 9,
    }
    name_num = 0
    soul_urge = 0
    for letter in name.lower():
        value = letter_values.get(letter)
        if value is None:
            continue
        name_num += value
        if letter in 'aueoi':  # vowels contribute to the soul urge number
            soul_urge += value
    return name_num, soul_urge
def essence_cal(name):
name = name.lower()
sp_name = name.split()
spl = len(sp_name)
age = 100
essence_tab = np.full((age, 1), 0)
# print("Calculating.", end=" ")
ind = np.full((spl, 1), 0)
temp = [np.full((spl, 1), 0)]
for i in range(spl):
temp, ind[i] = essence(sp_name[i], age, 0)
essence_tab = np.add(essence_tab, temp)
while ind[i] <= age:
temp, ind[i] = essence(sp_name[i], age, int(ind[i]))
essence_tab = np.add(essence_tab, temp)
    Py = single_digit(int(day) + int(month))
    daymon = Py
space = ' '
note.write('\n')
note.write("ESSENCE" + '\n')
note.write(3 * space + "Year" + 4 * space + "Essence Year" + 4 * space + "Personal year" + '\n')
for i in range(len(essence_tab)):
personal_year = int(daymon) + int(year) + i
fstop = "."
note.write(
3 * space + "%d" % (int(i + int(year))) + 10 * space + "%d" % single_digit(int(
essence_tab[i])) + 10 * space + "%d" % single_digit(
int(personal_year)) + '\n')
note.write(30 * fstop + '\n')
return essence_tab
def missing_numbers(name):
    # Mark which Pythagorean values (1-9) are absent from the name and count
    # how many times each value occurs.
    letter_values = {
        'a': 1, 'j': 1, 's': 1, 'b': 2, 'k': 2, 't': 2,
        'c': 3, 'l': 3, 'u': 3, 'd': 4, 'm': 4, 'v': 4,
        'e': 5, 'n': 5, 'w': 5, 'f': 6, 'o': 6, 'x': 6,
        'g': 7, 'p': 7, 'y': 7, 'h': 8, 'q': 8, 'z': 8,
        'i': 9, 'r': 9,
    }
    missing = np.full((9, 1), 1)
    number_of_times = np.full((9, 1), 0)
    for letter in name.lower():
        value = letter_values.get(letter)
        if value is None:
            continue
        missing[value - 1] = 0
        number_of_times[value - 1] += 1
    return missing, number_of_times
def name_details(name, missing, number_of_times):
name = name.lower()
sp_name = name.split()
spl = len(sp_name)
growth_number, so_ur = name_number(sp_name[0])
note.write('\n')
note.write('The growth number is ' + str(growth_number))
note.write('\n')
note.write('INTENSITY TABLE: ')
note.write('\n')
for i in range(0, 9):
        note.write('The number of times ' + str(i + 1) + ' appeared is: ' + str(nam2num(number_of_times[i])) + ' ')
note.write('\n')
note.write('\n')
note.write('<NAME>: ')
note.write('\n')
for i in range(0, 9):
if missing[i] == 1:
note.write(str(i + 1) + ' ')
intensity_points = np.full((9, 1), 0)
if number_of_times[0] > 3:
intensity_points[0] = 10
if number_of_times[1] > 1:
intensity_points[1] = 10
if number_of_times[2] > 2:
intensity_points[2] = 10
if number_of_times[3] > 1:
intensity_points[3] = 10
if number_of_times[4] > 5:
intensity_points[4] = 10
if number_of_times[5] > 2:
intensity_points[5] = 10
if number_of_times[6] > 1:
intensity_points[6] = 10
if number_of_times[7] > 1:
intensity_points[7] = 10
if number_of_times[8] > 3:
intensity_points[8] = 10
note.write('\n')
note.write('INTENSITY POINTS (0 means none) ')
for i in range(0, 9):
if intensity_points[i] == 10:
note.write(str(nam2num(i + 1)) + ' ')
note.write('\n')
note.write('PRIME INTENSIFIER ')
maxpos = int(np.argmax(number_of_times))
note.write(str(maxpos + 1))
note.write('\n')
def nam2num(digit):
numname = ''
if digit == 1:
numname = 'one'
    elif digit == 2:
        numname = 'two'
elif digit == 3:
numname = 'three'
elif digit == 4:
numname = 'four'
elif digit == 5:
numname = 'five'
elif digit == 6:
numname = 'six'
elif digit == 7:
numname = 'seven'
elif digit == 8:
numname = 'eight'
elif digit == 9:
numname = 'nine'
return numname
def karmic_debts(name, day, month, year):
name_no, soul_ur = name_number(name)
dd = single_digit(day)
dm = single_digit(month)
dy = single_digit(year)
LP = int(dd) + int(dm) + int(dy)
kb_13 = 0
kb_14 = 0
kb_16 = 0
kb_19 = 0
if int(day) == 13 or int(name_no) == 13 or int(LP) == 13:
kb_13 = 13
if int(day) == 14 or int(name_no) == 14 or int(LP) == 14:
kb_14 = 14
if int(day) == 16 or int(name_no) == 16 or int(LP) == 16:
kb_16 = 16
if int(day) == 19 or int(name_no) == 19 or int(LP) == 19:
kb_19 = 19
return kb_13, kb_14, kb_16, kb_19
name_no, soul_ur = name_number(name)
note.write('\n')
note.write("Your expression number is: " + str(name_no) + '\n')
note.write("Your soul urge number is: " + str(soul_ur) + '\n')
miss, num_times = missing_numbers(name)
name_details(name, miss, num_times)
kb1, kb2, kb3, kb4 = karmic_debts(name, day, month, year)
note.write("Your karmic debt numbers are: ")
if kb1 == 13:
note.write(str(kb1) + ' ')
if kb2 == 14:
note.write(str(kb2) + ' ')
if kb3 == 16:
note.write(str(kb3) + ' ')
if kb4 == 19:
note.write(str(kb4) + ' ')
note.write('\n')
essence_tab = essence_cal(name)
# from scipy.io import loadmat
# annots = loadmat('cars_train_annos.mat')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-11-05 11:30:01
# @Author : <NAME> (<EMAIL>)
# @Link : http://cs.ucsb.edu/~bolunwang
import os
import time
import numpy as np
import random
import tensorflow
from tensorflow import set_random_seed
random.seed(123)
np.random.seed(123)
set_random_seed(123)
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from causal_inference import causal_analyzer
import utils_backdoor
import sys
##############################
# PARAMETERS #
##############################
DEVICE = '3' # specify which GPU to use
DATA_DIR = 'data' # data folder
DATA_FILE = 'gtsrb_dataset_int.h5' # dataset file
MODEL_DIR = 'models' # model directory
MODEL_FILENAME = 'gtsrb_bottom_right_white_4_target_33.h5' # model file
#MODEL_FILENAME = 'trojaned_face_model_wm.h5'
RESULT_DIR = 'results' # directory for storing results
# image filename template for visualization results
IMG_FILENAME_TEMPLATE = 'gtsrb_visualize_%s_label_%d.png'
# input size
IMG_ROWS = 32
IMG_COLS = 32
IMG_COLOR = 3
INPUT_SHAPE = (IMG_ROWS, IMG_COLS, IMG_COLOR)
NUM_CLASSES = 43 # total number of classes in the model
Y_TARGET = 33 # (optional) infected target label, used for prioritizing label scanning
INTENSITY_RANGE = 'raw' # preprocessing method for the task, GTSRB uses raw pixel intensities
# parameters for optimization
BATCH_SIZE = 32 # batch size used for optimization
LR = 0.1 # learning rate
STEPS = 1000 # total optimization iterations
NB_SAMPLE = 1000 # number of samples in each mini batch
MINI_BATCH = NB_SAMPLE // BATCH_SIZE # mini batch size used for early stop
INIT_COST = 1e-3 # initial weight used for balancing two objectives
REGULARIZATION = 'l1' # reg term to control the mask's norm
ATTACK_SUCC_THRESHOLD = 0.99 # attack success threshold of the reversed attack
PATIENCE = 5 # patience for adjusting weight, number of mini batches
COST_MULTIPLIER = 2 # multiplier for auto-control of weight (COST)
SAVE_LAST = False # whether to save the last result or best result
EARLY_STOP = True # whether to early stop
EARLY_STOP_THRESHOLD = 1.0 # loss threshold for early stop
EARLY_STOP_PATIENCE = 5 * PATIENCE # patience for early stop
# the following part is not used in our experiment
# but our code implementation also supports super-pixel mask
UPSAMPLE_SIZE = 1 # size of the super pixel
MASK_SHAPE = np.ceil(np.array(INPUT_SHAPE[0:2], dtype=float) / UPSAMPLE_SIZE)
MASK_SHAPE = MASK_SHAPE.astype(int)
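# Example (with the defaults above): INPUT_SHAPE[0:2] is (32, 32) and
# UPSAMPLE_SIZE is 1, so MASK_SHAPE is (32, 32); with UPSAMPLE_SIZE = 4 each
# mask cell would cover a 4x4 super pixel and MASK_SHAPE would be (8, 8).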
# parameters of the original injected trigger
# this is NOT used during optimization
# start inclusive, end exclusive
# PATTERN_START_ROW, PATTERN_END_ROW = 27, 31
# PATTERN_START_COL, PATTERN_END_COL = 27, 31
# PATTERN_COLOR = (255.0, 255.0, 255.0)
# PATTERN_LIST = [
# (row_idx, col_idx, PATTERN_COLOR)
# for row_idx in range(PATTERN_START_ROW, PATTERN_END_ROW)
# for col_idx in range(PATTERN_START_COL, PATTERN_END_COL)
# ]
##############################
# END PARAMETERS #
##############################
def load_dataset(data_file=('%s/%s' % (DATA_DIR, DATA_FILE))):
dataset = utils_backdoor.load_dataset(data_file, keys=['X_test', 'Y_test'])
X_test = np.array(dataset['X_test'], dtype='float32')
Y_test = np.array(dataset['Y_test'], dtype='float32')
print('X_test shape %s' % str(X_test.shape))
print('Y_test shape %s' % str(Y_test.shape))
return X_test, Y_test
def build_data_loader(X, Y):
datagen = ImageDataGenerator()
generator = datagen.flow(
X, Y, batch_size=BATCH_SIZE)
return generator
def trigger_analyzer(analyzer, gen):
visualize_start_time = time.time()
# execute reverse engineering
analyzer.analyze(gen)
visualize_end_time = time.time()
print('visualization cost %f seconds' %
(visualize_end_time - visualize_start_time))
return
def save_pattern(pattern, mask, y_target):
# create result dir
if not os.path.exists(RESULT_DIR):
os.mkdir(RESULT_DIR)
img_filename = (
'%s/%s' % (RESULT_DIR,
IMG_FILENAME_TEMPLATE % ('pattern', y_target)))
utils_backdoor.dump_image(pattern, img_filename, 'png')
img_filename = (
'%s/%s' % (RESULT_DIR,
IMG_FILENAME_TEMPLATE % ('mask', y_target)))
utils_backdoor.dump_image(np.expand_dims(mask, axis=2) * 255,
img_filename,
'png')
fusion = np.multiply(pattern, np.expand_dims(mask, axis=2))
img_filename = (
'%s/%s' % (RESULT_DIR,
IMG_FILENAME_TEMPLATE % ('fusion', y_target)))
utils_backdoor.dump_image(fusion, img_filename, 'png')
pass
def start_analysis():
print('loading dataset')
X_test, Y_test = load_dataset()
# transform numpy arrays into data generator
test_generator = build_data_loader(X_test, Y_test)
print('loading model')
model_file = '%s/%s' % (MODEL_DIR, MODEL_FILENAME)
model = load_model(model_file)
# initialize analyzer
analyzer = causal_analyzer(
model,
test_generator,
input_shape=INPUT_SHAPE,
init_cost=INIT_COST, steps=STEPS, lr=LR, num_classes=NUM_CLASSES,
mini_batch=MINI_BATCH,
upsample_size=UPSAMPLE_SIZE,
patience=PATIENCE, cost_multiplier=COST_MULTIPLIER,
img_color=IMG_COLOR, batch_size=BATCH_SIZE, verbose=2,
save_last=SAVE_LAST,
early_stop=EARLY_STOP, early_stop_threshold=EARLY_STOP_THRESHOLD,
early_stop_patience=EARLY_STOP_PATIENCE)
# y_label list to analyze
y_target_list = list(range(NUM_CLASSES))
y_target_list.remove(Y_TARGET)
y_target_list = [Y_TARGET] + y_target_list
    y_target_list = [33]  # override: restrict the scan to label 33 only
for y_target in y_target_list:
#print('processing label %d' % y_target)
trigger_analyzer(
analyzer, test_generator)
pass
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = DEVICE
utils_backdoor.fix_gpu_memory()
    for i in range(0, 3):
print(i)
start_analysis()
pass
if __name__ == '__main__':
#sys.stdout = open('file', 'w')
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print('elapsed time %s s' % elapsed_time)
    #sys.stdout.close()
<filename>train.py
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
Training part of the Deep QA model
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 Xuming Lin. All Rights Reserved"
__author__ = "<NAME>, <NAME><<EMAIL>>"
__date__ = "2017-08-21:23:42:17"
import os
curdir = os.path.dirname(os.path.abspath(__file__))
from qacnn import QACNN
from sklearn import metrics
from tqdm import tqdm
import tensorflow as tf
import datetime
import operator
import data
flags, FLAGS = tf.app.flags, tf.app.flags.FLAGS
flags.DEFINE_integer('sequence_length', 100, 'sequence length') # noqa: skipped autopep8 checking
flags.DEFINE_integer('evaluate_every', 1, 'evaluate every N steps') # noqa: skipped autopep8 checking
flags.DEFINE_integer('num_epochs', 300, 'epochs') # noqa: skipped autopep8 checking
flags.DEFINE_integer('batch_size', 100, 'mini batch size') # noqa: skipped autopep8 checking
flags.DEFINE_integer('embedding_size', 50, 'embedding size') # noqa: skipped autopep8 checking
flags.DEFINE_integer('hidden_size', 80, 'hidden size') # noqa: skipped autopep8 checking
flags.DEFINE_integer('num_filters', 512, 'number of filters') # noqa: skipped autopep8 checking
flags.DEFINE_float('l2_reg_lambda', 0., 'L2 regularization factor') # noqa: skipped autopep8 checking
flags.DEFINE_float('keep_prob', 1.0, 'Dropout keep rate') # noqa: skipped autopep8 checking
flags.DEFINE_float('lr', 0.001, 'learning rate') # noqa: skipped autopep8 checking
flags.DEFINE_float('margin', 0.05, 'margin for computing loss') # noqa: skipped autopep8 checking
# Configuration class
class Config(object):
def __init__(self, vocab_size):
        # input sequence (sentence) length
self.sequence_length = FLAGS.sequence_length
        # number of epochs
self.num_epochs = FLAGS.num_epochs
        # batch size
self.batch_size = FLAGS.batch_size
        # vocabulary size
self.vocab_size = vocab_size
        # word embedding size
self.embedding_size = FLAGS.embedding_size
        # filter sizes, corresponding to 1-gram, 2-gram, 3-gram and 5-gram features
self.filter_sizes = [1, 2, 3, 5]
        # hidden layer size
self.hidden_size = FLAGS.hidden_size
        # number of filters per size
self.num_filters = FLAGS.num_filters
        # the paper uses 0.0001
self.l2_reg_lambda = FLAGS.l2_reg_lambda
# dropout
self.keep_prob = FLAGS.keep_prob
        # learning rate
        # the paper uses 0.01
self.lr = FLAGS.lr
# margin
        # the paper uses 0.009
self.m = FLAGS.margin
        # Configure the GPU: allow ops that cannot run on the GPU
        # to fall back to the CPU, and configure device-placement logging
self.cf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
'''
        GPU memory usage policy
'''
        # grow GPU memory allocation on demand
self.cf.gpu_options.allow_growth=True
        # use only 20% of the GPU memory
# self.cf.gpu_options.per_process_gpu_memory_fraction = 0.2
self.test_data = data.load_test(self.sequence_length, self.sequence_length)
print('Loading Data...')
# word-to-id mapping
vocab = data.vocab_data
# configuration
config = Config(len(vocab['word2id']))
def main(unused_argv):
'''
    Start training and evaluation
'''
with tf.device('/gpu:0'), tf.Session(config=config.cf) as sess:
        # build the CNN network
cnn = QACNN(config, sess)
        # persist metrics data
tf_writer = tf.summary.FileWriter(logdir=os.path.join(curdir, 'sdist/'), graph=sess.graph)
# Summaries for loss and accuracy during training
summary_loss = tf.summary.scalar("train/loss", cnn.loss)
summary_accu = tf.summary.scalar("train/accuracy", cnn.accu)
summary_op = tf.summary.merge([summary_loss, summary_accu])
        # training step
def train_step(x_batch_1, x_batch_2, x_batch_3):
feed_dict = {
cnn.q: x_batch_1,
cnn.aplus: x_batch_2,
cnn.aminus: x_batch_3,
cnn.keep_prob: config.keep_prob
}
_, step, loss, accuracy, summaries = sess.run(
[cnn.train_op, cnn.global_step, cnn.loss, cnn.accu, summary_op],
feed_dict)
tf_writer.add_summary(summaries, step)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
return time_str, step, loss, accuracy
        # evaluation step
def dev_step(step):
            # evaluation via a confusion matrix
            # http://www.uta.fi/sis/tie/tl/index/Rates.pdf
quality = {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
losses = []
labels = []
scores = []
pbar = tqdm(config.test_data)
pbar.set_description("evaluate step %s" % step)
for x in pbar:
_, loss, score = cnn.predict(dict({
'question': x[1],
'utterance': x[2]
}), x[3])
scores.append(score)
losses.append(loss)
labels.append(x[3])
            # derive a threshold from the ROC curve
            # http://alexkong.net/2013/06/introduction-to-auc-and-roc/
            fpr, tpr, th = metrics.roc_curve(labels, scores)
            # NOTE: the AUC value is used here as the score cutoff
            threshold = round(metrics.auc(fpr, tpr), 5)
            # tally the confusion matrix over every evaluated sample
            for score, label in zip(scores, labels):
                if score >= threshold and label == 1:
                    quality['tp'] += 1
                elif score >= threshold and label == 0:
                    quality['fp'] += 1
                elif score < threshold and label == 1:
                    quality['fn'] += 1
                else:
                    quality['tn'] += 1
accuracy = float(quality['tp'] + quality['tn'] )/(quality['tp'] + quality['tn'] + quality['fp'] + quality['fn'])
loss = tf.reduce_mean(losses).eval()
tf_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="evaluate/loss", simple_value=loss),
tf.Summary.Value(tag="evaluate/accuracy", simple_value=accuracy)]), step)
            print('evaluation @ step %d: accuracy: %f, loss: %s, threshold: %f' % (step, accuracy, loss, threshold))
        # evaluate every FLAGS.evaluate_every steps
        # start training and evaluation
sess.run(tf.global_variables_initializer())
for i in range(config.num_epochs):
for (_, x_question, x_utterance, y) in data.load_train(config.batch_size, config.sequence_length, config.sequence_length):
                if len(_) == config.batch_size:  # the last mini batch of an epoch may have fewer than batch_size rows
_, global_step, _, _ = train_step(x_question, x_utterance, y)
if global_step % FLAGS.evaluate_every == 0:
dev_step(global_step)
if __name__ == '__main__':
tf.app.run()
<reponame>matejklemen/slovene-coreference-resolution
import json
from collections import Counter
import logging
import os
from typing import List, Optional, Mapping
from sklearn.model_selection import train_test_split
PAD_TOKEN, PAD_ID = "<PAD>", 0
BOS_TOKEN, BOS_ID = "<BOS>", 1
EOS_TOKEN, EOS_ID = "<EOS>", 2
UNK_TOKEN, UNK_ID = "<UNK>", 3
class KFoldStateCache:
def __init__(self, script_name: str, main_dataset: str, fold_info: List[dict],
additional_dataset: Optional[str] = None,
script_args: Optional[Mapping] = None):
self.script_name = script_name
self.fold_info = fold_info
self.num_folds = len(self.fold_info)
self.script_args = script_args if script_args is not None else {}
# The dataset that is being split with KFold CV
self.main_dataset = main_dataset
        # For combined runners: documents read with `read_corpus(additional_dataset)` should go into the training set
self.additional_dataset = additional_dataset
def get_next_unfinished(self):
for i, curr_fold in enumerate(self.fold_info):
if curr_fold.get("results", None) is None:
yield {
"idx_fold": i,
"train_docs": curr_fold["train_docs"],
"test_docs": curr_fold["test_docs"]
}
def add_results(self, idx_fold, results):
self.fold_info[idx_fold]["results"] = results
def save(self, path):
_path = path if path.endswith(".json") else f"{path}.json"
if os.path.exists(_path):
logging.warning(f"Overwriting KFold cache at '{_path}'")
with open(_path, "w", encoding="utf8") as f:
json.dump({
"script_name": self.script_name,
"script_args": self.script_args,
"main_dataset": self.main_dataset,
"additional_dataset": self.additional_dataset,
"fold_info": self.fold_info
}, fp=f, indent=4)
@staticmethod
def from_file(path):
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
instance = KFoldStateCache(script_name=data["script_name"],
script_args=data.get("script_args", None),
main_dataset=data["main_dataset"],
fold_info=data["fold_info"],
additional_dataset=data.get("additional_dataset", None))
return instance
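# Usage sketch for KFoldStateCache (illustrative; the file name and the
# run_fold helper are hypothetical, not part of this module):
#   cache = KFoldStateCache.from_file("kfold_cache.json")
#   for fold in cache.get_next_unfinished():
#       results = run_fold(fold["train_docs"], fold["test_docs"])
#       cache.add_results(fold["idx_fold"], results)
#       cache.save("kfold_cache.json")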
def extract_vocab(documents, top_n=10_000, lowercase=False):
token_counter = Counter()
for curr_doc in documents:
curr_sentences = curr_doc.raw_sentences()
for sent_tokens in curr_sentences:
processed = list(map(lambda s: s.lower() if lowercase else s, sent_tokens))
token_counter += Counter(processed)
tok2id, id2tok = {}, {}
special_tokens = [(PAD_TOKEN, PAD_ID), (BOS_TOKEN, BOS_ID), (EOS_TOKEN, EOS_ID), (UNK_TOKEN, UNK_ID)]
for t, i in special_tokens:
tok2id[t] = i
id2tok[i] = t
for i, (token, _) in enumerate(token_counter.most_common(top_n), start=len(special_tokens)):
tok2id[token] = i
id2tok[i] = token
return tok2id, id2tok
def encode(seq, vocab, max_seq_len):
encoded_seq = []
    for curr_token in seq:
        encoded_seq.append(vocab.get(curr_token, vocab["<UNK>"]))
# If longer than max allowed length, truncate sequence; otherwise pad with a special symbol
if len(seq) > max_seq_len:
encoded_seq = encoded_seq[: max_seq_len]
else:
encoded_seq += [vocab["<PAD>"]] * (max_seq_len - len(seq))
return encoded_seq
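# Illustrative example: with vocab = {"<PAD>": 0, "<BOS>": 1, "<EOS>": 2,
# "<UNK>": 3, "the": 4}, encode(["the", "cat"], vocab, max_seq_len=4)
# returns [4, 3, 0, 0]: "cat" falls back to <UNK> and the result is padded
# with <PAD> up to the maximum length.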
def get_clusters(preds):
""" Convert {antecedent_id: mention_id} pairs into {mention_id: assigned_cluster_id} pairs. """
cluster_assignments = {}
for id_cluster, cluster_starter in enumerate(preds.get(None, [])):
stack = [cluster_starter]
curr_cluster = []
while len(stack) > 0:
cur = stack.pop()
curr_cluster.append(cur)
cluster_assignments[cur] = id_cluster
mentions = preds.get(cur)
if mentions is not None:
stack.extend(mentions)
return cluster_assignments
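# Worked example (matching the __main__ block below): with
# preds = {None: ['rc_1', 'rc_3'], 'rc_1': ['rc_2', 'rc_5'], 'rc_2': ['rc_4'],
#          'rc_5': ['rc_6', 'rc_11']}, every mention reachable from 'rc_1'
# ('rc_2', 'rc_4', 'rc_5', 'rc_6', 'rc_11') is assigned cluster 0, while
# 'rc_3' starts its own cluster 1.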
def split_into_sets(documents, train_prop=0.7, dev_prop=0.15, test_prop=0.15):
"""
    Splits the documents array into three sets: training, validation and test.
    Documents selected for each set are randomly picked and do not overlap.
"""
# Note: test_prop is redundant, but it's left in to make it clear this is a split into 3 parts
test_prop = 1.0 - train_prop - dev_prop
train_docs, dev_test_docs = train_test_split(documents, test_size=(dev_prop + test_prop))
dev_docs, test_docs = train_test_split(dev_test_docs, test_size=test_prop/(dev_prop + test_prop))
logging.info(f"{len(documents)} documents split to: training set ({len(train_docs)}), dev set ({len(dev_docs)}) "
f"and test set ({len(test_docs)}).")
return train_docs, dev_docs, test_docs
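# Example: split_into_sets(docs, train_prop=0.7, dev_prop=0.15) produces an
# approximate 70/15/15 split; the resulting set sizes are logged inside
# the function.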
def fixed_split(documents, dataset):
tr, dev, te = read_splits(os.path.join("..", "data", "seeded_split", f"{dataset}.txt"))
assert (len(tr) + len(dev) + len(te)) == len(documents)
train_docs = list(filter(lambda doc: doc.doc_id in tr, documents))
dev_docs = list(filter(lambda doc: doc.doc_id in dev, documents))
te_docs = list(filter(lambda doc: doc.doc_id in te, documents))
return train_docs, dev_docs, te_docs
def read_splits(file_path):
with open(file_path, "r") as f:
doc_ids = []
# train, dev, test
for _ in range(3):
curr_ids = set(f.readline().strip().split(","))
doc_ids.append(curr_ids)
return doc_ids
if __name__ == "__main__":
""" 'rc_1' and 'rc_3' are first mentions of some entity,
'rc_2' and 'rc_5' refer to 'rc_1', etc. """
preds = {
None: ['rc_1', 'rc_3'],
'rc_1': ['rc_2', 'rc_5'],
'rc_2': ['rc_4'],
'rc_5': ['rc_6', 'rc_11']
}
print(get_clusters(preds))
<filename>tests/test_postprocessing.py
import numpy as np
import pytest
from component_vis import factor_tools, postprocessing
def test_normalise_cp_tensor_normalises(rng):
A = rng.standard_normal((10, 3))
B = rng.standard_normal((20, 3))
C = rng.standard_normal((30, 3))
D = rng.standard_normal((40, 3))
cp_tensor = (None, (A, B, C, D))
normalised_cp_tensor = postprocessing.normalise_cp_tensor(cp_tensor)
for factor_matrix in normalised_cp_tensor[1]:
np.testing.assert_allclose(np.linalg.norm(factor_matrix, axis=0), 1)
w = rng.standard_normal((3,))
cp_tensor = (w, (A, B, C, D))
normalised_cp_tensor = postprocessing.normalise_cp_tensor(cp_tensor)
for factor_matrix in normalised_cp_tensor[1]:
np.testing.assert_allclose(np.linalg.norm(factor_matrix, axis=0), 1)
def test_normalise_cp_tensor_does_not_change_tensor(rng):
A = rng.standard_normal((10, 4))
B = rng.standard_normal((11, 4))
C = rng.standard_normal((12, 4))
w = rng.standard_normal((4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
normalised_cp_tensor = postprocessing.normalise_cp_tensor(cp_tensor)
normalised_dense_tensor = factor_tools.construct_cp_tensor(normalised_cp_tensor)
np.testing.assert_allclose(dense_tensor, normalised_dense_tensor)
def test_distribute_weights_in_one_mode_does_not_change_tensor(rng):
A = rng.standard_normal(size=(10, 4))
B = rng.standard_normal(size=(11, 4))
C = rng.standard_normal(size=(12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
for mode in range(3):
redistributed_cp_tensor = postprocessing.distribute_weights_in_one_mode(
cp_tensor, mode
)
redistributed_dense_tensor = factor_tools.construct_cp_tensor(
redistributed_cp_tensor
)
np.testing.assert_allclose(dense_tensor, redistributed_dense_tensor)
def test_distribute_weights_in_one_mode_distributes_correctly(rng):
A = rng.standard_normal(size=(10, 4))
B = rng.standard_normal(size=(11, 4))
C = rng.standard_normal(size=(12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
for mode in range(3):
new_weights, new_factors = postprocessing.distribute_weights_in_one_mode(
cp_tensor, mode
)
np.testing.assert_allclose(new_weights, np.ones_like(new_weights))
for i, new_factor_matrix in enumerate(new_factors):
if i != mode:
np.testing.assert_allclose(
np.linalg.norm(new_factor_matrix, axis=0),
np.ones_like(new_factor_matrix[0]),
)
def test_distribute_weights_evenly_does_not_change_tensor(rng):
A = rng.standard_normal((10, 4))
B = rng.standard_normal((11, 4))
C = rng.standard_normal((12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
    redistributed_cp_tensor = postprocessing.distribute_weights_evenly(cp_tensor)
    redistributed_dense_tensor = factor_tools.construct_cp_tensor(redistributed_cp_tensor)
    np.testing.assert_allclose(dense_tensor, redistributed_dense_tensor)
def test_distribute_weights_evenly(rng):
A = rng.standard_normal((10, 4))
B = rng.standard_normal((11, 4))
C = rng.standard_normal((12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
new_weights, new_factors = postprocessing.distribute_weights_evenly(cp_tensor)
np.testing.assert_allclose(
np.linalg.norm(new_factors[0], axis=0), np.linalg.norm(new_factors[1], axis=0)
)
np.testing.assert_allclose(
np.linalg.norm(new_factors[0], axis=0), np.linalg.norm(new_factors[2], axis=0)
)
np.testing.assert_allclose(new_weights, np.ones_like(new_weights))
def test_resolve_cp_sign_indeterminacy_does_not_change_tensor(rng):
A = rng.standard_normal((10, 4))
B = rng.standard_normal((11, 4))
C = rng.standard_normal((12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
sign_flipped_cp_tensor = postprocessing.resolve_cp_sign_indeterminacy(
cp_tensor, dense_tensor
)
sign_flipped_dense_tensor = factor_tools.construct_cp_tensor(sign_flipped_cp_tensor)
np.testing.assert_allclose(dense_tensor, sign_flipped_dense_tensor)
@pytest.mark.parametrize("method", ["transpose", "positive_coord"])
def test_resolve_cp_sign_indeterminacy_flips_negative_components_for_nonnegative_tensor(
rng, method
):
A = rng.uniform(size=(10, 4))
B = rng.uniform(size=(11, 4))
C = rng.uniform(size=(12, 4))
w = rng.uniform(size=(4,))
factor_matrices = [A, B, C]
cp_tensor = (w, factor_matrices)
dense_tensor = factor_tools.construct_cp_tensor(cp_tensor)
for flip1 in range(3):
for flip2 in range(3):
if flip1 == flip2:
continue
signs = np.ones(3)
signs[flip1] = -1
signs[flip2] = -1
wrong_flip_factor_matrices = [
factor_matrix * sign
for sign, factor_matrix in zip(signs, factor_matrices)
]
wrong_flip_cp_tensor = (w, wrong_flip_factor_matrices)
sign_flipped_cp_tensor = postprocessing.resolve_cp_sign_indeterminacy(
wrong_flip_cp_tensor,
dense_tensor,
resolve_mode=flip1,
unresolved_mode=flip2,
method=method,
)
assert np.all(sign_flipped_cp_tensor[1][0] >= 0)
assert np.all(sign_flipped_cp_tensor[1][1] >= 0)
assert np.all(sign_flipped_cp_tensor[1][2] >= 0)
assert np.all(
wrong_flip_cp_tensor[1][flip1] <= 0
), "Did not setup test correctly"
assert np.all(
wrong_flip_cp_tensor[1][flip2] <= 0
), "Did not setup test correctly"
def test_permute_cp_tensor(rng):
    # Create a 4-component CP tensor and a permuted copy of it, then test that
    # postprocessing.permute_cp_tensor permutes the copy back.
    # Also test alignment against fewer components: permute the tensor, keep
    # only two of the four components, align the full model to it and check
    # that the first two aligned components match the 2-component model.
A = rng.standard_normal((10, 4))
B = rng.standard_normal((11, 4))
C = rng.standard_normal((12, 4))
w = rng.uniform(size=(4,))
cp_tensor = (w, (A, B, C))
permutation = [2, 1, 3, 0]
cp_tensor_permuted = (
w[permutation],
(A[:, permutation], B[:, permutation], C[:, permutation]),
)
cp_tensor_permuted_back = postprocessing.permute_cp_tensor(
cp_tensor_permuted, cp_tensor
)
assert factor_tools.check_cp_tensors_equals(cp_tensor_permuted_back, cp_tensor)
# Check permutation comparing against fewer components
permutation_2comp = [1, 3]
cp_tensor_permuted2 = (
w[permutation_2comp],
(A[:, permutation_2comp], B[:, permutation_2comp], C[:, permutation_2comp]),
)
aligned_cp_tensor = postprocessing.permute_cp_tensor(cp_tensor, cp_tensor_permuted2)
aligned_weights, aligned_factors = aligned_cp_tensor
assert np.all(aligned_weights[:2] == cp_tensor_permuted2[0])
for factor1, factor2 in zip(cp_tensor_permuted2[1], aligned_factors):
np.testing.assert_allclose(factor1, factor2[:, :2])
# Check that the permutation is equivalent to the unpermuted decomposition
assert factor_tools.check_cp_tensors_equivalent(cp_tensor, aligned_cp_tensor)
<filename>storyboard/notifications/subscriber.py<gh_stars>0
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from oslo_config import cfg
from oslo_log import log
from pika.exceptions import ConnectionClosed
from stevedore import enabled
from storyboard.notifications.conf import NOTIFICATION_OPTS
from storyboard.notifications.connection_service import ConnectionService
from storyboard._i18n import _, _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def subscribe():
try:
log.register_options(CONF)
except cfg.ArgsAlreadyParsedError:
pass
log.setup(CONF, 'storyboard')
CONF(project='storyboard')
CONF.register_opts(NOTIFICATION_OPTS, "notifications")
subscriber = Subscriber(CONF.notifications)
subscriber.start()
manager = enabled.EnabledExtensionManager(
namespace='storyboard.plugin.worker',
check_func=check_enabled,
invoke_on_load=True,
invoke_args=(CONF,)
)
while subscriber.started:
(method, properties, body) = subscriber.get()
if not method or not properties:
LOG.debug(_("No messages available, sleeping for 5 seconds."))
time.sleep(5)
continue
manager.map(handle_event, body)
# Ack the message
subscriber.ack(method.delivery_tag)
def handle_event(ext, body):
"""Handle an event from the queue.
:param ext: The extension that's handling this event.
:param body: The body of the event.
:return: The result of the handler.
"""
payload = json.loads(body)
return ext.obj.event(author_id=payload['author_id'] or None,
method=payload['method'] or None,
url=payload['url'] or None,
path=payload['path'] or None,
query_string=payload['query_string'] or None,
status=payload['status'] or None,
resource=payload['resource'] or None,
resource_id=payload['resource_id'] or None,
sub_resource=payload['sub_resource'] or None,
sub_resource_id=payload['sub_resource_id'] or None,
resource_before=payload['resource_before'] or None,
resource_after=payload['resource_after'] or None)
def check_enabled(ext):
"""Check to see whether an extension should be enabled.
:param ext: The extension instance to check.
:return: True if it should be enabled. Otherwise false.
"""
return ext.obj.enabled()
class Subscriber(ConnectionService):
def __init__(self, conf):
"""Setup the subscriber instance based on our configuration.
        :param conf: A configuration object.
"""
super(Subscriber, self).__init__(conf)
self._queue_name = conf.rabbit_event_queue_name
self._binding_keys = ['task', 'story', 'project', 'project_group',
'timeline_event']
self.add_open_hook(self._declare_queue)
def _declare_queue(self):
"""Declare the subscription queue against our exchange.
"""
self._channel.queue_declare(queue=self._queue_name,
durable=True)
# Set up the queue bindings.
for binding_key in self._binding_keys:
self._channel.queue_bind(exchange=self._exchange_name,
queue=self._queue_name,
routing_key=binding_key)
def ack(self, delivery_tag):
"""Acknowledge receipt and processing of the message.
"""
self._channel.basic_ack(delivery_tag)
def get(self):
"""Get a single message from the queue. If the subscriber is currently
waiting to reconnect, it will return None. Note that you must
manually ack the message after it has been successfully processed.
:rtype: (None, None, None)|(spec.Basic.Get,
spec.Basic.Properties,
str or unicode)
"""
# Sanity check one, are we closing?
if self._closing:
return None, None, None
# Sanity check two, are we open, or reconnecting?
if not self._open:
return None, None, None
try:
return self._channel.basic_get(queue=self._queue_name,
no_ack=False)
except ConnectionClosed as cc:
LOG.warning(_LW("Attempted to get message on closed connection."))
LOG.debug(cc)
self._open = False
self._reconnect()
return None, None, None
<filename>clictest/notifier.py
# Copyright 2011, OpenStack Foundation
# Copyright 2012, Red Hat, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import webob
from clictest.common import exception
from clictest.common import timeutils
from clictest.domain import proxy as domain_proxy
from clictest.i18n import _, _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ALIASES = {
'clictest.openstack.common.rpc.impl_kombu': 'rabbit',
'clictest.openstack.common.rpc.impl_qpid': 'qpid',
'clictest.openstack.common.rpc.impl_zmq': 'zmq',
}
def set_defaults(control_exchange='clictest'):
oslo_messaging.set_transport_defaults(control_exchange)
def get_transport():
return oslo_messaging.get_notification_transport(CONF, aliases=_ALIASES)
class Notifier(object):
"""Uses a notification strategy to send out messages about events."""
def __init__(self):
publisher_id = CONF.default_publisher_id
self._transport = get_transport()
self._notifier = oslo_messaging.Notifier(self._transport,
publisher_id=publisher_id)
def warn(self, event_type, payload):
self._notifier.warn({}, event_type, payload)
def info(self, event_type, payload):
self._notifier.info({}, event_type, payload)
def error(self, event_type, payload):
self._notifier.error({}, event_type, payload)
def _get_notification_group(notification):
return notification.split('.', 1)[0]
def _is_notification_enabled(notification):
disabled_notifications = CONF.disabled_notifications
notification_group = _get_notification_group(notification)
notifications = (notification, notification_group)
for disabled_notification in disabled_notifications:
if disabled_notification in notifications:
return False
return True
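# Illustrative behaviour: with CONF.disabled_notifications set to
# ["image.upload", "task"], _is_notification_enabled("image.upload") is False
# (exact match), _is_notification_enabled("task.delete") is False (the whole
# "task" group is disabled), and _is_notification_enabled("image.create")
# is True.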
def _send_notification(notify, notification_type, payload):
if _is_notification_enabled(notification_type):
notify(notification_type, payload)
class NotificationBase(object):
def get_payload(self, obj):
return {}
def send_notification(self, notification_id, obj, extra_payload=None):
payload = self.get_payload(obj)
if extra_payload is not None:
payload.update(extra_payload)
_send_notification(self.notifier.info, notification_id, payload)
@six.add_metaclass(abc.ABCMeta)
class NotificationProxy(NotificationBase):
def __init__(self, repo, context, notifier):
self.repo = repo
self.context = context
self.notifier = notifier
super_class = self.get_super_class()
super_class.__init__(self, repo)
@abc.abstractmethod
def get_super_class(self):
pass
@six.add_metaclass(abc.ABCMeta)
class NotificationRepoProxy(NotificationBase):
def __init__(self, repo, context, notifier):
self.repo = repo
self.context = context
self.notifier = notifier
proxy_kwargs = {'context': self.context, 'notifier': self.notifier}
proxy_class = self.get_proxy_class()
super_class = self.get_super_class()
super_class.__init__(self, repo, proxy_class, proxy_kwargs)
@abc.abstractmethod
def get_super_class(self):
pass
@abc.abstractmethod
def get_proxy_class(self):
pass
@six.add_metaclass(abc.ABCMeta)
class NotificationFactoryProxy(object):
def __init__(self, factory, context, notifier):
kwargs = {'context': context, 'notifier': notifier}
proxy_class = self.get_proxy_class()
super_class = self.get_super_class()
super_class.__init__(self, factory, proxy_class, kwargs)
@abc.abstractmethod
def get_super_class(self):
pass
@abc.abstractmethod
def get_proxy_class(self):
pass
import argparse
import csv
import os
from typing import Iterable, List
import molanet.data.feature_extraction.data_analysis as data
from molanet.data.database import DatabaseConnection
from molanet.data.entities import MoleSample
def compute_features(sample: MoleSample):
hair = data.contains_hair(sample.image)
plaster = data.contains_plaster(sample.image)
mean, median, stddev = data.calculate_pointwise_statistics(sample.image)
features = {}
features['uuid'] = sample.uuid
features['hair'] = hair
features['plaster'] = plaster
features['mean'] = mean
features['median'] = median
features['stddev'] = stddev
for segmentation in sample.segmentations:
relative_size, absolute_size = data.calculate_mole_sizes(segmentation.mask)
features['seg_id'] = segmentation.source_id
features['rel_size'] = relative_size
features['abs_size'] = absolute_size
yield features
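# Usage sketch (sample being a hypothetical MoleSample instance): the
# generator yields one feature dict per segmentation, all sharing the
# sample-level values (uuid, hair, plaster, mean, median, stddev):
#   for row in compute_features(sample):
#       print(row['uuid'], row['seg_id'], row['rel_size'])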
def extract_features(samples: Iterable[MoleSample],
features_csv_path: str,
discarded_csv_path: str,
                     fieldnames: List[str],
delimiter=';',
offset=0):
with open(features_csv_path, 'w', newline='') as csvfile:
with open(discarded_csv_path, 'w', newline='') as discarded_csv:
writer = csv.DictWriter(csvfile,
delimiter=delimiter,
quotechar='|',
quoting=csv.QUOTE_MINIMAL,
fieldnames=fieldnames)
writer.writeheader()
            discarded_writer = csv.writer(discarded_csv, delimiter=' ',
                                          quotechar='|', quoting=csv.QUOTE_MINIMAL)
count = offset
for sample in samples:
count += 1
                if len(sample.segmentations) == 0:
                    # writerow expects a sequence; a bare string would be
                    # split into one column per character
                    discarded_writer.writerow([sample.uuid])
                    continue
for features in compute_features(sample):
writer.writerow(features)
                if count % 300 == 0:
                    print(f"{count}: computed features for {count - offset} samples")
def create_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser("Extract useful features")
parser.add_argument("--offset", type=int, default=0, help="Starting offset in data set")
parser.add_argument("--database-host", type=str, default="localhost", help="Target database host")
parser.add_argument("--database", type=str, default="molanet", help="Target database name")
parser.add_argument("--database-username", default=None, help="Target database username")
parser.add_argument("--database-password", default=None, help="Target database password")
parser.add_argument("--database-port", type=int, default=5432, help="Target database port")
parser.add_argument("--features-path", type=str, default="", help="path folder for features csv file")
return parser
if __name__ == "__main__":
# Parse arguments
parser = create_arg_parser()
args = parser.parse_args()
    offset = args.offset
fieldnames = ['uuid', 'seg_id', 'hair', 'plaster', 'mean', 'median', 'stddev', 'rel_size', 'abs_size']
delimiter = ";"
features_csv_path = f"features_{offset}.csv"
discarded_csv_path = f"discarded_{offset}.csv"
features_csv_path = os.path.join(args.features_path, features_csv_path)
discarded_csv_path = os.path.join(args.features_path, discarded_csv_path)
with DatabaseConnection(
args.database_host,
args.database,
username=args.database_username,
password=args.database_password) as db:
extract_features(db.get_samples(offset=offset),
features_csv_path,
discarded_csv_path,
fieldnames,
delimiter=delimiter,
offset=offset)
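# Example invocation (script name, paths, and credentials are illustrative,
# not from the source):
#   python extract_features.py --database-host localhost --database molanet \
#       --database-username user --database-password secret --features-path out/
# Features land in out/features_0.csv; samples without segmentations are
# listed in out/discarded_0.csv.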
|
import json
import random
from copy import deepcopy
from datetime import datetime
from flask import Markup
from flask import Response, current_app, flash, redirect, url_for
from flask_admin.actions import action
from quokka.utils.text import slugify
class PublishAction(object):
@action(
'toggle_publish',
'Publish/Unpublish',
'Publish/Unpublish?'
)
def action_toggle_publish(self, ids):
for _id in ids:
model = current_app.db.get_with_content(_id=_id)
model['published'] = not model['published']
# fires the versioning and hooks
self._on_model_change(None, model, False)
pk = self.get_pk_value(model)
self.coll.update({'_id': pk}, model)
# more hooks
self.after_model_change(None, model, False)
        flash(
            f'{len(ids)} items were successfully published/unpublished.',
            'success'
        )
class CloneAction(object):
@action(
'clone_item',
'Create a copy',
'Are you sure you want a copy?'
)
def action_clone_item(self, ids):
if len(ids) > 1:
flash(
"You can select only one item for this action",
'error'
)
return
model = current_app.db.get_with_content(_id=ids[0])
clone = deepcopy(model)
del clone['_id']
clone['slug'] = f'{clone["slug"]}-{random.getrandbits(32)}'
clone['_isclone'] = True
self._on_model_change(None, clone, True)
self.coll.insert(clone)
self.after_model_change(None, clone, True)
return redirect(url_for('.edit_view', id=clone['_id']))
class UserProfileBlockAction(object):
@action(
'create_userprofile',
'Create user profile block',
        'Are you sure you want to create a user profile block?'
)
def action_create_userprofile(self, ids):
for _id in ids:
user = current_app.db.users.find_one({'_id': _id})
if not user.get('fullname'):
user['fullname'] = user['username']
                # update_one requires an update operator such as $set
                current_app.db.users.update_one(
                    {'_id': user['_id']}, {'$set': {'fullname': user['fullname']}}
                )
# This update looks like having a cache
# self.coll.update_one(
# {'_id': _id}, {'fullname': user['fullname']}
# )
fullslug = slugify(user['fullname'])
existing_block = current_app.db.get(
'index', {'content_type': 'block', 'slug': fullslug}
)
if existing_block:
blocklink = url_for(
'quokka.core.content.admin.blockview.edit_view',
id=existing_block['_id']
)
flash(Markup(
f'Profile block for {user["username"]} '
f'already exists at: '
f'<a href="{blocklink}">{existing_block["_id"]}</a>'
))
else:
# TODO: move creation logic to a model like obj
new_data = {
'title': user['fullname'],
'slug': fullslug,
'summary': f'Profile page for {user["username"]}',
'published': True,
'comments': False,
'content_type': 'block',
'version': 0,
'date': datetime.now(),
'modified': datetime.now(),
'language': 'en',
'content_format': 'markdown',
'created_by': 'admin',
'modified_by': 'admin',
'category': '',
'category_slug': '',
'custom_vars': [
{'key': 'profile_title',
                     'value': '@note Change this field to customize the HTML page title'},  # noqa
{'key': 'twitter',
'value': f'@note Fill this field with user twitter profile e.g: http://twitter.com/{user["username"]}'}, # noqa
{'key': 'facebook',
'value': f'@note Fill this field with user facebook profile e.g: http://facebook.com/{user["username"]}'}, # noqa
{'key': 'pinterest',
'value': f'@note Fill this field with user pinterest profile e.g: http://pinterest.com/{user["username"]}'}, # noqa
{'key': 'github',
'value': f'@note Fill this field with user github profile e.g http://github.com/{user["username"]}'}, # noqa
{'key': 'aboutme',
'value': f'@note Fill this field with user about.me profile e.g: http://aboutme.com/{user["username"]}'}, # noqa
{'key': 'instagram',
'value': f'@note Fill this field with user instagram profile e.g: http://instagram.com/{user["username"]}'}, # noqa
{'key': 'site',
'value': '@note Fill this field with user website link'}, # noqa
{'key': 'banner_color', 'value': '@note Fill this field with a color code or name e.g: #ffcc00 or yellow'}, # noqa
                    {'key': 'banner_image', 'value': '@note Fill this field with a banner image URL e.g: http://site.com/image.jpg'},  # noqa
{'key': 'gravatar_email', 'value': '@note Fill this field with gravatar registered email e.g: <EMAIL>'}, # noqa
{'key': 'author_avatar', 'value': '@note Fill this field with an absolute url to a profile image e.g: http://site.com/image.png'}, # noqa
],
'quokka_create_form_class': 'FormMeta',
'quokka_create_form_module': 'quokka.core.content.formats',
'quokka_format_class': 'MarkdownFormat',
'quokka_format_module': 'quokka.core.content.formats',
'quokka_module': 'quokka.core.content.admin',
'tags_slug': None,
'block_items': [],
'authors_slug': None,
}
new = current_app.db.insert('index', new_data)
new_data['_id'] = new.inserted_id
current_app.db.push_content(new_data)
newlink = url_for(
'quokka.core.content.admin.blockview.edit_view',
id=new.inserted_id
)
                flash(Markup(
                    f'Profile block for {user["username"]} '
                    f'created at: '
                    f'<a href="{newlink}">{new.inserted_id}</a>'
                ))
# TODO: Serialize and activate this action
class ExportAction(object):
@action('export_to_json', 'Export as json')
def export_to_json(self, ids):
qs = self.model.objects(id__in=ids)
return Response(
qs.to_json(),
mimetype="text/json",
headers={
"Content-Disposition":
"attachment;filename=%s.json" % self.model.__name__.lower()
}
)
@action('export_to_csv', 'Export as csv')
def export_to_csv(self, ids):
qs = json.loads(self.model.objects(id__in=ids).to_json())
        def generate():
            # Header: use the keys of the widest document
            yield ','.join(list(max(qs, key=lambda x: len(x)).keys())) + '\n'
            for item in qs:
                # Naive join: values containing commas or quotes are not escaped
                yield ','.join([str(i) for i in list(item.values())]) + '\n'
return Response(
generate(),
mimetype="text/csv",
headers={
"Content-Disposition":
"attachment;filename=%s.csv" % self.model.__name__.lower()
}
)
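# A sketch of a more robust CSV export than the naive join in export_to_csv,
# using the stdlib csv module so commas and quotes inside values are escaped.
# Illustrative only; `docs` stands in for the json-decoded queryset rows.
import csv
import io
def generate_csv_rows(docs):
    """Yield properly escaped CSV lines for a list of dicts."""
    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(list(max(docs, key=len).keys()))
    yield buf.getvalue()
    for item in docs:
        buf.seek(0)
        buf.truncate()
        writer.writerow([str(v) for v in item.values()])
        yield buf.getvalue()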
|
'''
@description 2019/09/09 23:44
'''
# Common list methods:
# 1. append: add an element to the end of the list
# JavaScript
'''
fruits = ['苹果']
fruits.push('香蕉')
'''
fruits = ['苹果']
fruits.append('香蕉')
print(fruits) # ['苹果', '香蕉']
print('='*20)
# 2. count: count how many times an element occurs in the list
word_list = ['to', 'be', 'or', 'not', 'to', 'be']
word_count = word_list.count('be')
print(word_count) # 2
print('='*20)
# 3. extend: append the elements of one list to another
# Note: this mutates the original data
# JavaScript
'''
arr = [1,2,3].concat([4,5,6])
arr = [1,2,3,...[4,5,6]]
'''
list_a = [1, 2, 3]
list_b = [4, 5, 6]
list_a.extend(list_b)
# Adding lists does not mutate the originals; it produces a new list
# list_c = list_a + list_b
# print(list_a) # [1, 2, 3]
# print(list_b) # [4, 5, 6]
# print(list_c) # [1, 2, 3, 4, 5, 6]
print(list_a) # [1, 2, 3, 4, 5, 6]
print(list_b) # [4, 5, 6]
print('='*20)
# 4. index: return the index of the first match of a value in the list; raises an exception if not found
# JavaScript: returns -1 when not found
'''
letter_list = ['a', 'b', 'c', 'd', 'e']
letter_position = letter_list.findIndex(item => item =='e') # 4
letter_position = letter_list.findIndex(item => item =='f') # -1
'''
letter_list = ['a', 'b', 'c', 'd', 'e']
letter_position = letter_list.index('e')
# ValueError: 'f' is not in list
# letter_position = letter_list.index('f')
print(letter_position) # 4
print('='*20)
# 5. insert: insert a value at a given position in the list
# JavaScript
'''
insert_arr = ['hello', 'world']
insert_arr.splice(1, 0, '你好')
insert_arr.splice(insert_arr.length, 0, '世界')
console.log(insert_arr) # ["hello", "你好", "world", "世界"]
'''
insert_arr = ['hello', 'world']
insert_arr.insert(1, '你好')
insert_arr.insert(len(insert_arr), '世界')
print(insert_arr) # ['hello', '你好', 'world', '世界']
print(len(insert_arr)) # 4
print('='*20)
'''
@description 2019/09/10 23:47
'''
# 6. pop: remove the last element of the list and return its value
pop_list = [1, 2, 3, 4]
pop_item = pop_list.pop()
print(pop_item) # 4
print(pop_list) # [1, 2, 3]
print('='*20)
# 7. remove: remove the first matching element; the removed value is not returned.
# Raises an exception if the value is not in the list.
remove_list = [1, 2, 3, 4, 5, 1, 2]
remove_list.remove(2)
print(remove_list) # [1, 3, 4, 5, 1, 2]
print('='*20)
# 8. reverse: reverse the elements in place, mutating the original list
reverse_list = [1, 2, 3, 4, 5]
reverse_list.reverse() # [5, 4, 3, 2, 1]
# Slicing reverses without mutating the original list; it produces a new list
# new_reverse_list = reverse_list[-1::-1]
# print(reverse_list) # [1, 2, 3, 4, 5]
# print(new_reverse_list) # [5, 4, 3, 2, 1]
print(reverse_list) # [5, 4, 3, 2, 1]
print('='*20)
# 9. sort: sort the elements in place, mutating the original list
sort_arr = [10, 7, 3, 12, 22, 1, 2, 5, 9]
# Ascending order
# sort_arr.sort()
# print(sort_arr) # [1, 2, 3, 5, 7, 9, 10, 12, 22]
sort_arr.sort(reverse=True)
print(sort_arr) # [22, 12, 10, 9, 7, 5, 3, 2, 1]
print('='*20)
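# Complementary note: the built-in sorted() returns a new sorted list and
# leaves the original unchanged (unlike sort())
sorted_copy = sorted(sort_arr)
print(sorted_copy) # [1, 2, 3, 5, 7, 9, 10, 12, 22]
print(sort_arr) # [22, 12, 10, 9, 7, 5, 3, 2, 1]
print('='*20)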
# 10. del keyword: delete an element by index, mutating the original list
del_list = [10, 7, 3, 12, 22, 1, 2, 5, 9]
del del_list[0]
print(del_list) # [7, 3, 12, 22, 1, 2, 5, 9]
print('='*20)
# 11. in keyword: use in to test whether the list contains an element
in_list = [10, 7, 3, 12, 22, 1, 2, 5, 9]
in_item_position = in_list.index(10)
print(in_item_position)
if 10 in in_list:
print('True')
else:
print('False')
print('='*20)
# 12. list(): convert another data type to a list; the original is unchanged and a new list is produced
word_str = 'hello world'
new_word_list = list(word_str)
print(word_str) # hello world
print(new_word_list) # ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']
print(type(new_word_list)) # <class 'list'>
|
<reponame>Underscore-tZ/mkw-sp
#!/usr/bin/env python3
import os, sys
from vendor.ninja_syntax import Writer
try:
import json5
del json5
except ModuleNotFoundError:
raise SystemExit("Error: pyjson5 not found. Please install it with `python -m pip install json5`")
n = Writer(open('build.ninja', 'w'))
n.variable('ninja_required_version', '1.3')
n.newline()
n.variable('builddir', 'build')
n.variable('outdir', 'out')
n.newline()
devkitppc = os.environ.get("DEVKITPPC")
if devkitppc is None:
    raise SystemExit("Error: DEVKITPPC environment variable is not set.")
n.variable('cc', os.path.join(devkitppc, 'bin', 'powerpc-eabi-gcc'))
n.variable('cpp', os.path.join(devkitppc, 'bin', 'powerpc-eabi-g++'))
n.variable('port', 'port.py')
n.newline()
asflags = [
'-isystem', 'include',
'-isystem', 'payload',
]
cflags = [
'-fms-extensions',
'-fno-asynchronous-unwind-tables',
'-fplan9-extensions',
'-fshort-wchar',
'-isystem', 'include',
'-isystem', 'payload',
'-O2',
'-Wall',
'-Werror=implicit-function-declaration',
'-Werror=incompatible-pointer-types',
'-Wextra',
'-Wno-packed-bitfield-compat',
]
cppflags = [
'-fms-extensions',
'-fno-asynchronous-unwind-tables',
# '-fplan9-extensions',
'-fshort-wchar',
'-isystem', 'include',
'-isystem', 'payload',
'-O2',
'-Wall',
# '-Werror=implicit-function-declaration',
# '-Werror=incompatible-pointer-types',
'-Wextra',
'-Wno-packed-bitfield-compat',
'-fno-exceptions',
'-fno-unwind-tables',
]
ldflags = [
'-nostdlib',
'-Wl,--entry=start',
'-Wl,--oformat,binary',
]
n.variable('asflags', ' '.join(asflags))
n.variable('cflags', ' '.join(cflags))
n.variable('cppflags', ' '.join(cppflags))
n.variable('ldflags', ' '.join(ldflags))
n.newline()
n.rule(
'as',
command = '$cc -MD -MT $out -MF $out.d $asflags -c $in -o $out',
depfile = '$out.d',
deps = 'gcc',
description = 'AS $out',
)
n.newline()
n.rule(
'cc',
command = '$cc -MD -MT $out -MF $out.d $cflags -c $in -o $out',
depfile = '$out.d',
deps = 'gcc',
description = 'CC $out',
)
n.newline()
n.rule(
'cpp',
command = '$cpp -MD -MT $out -MF $out.d $cppflags -c $in -o $out',
depfile = '$out.d',
deps = 'gcc',
description = 'CPP $out',
)
n.newline()
n.rule(
'port',
command = f'{sys.executable} $port $region $in $out',
description = 'PORT $out'
)
n.newline()
ldparams = [
'-Wl,--defsym,base=$base',
'-Wl,-T,$script',
]
n.rule(
'ld',
command = '$cc $ldflags ' + ' '.join(ldparams) + ' $in -o $out',
description = 'LD $out',
)
n.newline()
code_in_files = {
'loader': [
os.path.join('Loader.c'),
],
'payload': [
os.path.join('egg', 'core', 'eggHeap.c'),
os.path.join('egg', 'core', 'eggVideo.S'),
os.path.join('egg', 'core', 'eggSystem.c'),
os.path.join('game', 'effect', 'Effect.S'),
os.path.join('game', 'gfx', 'Camera.S'),
os.path.join('game', 'gfx', 'CameraManager.S'),
os.path.join('game', 'host_system', 'BootStrapScene.c'),
os.path.join('game', 'host_system', 'Patcher.c'),
os.path.join('game', 'host_system', 'Payload.c'),
os.path.join('game', 'host_system', 'RkSystem.c'),
os.path.join('game', 'host_system', 'SceneManager.S'),
os.path.join('game', 'host_system', 'SceneManager.c'),
os.path.join('game', 'item', 'ItemObjKouraAka.S'),
os.path.join('game', 'item', 'ItemObjKouraAo.S'),
os.path.join('game', 'item', 'ItemObjKouraMidori.S'),
os.path.join('game', 'kart', 'KartMove.S'),
os.path.join('game', 'kart', 'KartObject.S'),
os.path.join('game', 'kart', 'KartObjectManager.S'),
os.path.join('game', 'kart', 'KartObjectManager.c'),
os.path.join('game', 'kart', 'KartParam.S'),
os.path.join('game', 'kart', 'KartState.S'),
os.path.join('game', 'kart', 'KartSub.S'),
os.path.join('game', 'obj', 'ObjManager.c'),
os.path.join('game', 'obj', 'ObjPylon01.S'),
os.path.join('game', 'obj', 'ObjPylon01.c'),
os.path.join('game', 'race', 'Driver.S'),
os.path.join('game', 'race', 'DriverManager.c'),
os.path.join('game', 'race', 'JugemManager.S'),
os.path.join('game', 'race', 'JugemManager.c'),
os.path.join('game', 'snd', 'Snd.S'),
os.path.join('game', 'system', 'CourseMap.S'),
os.path.join('game', 'system', 'DvdArchive.S'),
os.path.join('game', 'system', 'GhostFile.c'),
os.path.join('game', 'system', 'HomeButton.S'),
os.path.join('game', 'system', 'InputManager.S'),
os.path.join('game', 'system', 'InputManager.c'),
os.path.join('game', 'system', 'Mii.S'),
os.path.join('game', 'system', 'MultiDvdArchive.S'),
os.path.join('game', 'system', 'MultiDvdArchive.c'),
os.path.join('game', 'system', 'NandHelper.c'),
os.path.join('game', 'system', 'NandManager.S'),
os.path.join('game', 'system', 'RaceConfig.S'),
os.path.join('game', 'system', 'RaceConfig.c'),
os.path.join('game', 'system', 'RaceManager.S'),
os.path.join('game', 'system', 'RaceManager.c'),
os.path.join('game', 'system', 'ResourceManager.S'),
os.path.join('game', 'system', 'ResourceManager.c'),
os.path.join('game', 'system', 'SaveManager.S'),
os.path.join('game', 'system', 'SaveManager.c'),
os.path.join('game', 'system', 'Yaz.c'),
os.path.join('game', 'ui', 'GhostManagerPage.S'),
os.path.join('game', 'ui', 'GhostManagerPage.c'),
os.path.join('game', 'ui', 'GhostSelectButton.c'),
os.path.join('game', 'ui', 'GhostSelectControl.c'),
os.path.join('game', 'ui', 'Layout.S'),
os.path.join('game', 'ui', 'License.S'),
os.path.join('game', 'ui', 'License.c'),
os.path.join('game', 'ui', 'LicenseSelectButton.c'),
os.path.join('game', 'ui', 'LicenseSelectPage.c'),
os.path.join('game', 'ui', 'LicenseSettingsPage.c'),
os.path.join('game', 'ui', 'Map2DRenderer.c'),
os.path.join('game', 'ui', 'MiiGroup.c'),
os.path.join('game', 'ui', 'Model.S'),
os.path.join('game', 'ui', 'Page.c'),
os.path.join('game', 'ui', 'SaveManagerProxy.S'),
os.path.join('game', 'ui', 'Section.S'),
os.path.join('game', 'ui', 'Section.c'),
os.path.join('game', 'ui', 'SectionManager.S'),
os.path.join('game', 'ui', 'SectionManager.c'),
os.path.join('game', 'ui', 'TabControl.c'),
os.path.join('game', 'ui', 'TimeAttackGhostListPage.c'),
os.path.join('game', 'ui', 'TimeAttackRulesPage.c'),
os.path.join('game', 'ui', 'TimeAttackTopPage.S'),
os.path.join('game', 'ui', 'TitlePage.S'),
os.path.join('game', 'ui', 'TitlePage.c'),
os.path.join('game', 'ui', 'UIAnimator.c'),
os.path.join('game', 'ui', 'UIControl.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlMenuBackButton.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlMenuInstructionText.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlMenuPageTitleText.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRace2DMap.S'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceBase.S'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceBase.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceInputDisplay.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceLap.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceNameBalloon.S'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceNameBalloon.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceSpeed.c'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceTime.S'),
os.path.join('game', 'ui', 'ctrl', 'CtrlRaceTime.c'),
os.path.join('game', 'ui', 'page', 'CharacterSelectPage.S'),
os.path.join('game', 'ui', 'page', 'CourseSelectPage.S'),
os.path.join('game', 'ui', 'page', 'CupSelectPage.S'),
os.path.join('game', 'ui', 'page', 'DemoPage.S'),
os.path.join('game', 'ui', 'page', 'DriftSelectPage.S'),
os.path.join('game', 'ui', 'page', 'MachineSelectPage.S'),
os.path.join('game', 'ui', 'page', 'RaceMenuPage.S'),
os.path.join('game', 'ui', 'page', 'RaceMenuPage.c'),
os.path.join('game', 'ui', 'page', 'RacePage.S'),
os.path.join('game', 'ui', 'page', 'RacePage.c'),
os.path.join('game', 'ui', 'page', 'SingleTopMenuPage.S'),
os.path.join('game', 'ui', 'page', 'SingleTopMenuPage.c'),
os.path.join('game', 'ui', 'page', 'TimeAttackSplitsPage.S'),
os.path.join('game', 'ui', 'page', 'TimeAttackSplitsPage.c'),
os.path.join('game', 'ui', 'page', 'TopMenuPage.S'),
os.path.join('game', 'ui', 'page', 'TopMenuPage.c'),
os.path.join('game', 'ui', 'page', 'VsMenuPage.S'),
os.path.join('game', 'ui', 'page', 'VsMenuPage.c'),
os.path.join('game', 'util', 'Input.S'),
os.path.join('nw4r', 'lyt', 'lyt_arcResourceAccessor.S'),
os.path.join('nw4r', 'lyt', 'lyt_layout.S'),
os.path.join('nw4r', 'snd', 'snd_DvdSoundArchive.S'),
os.path.join('revolution', 'dvd.c'),
os.path.join('revolution', 'nand.c'),
os.path.join('sp', 'Fatal.c'),
os.path.join('sp', 'FlameGraph.c'),
os.path.join('sp', 'IOSKeyboard.c'),
os.path.join('sp', 'Keyboard.c'),
],
}
code_out_files = {target: [] for target in code_in_files}
for target in code_in_files:
for in_file in code_in_files[target]:
in_file = os.path.join(target, in_file)
_, ext = os.path.splitext(in_file)
out_file = os.path.join('$builddir', in_file + '.o')
rule = {
'.S': 'as',
'.c': 'cc',
'.cpp': 'cpp',
}[ext]
n.build(
out_file,
rule,
in_file,
)
code_out_files[target] += [out_file]
n.newline()
for region in ['P', 'E', 'J', 'K']:
n.build(
os.path.join('$builddir', 'scripts', f'RMC{region}.ld'),
'port',
os.path.join('.', 'symbols.txt'),
variables = {
'region': region,
},
implicit = '$port',
)
n.newline()
n.build(
    os.path.join('$outdir', 'loader.bin'),
'ld',
code_out_files['loader'],
variables = {
'base': '0x80004000',
'script': os.path.join('loader', 'RMC.ld'),
},
implicit = os.path.join('loader', 'RMC.ld'),
)
n.newline()
for region in ['P', 'E', 'J', 'K']:
n.build(
os.path.join('$outdir', 'disc', 'bin', f'payload{region}.bin'),
'ld',
code_out_files['payload'],
variables = {
'base': {
'P': '0x8076db60',
'E': '0x80769400',
'J': '0x8076cca0',
'K': '0x8075bfe0',
}[region],
'script': os.path.join('$builddir', 'scripts', f'RMC{region}.ld'),
},
implicit = os.path.join('$builddir', 'scripts', f'RMC{region}.ld'),
)
n.newline()
n.variable('wuj5', os.path.join('vendor', 'wuj5', 'wuj5.py'))
n.newline()
n.rule(
'wuj5',
command = f'{sys.executable} $wuj5 encode $in -o $out',
description = 'WUJ5 $out',
)
n.newline()
n.rule(
'cp',
command = 'cp $in $out',
description = 'CP $out',
)
n.newline()
n.rule(
'szs',
command = f'{sys.executable} $wuj5 encode $szsin -o $out --retained $in $args',
description = 'SZS $out',
)
n.newline()
asset_in_files = {
'MultSP_E.szs': [
os.path.join('message', 'Menu_E.bmg.json5'),
],
'MultSP_F.szs': [
os.path.join('message', 'Menu_F.bmg.json5'),
],
'MultSP_G.szs': [
os.path.join('message', 'Menu_G.bmg.json5'),
],
'MultSP_I.szs': [
os.path.join('message', 'Menu_I.bmg.json5'),
],
'MultSP_J.szs': [
os.path.join('message', 'Menu_J.bmg.json5'),
],
'MultSP_K.szs': [
os.path.join('message', 'Menu_K.bmg.json5'),
],
'MultSP_M.szs': [
os.path.join('message', 'Menu_M.bmg.json5'),
],
'MultSP_Q.szs': [
os.path.join('message', 'Menu_Q.bmg.json5'),
],
'MultSP_S.szs': [
os.path.join('message', 'Menu_S.bmg.json5'),
],
'MultSP_U.szs': [
os.path.join('message', 'Menu_U.bmg.json5'),
],
'OthrSP.szs': [
os.path.join('control', 'anim', 'common_w023_rule_menu_fade_in_after.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_fade_in_before.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_fade_in.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_fade_out.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_free.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_free_to_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_fuchi_check_loop.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_select_to_free.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_text_light_01_ok.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_text_light_01_stop.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_text_light_02_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w023_rule_menu_text_light_02_stop.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_active_off.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_active_off_to_on.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_active_on.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_fade_in_after.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_fade_in_before.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_fade_in.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_fade_out.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_free.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_free_to_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_fuchi_check_loop.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_select_to_free.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_text_light_01_ok.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_text_light_01_stop.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_text_light_02_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w024_rule_icon_text_light_02_stop.brlan.json5'),
os.path.join('control', 'blyt', 'common_w024_rule_icon.brlyt.json5'),
os.path.join('control', 'blyt', 'common_w076_license_icon_center.brlyt.json5'),
os.path.join('control', 'blyt', 'common_w201_setting_menu.brlyt.json5'),
os.path.join('control', 'ctrl', 'LicenseDisplay.brctr.json5'),
os.path.join('control', 'ctrl', 'LicenseManagement.brctr.json5'),
os.path.join('control', 'ctrl', 'LicenseSettingRadioBase.brctr.json5'),
os.path.join('control', 'ctrl', 'LicenseSettingRadioOption.brctr.json5'),
os.path.join('control', 'timg', 'tt_license_icon_004.tpl'),
],
'OthrSP_E.szs': [
os.path.join('message', 'Menu_E.bmg.json5'),
],
'OthrSP_F.szs': [
os.path.join('message', 'Menu_F.bmg.json5'),
],
'OthrSP_G.szs': [
os.path.join('message', 'Menu_G.bmg.json5'),
],
'OthrSP_I.szs': [
os.path.join('message', 'Menu_I.bmg.json5'),
],
'OthrSP_J.szs': [
os.path.join('message', 'Menu_J.bmg.json5'),
],
'OthrSP_K.szs': [
os.path.join('message', 'Menu_K.bmg.json5'),
],
'OthrSP_M.szs': [
os.path.join('message', 'Menu_M.bmg.json5'),
],
'OthrSP_Q.szs': [
os.path.join('message', 'Menu_Q.bmg.json5'),
],
'OthrSP_S.szs': [
os.path.join('message', 'Menu_S.bmg.json5'),
],
'OthrSP_U.szs': [
os.path.join('message', 'Menu_U.bmg.json5'),
],
'SnglSP.szs': [
os.path.join('button', 'blyt', 'common_w129_movie_button_single_top.brlyt.json5'),
os.path.join('button', 'ctrl', 'SingleTop.brctr.json5'),
os.path.join('button', 'ctrl', 'TimeAttackGhostListArrowLeft.brctr.json5'),
os.path.join('button', 'ctrl', 'TimeAttackGhostListArrowRight.brctr.json5'),
os.path.join('button', 'ctrl', 'TimeAttackGhostList.brctr.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_active_off.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_active_off_to_on.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_active_on.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_free.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_free_to_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_light_01_ok.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_light_01_stop.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_light_02_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_light_02_stop.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_select.brlan.json5'),
os.path.join('control', 'anim', 'common_w200_ghost_button_select_to_free.brlan.json5'),
os.path.join('control', 'anim', 'friend_room_comment_container_center_to_right.brlan.json5'),
os.path.join('control', 'anim', 'friend_room_comment_container_hide.brlan.json5'),
os.path.join('control', 'anim', 'friend_room_comment_container_left_to_center.brlan.json5'),
os.path.join('control', 'anim', 'friend_room_comment_container_show.brlan.json5'),
os.path.join('control', 'blyt', 'common_w200_ghost_button.brlyt.json5'),
os.path.join('control', 'blyt', 'ghost_container.brlyt.json5'),
os.path.join('control', 'ctrl', 'GhostSelectBase.brctr.json5'),
os.path.join('control', 'ctrl', 'GhostSelectOption.brctr.json5'),
os.path.join('control', 'ctrl', 'TASettingRadioBase.brctr.json5'),
os.path.join('control', 'ctrl', 'TASettingRadioOption.brctr.json5'),
os.path.join('control', 'ctrl', 'TimeAttackGhostListPageNum.brctr.json5'),
],
'SnglSP_E.szs': [
os.path.join('message', 'Menu_E.bmg.json5'),
],
'SnglSP_F.szs': [
os.path.join('message', 'Menu_F.bmg.json5'),
],
'SnglSP_G.szs': [
os.path.join('message', 'Menu_G.bmg.json5'),
],
'SnglSP_I.szs': [
os.path.join('message', 'Menu_I.bmg.json5'),
],
'SnglSP_J.szs': [
os.path.join('message', 'Menu_J.bmg.json5'),
],
'SnglSP_K.szs': [
os.path.join('message', 'Menu_K.bmg.json5'),
],
'SnglSP_M.szs': [
os.path.join('message', 'Menu_M.bmg.json5'),
],
'SnglSP_Q.szs': [
os.path.join('message', 'Menu_Q.bmg.json5'),
],
'SnglSP_S.szs': [
os.path.join('message', 'Menu_S.bmg.json5'),
],
'SnglSP_U.szs': [
os.path.join('message', 'Menu_U.bmg.json5'),
],
'RaceSP.szs': [
os.path.join('button', 'ctrl', 'AfterMenuTimeAttack.brctr.json5'),
os.path.join('button', 'ctrl', 'PauseMenuGhostWatch.brctr.json5'),
os.path.join('button', 'ctrl', 'PauseMenuTimeAttack.brctr.json5'),
os.path.join('button', 'ctrl', 'PauseMenuVS.brctr.json5'),
os.path.join('game_image', 'anim', 'game_image_speed_texture_pattern_0_9.brlan.json5'),
os.path.join('game_image', 'blyt', 'game_image_speed.brlyt.json5'),
os.path.join('game_image', 'blyt', 'InputDisplay.brlyt.json5'),
os.path.join('game_image', 'ctrl', 'battle_total_point.brctr.json5'),
os.path.join('game_image', 'ctrl', 'InputDisplay.brctr.json5'),
os.path.join('game_image', 'ctrl', 'lap_number.brctr.json5'),
os.path.join('game_image', 'ctrl', 'position_multi.brctr.json5'),
os.path.join('game_image', 'ctrl', 'speed_number.brctr.json5'),
os.path.join('game_image', 'ctrl', 'time_number.brctr.json5'),
os.path.join('game_image', 'timg', 'basic_accel_off.tpl'),
os.path.join('game_image', 'timg', 'basic_accel_on.tpl'),
os.path.join('game_image', 'timg', 'basic_cstick_bg.tpl'),
os.path.join('game_image', 'timg', 'basic_cstick_center.tpl'),
os.path.join('game_image', 'timg', 'basic_dpad_down.tpl'),
os.path.join('game_image', 'timg', 'basic_dpad_left.tpl'),
os.path.join('game_image', 'timg', 'basic_dpad_off.tpl'),
os.path.join('game_image', 'timg', 'basic_dpad_right.tpl'),
os.path.join('game_image', 'timg', 'basic_dpad_up.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_bd_off.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_bd_on.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_l_off.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_l_on.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_r_off.tpl'),
os.path.join('game_image', 'timg', 'basic_trigger_r_on.tpl'),
os.path.join('game_image', 'timg', 'tt_d_number_3d_minus.tpl'),
os.path.join('game_image', 'timg', 'tt_d_number_3d_none.tpl'),
],
'RaceSP_E.szs': [
os.path.join('game_image', 'timg', 'tt_speed_E.tpl'),
os.path.join('message', 'Menu_E.bmg.json5'),
os.path.join('message', 'Race_E.bmg.json5'),
],
'RaceSP_F.szs': [
os.path.join('game_image', 'timg', 'tt_speed_F.tpl'),
os.path.join('message', 'Menu_F.bmg.json5'),
os.path.join('message', 'Race_F.bmg.json5'),
],
'RaceSP_G.szs': [
os.path.join('game_image', 'timg', 'tt_speed_G.tpl'),
os.path.join('message', 'Menu_G.bmg.json5'),
os.path.join('message', 'Race_G.bmg.json5'),
],
'RaceSP_I.szs': [
os.path.join('game_image', 'timg', 'tt_speed_I.tpl'),
os.path.join('message', 'Menu_I.bmg.json5'),
os.path.join('message', 'Race_I.bmg.json5'),
],
'RaceSP_J.szs': [
os.path.join('game_image', 'timg', 'tt_speed_E.tpl'),
os.path.join('message', 'Menu_J.bmg.json5'),
os.path.join('message', 'Race_J.bmg.json5'),
],
'RaceSP_K.szs': [
os.path.join('game_image', 'timg', 'tt_speed_E.tpl'),
os.path.join('message', 'Menu_K.bmg.json5'),
os.path.join('message', 'Race_K.bmg.json5'),
],
'RaceSP_M.szs': [
os.path.join('game_image', 'timg', 'tt_speed_S.tpl'),
os.path.join('message', 'Menu_M.bmg.json5'),
os.path.join('message', 'Race_M.bmg.json5'),
],
'RaceSP_Q.szs': [
os.path.join('game_image', 'timg', 'tt_speed_F.tpl'),
os.path.join('message', 'Menu_Q.bmg.json5'),
os.path.join('message', 'Race_Q.bmg.json5'),
],
'RaceSP_S.szs': [
os.path.join('game_image', 'timg', 'tt_speed_S.tpl'),
os.path.join('message', 'Menu_S.bmg.json5'),
os.path.join('message', 'Race_S.bmg.json5'),
],
'RaceSP_U.szs': [
os.path.join('game_image', 'timg', 'tt_speed_E.tpl'),
os.path.join('message', 'Menu_U.bmg.json5'),
os.path.join('message', 'Race_U.bmg.json5'),
],
'TitlSP.szs': [
os.path.join('button', 'blyt', 'common_w076_license_icon_center.brlyt.json5'),
os.path.join('button', 'ctrl', 'LicenseSelect.brctr.json5'),
os.path.join('button', 'ctrl', 'TopMenuMultiWaku.brctr.json5'),
os.path.join('button', 'ctrl', 'TopMenuSingleWaku.brctr.json5'),
os.path.join('button', 'timg', 'tt_license_icon_004.tpl'),
],
'TitlSP_E.szs': [
os.path.join('message', 'Menu_E.bmg.json5'),
],
'TitlSP_F.szs': [
os.path.join('message', 'Menu_F.bmg.json5'),
],
'TitlSP_G.szs': [
os.path.join('message', 'Menu_G.bmg.json5'),
],
'TitlSP_I.szs': [
os.path.join('message', 'Menu_I.bmg.json5'),
],
'TitlSP_J.szs': [
os.path.join('message', 'Menu_J.bmg.json5'),
],
'TitlSP_K.szs': [
os.path.join('message', 'Menu_K.bmg.json5'),
],
'TitlSP_M.szs': [
os.path.join('message', 'Menu_M.bmg.json5'),
],
'TitlSP_Q.szs': [
os.path.join('message', 'Menu_Q.bmg.json5'),
],
'TitlSP_S.szs': [
os.path.join('message', 'Menu_S.bmg.json5'),
],
'TitlSP_U.szs': [
os.path.join('message', 'Menu_U.bmg.json5'),
],
}
asset_out_files = {target: [] for target in asset_in_files}
for target in asset_in_files:
for in_file in asset_in_files[target]:
base, ext = os.path.splitext(in_file)
outext = {
'.json5': '',
'.tpl': '.tpl',
}[ext]
out_file = os.path.join('$builddir', 'Shared.szs.d', base + outext)
in_file = os.path.join('assets', in_file)
        # Skip assets that were already scheduled for another archive
        existing = [f for files in asset_out_files.values() for f in files]
        if out_file not in existing:
rule = {
'.json5': 'wuj5',
'.tpl': 'cp',
}[ext]
n.build(
out_file,
rule,
in_file,
)
asset_out_files[target] += [out_file]
n.newline()
renamed = {
'Menu_E.bmg': 'Menu.bmg',
'Menu_F.bmg': 'Menu.bmg',
'Menu_G.bmg': 'Menu.bmg',
'Menu_I.bmg': 'Menu.bmg',
'Menu_J.bmg': 'Menu.bmg',
'Menu_K.bmg': 'Menu.bmg',
'Menu_M.bmg': 'Menu.bmg',
'Menu_Q.bmg': 'Menu.bmg',
'Menu_S.bmg': 'Menu.bmg',
'Menu_U.bmg': 'Menu.bmg',
'Race_E.bmg': 'Race.bmg',
'Race_F.bmg': 'Race.bmg',
'Race_G.bmg': 'Race.bmg',
'Race_I.bmg': 'Race.bmg',
'Race_J.bmg': 'Race.bmg',
'Race_K.bmg': 'Race.bmg',
'Race_M.bmg': 'Race.bmg',
'Race_Q.bmg': 'Race.bmg',
'Race_S.bmg': 'Race.bmg',
'Race_U.bmg': 'Race.bmg',
'tt_speed_E.tpl': 'tt_speed.tpl',
'tt_speed_F.tpl': 'tt_speed.tpl',
'tt_speed_G.tpl': 'tt_speed.tpl',
'tt_speed_I.tpl': 'tt_speed.tpl',
'tt_speed_S.tpl': 'tt_speed.tpl',
}
renamed = ' '.join([f'--renamed {src} {dst}' for src, dst in renamed.items()])
for target in asset_out_files:
n.build(
os.path.join('$outdir', 'disc', 'Scene', 'UI', target),
'szs',
asset_out_files[target],
variables = {
'szsin': os.path.join('$builddir', 'Shared.szs.d'),
'args': renamed,
},
)
n.newline()
n.variable('configure', os.path.join('.', 'configure.py'))
n.newline()
n.rule(
'configure',
command = '$configure',
generator = True,
)
n.build(
'build.ninja',
'configure',
implicit = [
'$configure',
os.path.join('vendor', 'ninja_syntax.py'),
],
)
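# Typical usage (assuming devkitPPC and the vendored tools are in place):
#   ./configure.py && ninja
# The 'configure' rule above is marked generator=True, so ninja re-runs this
# script automatically when it or vendor/ninja_syntax.py changes.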
|
import os
import codecs
import random
from docopt import docopt
from collections import defaultdict
def main():
"""
Sample from the top K of each day and create batch instances.
"""
args = docopt("""Sample from the top K of each day and create batch instances.
Usage: sample_for_second_annotation_task.py <k> <num_instances> <out_file>
<k> k (as in top k)
    <num_instances> How many types to sample from each day.
<out_file> Batch instances csv out file.
""")
k = int(args['<k>'])
num_instances = int(args['<num_instances>'])
out_file = args['<out_file>']
types = defaultdict(list)
instances_by_type = defaultdict(list)
for day in os.listdir('.'):
if os.path.isdir(day):
            # Open the rules file and sample num_instances types out of the top k
with codecs.open(day + '/rules.tsv', 'r', 'utf-8') as f_in:
curr_top_k = [tuple(line.strip().split('\t')) for line in f_in][:k]
sample_types = random.sample(curr_top_k, num_instances)
                for (p1, p2, count, days) in sample_types:
                    types['###'.join(sorted([p1, p2]))].append(int(day))
# Open the instances file
with codecs.open(day + '/instances.tsv', 'r', 'utf-8') as f_in:
curr_instances = [tuple(line.strip().split('\t')) for line in f_in]
                for (tid1, sf_p1, p1, s1_a0, s1_a1, tid2, sf_p2, p2, s2_a0, s2_a1) in curr_instances:
                    instances_by_type['###'.join(sorted([p1, p2]))].append(
                        (sf_p1.replace('"', "''"), s1_a0.replace('"', "''"),
                         s1_a1.replace('"', "''"), sf_p2.replace('"', "''"),
                         s2_a0.replace('"', "''"), s2_a1.replace('"', "''")))
# Sample 5 instances for each type and generate annotation instances
with codecs.open(out_file, 'w', 'utf-8') as f_out:
# Header
print >> f_out, '"' + '","'.join(('p1', 'p2', 'days', 'inst1_1', 'inst1_2', 'inst2_1', 'inst2_2',
'inst3_1', 'inst3_2', 'inst4_1', 'inst4_2', 'inst5_1', 'inst5_2')) + '"'
        for rel_type, days in types.iteritems():
            p1, p2 = rel_type.split('###')
            days = '-'.join(map(str, days))
            sample_instances = random.sample(instances_by_type[rel_type], 5)
sample_instances = [(sf_p1.replace('{a0}', "<font color='#d95f02'>%s</font>" % s1_a0).
replace('{a1}', "<font color='#7570b3'>%s</font>" % s1_a1),
sf_p2.replace('{a0}', "<font color='#d95f02'>%s</font>" % s2_a0).
replace('{a1}', "<font color='#7570b3'>%s</font>" % s2_a1))
for (sf_p1, s1_a0, s1_a1, sf_p2, s2_a0, s2_a1) in sample_instances]
sample_instances = [item for lst in sample_instances for item in lst]
print >> f_out, '"' + '","'.join([p1.replace('"', "''"), p2.replace('"', "''"), days] + sample_instances) + '"'
if __name__ == '__main__':
main()
|
<filename>src/cython/move.py
#<PyxReplace>#
from collections import namedtuple
from utils.fake_cython import cython
Board = namedtuple("Board", "none")
from constants import (
# Directions
N, S, E, W,
EMPTY,
# Board
A1, A7, H1, H7,
# Pieces
PAWN, ROOK, QUEEN, KING, PIECE_EMPTY,
# Colors
WHITE, BLACK,
# Movements
CAPTURE, BIG_PAWN, EN_PASSANT, PROMOTION,
KINGSIDE, QUEENSIDE,
# Print
PRINT_ARRAY,
# Functions
next_color,
rank
)
from functions import p0x88_to_tuple, p0x88_to_chess_notation
from zobrist import (
zobrist_castling,
zobrist_color,
zobrist_en_passant
)
#<EndReplace>#
class Move(object):
@cython.locals(
board=Board, color=cython.int, origin=cython.int, dest=cython.int,
flags=cython.int, rank_dest=cython.int, promotion=cython.int
)
def __init__(self, board, color, origin, dest, flags):
rank_dest = rank(dest)
promotion = 0
if board.pieces[origin] == PAWN and (rank_dest == 7 or rank_dest == 0):
promotion = QUEEN
self.color = color
self._origin = origin
self._destination = dest
self.flags = flags
self.piece = board.pieces[origin]
self.promotion = promotion
if promotion:
self.flags |= PROMOTION
if board.pieces[dest]:
self.captured = board.pieces[dest]
elif flags & EN_PASSANT:
self.captured = PAWN
else:
self.captured = PIECE_EMPTY
self.half_moves = board.half_moves
self.previous_en_passant = board.en_passant_square
self.previous_hash = board.hash
self.white_castling = board.castling[WHITE]
self.black_castling = board.castling[BLACK]
def do(self, board):
current = self.color
other = next_color(current)
piece = board.pieces[self._origin]
color = board.colors[self._origin]
other_piece = board.pieces[self._destination]
origin = self._origin
dest = self._destination
flags = self.flags
en_passant_square = dest + (N if current == BLACK else S)
board.remove(origin)
board.remove(dest)
board.add(piece, color, dest)
# En passant
if flags & EN_PASSANT:
board.remove(dest + (N if current == BLACK else S))
# Promotion
if flags & PROMOTION:
board.remove(dest)
board.add(self.promotion, color, dest)
if piece == KING:
board.kings[current] = dest
# Castling
if flags & KINGSIDE:
castling_origin = dest + E
castling_dest = dest + W
piece = board.pieces[castling_origin]
board.remove(castling_origin)
board.add(piece, color, castling_dest)
elif flags & QUEENSIDE:
castling_origin = dest + W + W
castling_dest = dest + E
piece = board.pieces[castling_origin]
board.remove(castling_origin)
board.add(piece, color, castling_dest)
board.castling[current] = 0
# if move rook, disable castling:
if board.castling[current] and piece == ROOK:
if current == WHITE:
if board.castling[WHITE] & KINGSIDE and origin == H1:
board.castling[WHITE] ^= KINGSIDE
                elif board.castling[WHITE] & QUEENSIDE and origin == A1:
                    board.castling[WHITE] ^= QUEENSIDE
if current == BLACK:
if board.castling[BLACK] & KINGSIDE and origin == H7:
board.castling[BLACK] ^= KINGSIDE
                elif board.castling[BLACK] & QUEENSIDE and origin == A7:
                    board.castling[BLACK] ^= QUEENSIDE
# if capture rook, disable castling
if board.castling[other] and other_piece == ROOK:
            if current == WHITE:
                # White just captured a black rook on black's home rank
                if board.castling[BLACK] & KINGSIDE and dest == H7:
                    board.castling[BLACK] ^= KINGSIDE
                elif board.castling[BLACK] & QUEENSIDE and dest == A7:
                    board.castling[BLACK] ^= QUEENSIDE
            if current == BLACK:
                # Black just captured a white rook on white's home rank
                if board.castling[WHITE] & KINGSIDE and dest == H1:
                    board.castling[WHITE] ^= KINGSIDE
                elif board.castling[WHITE] & QUEENSIDE and dest == A1:
                    board.castling[WHITE] ^= QUEENSIDE
board.hash ^= zobrist_castling[self.castle()]
board.hash ^= zobrist_castling[board.castle()]
# big pawn
if board.en_passant_square != EMPTY:
board.hash ^= zobrist_en_passant[board.en_passant_square]
if flags & BIG_PAWN:
board.en_passant_square = en_passant_square
board.hash ^= zobrist_en_passant[board.en_passant_square]
else:
board.en_passant_square = EMPTY
# Update half move counter
if piece == PAWN or (flags & (CAPTURE | EN_PASSANT)):
board.half_moves = 0
else:
board.half_moves += 1
if current == BLACK:
board.moves += 1
board.current_color = next_color(current)
board.hash ^= zobrist_color
def do_update(self, board):
self.do(board)
piece = self.piece
color = self.color
origin = self._origin
dest = self._destination
flags = self.flags
promotion = self.promotion
en_passant_square = dest + (N if color == BLACK else S)
board.values[origin] = 0
board.values[dest] = board.piece_value(piece, color, dest)
# En passant
if flags & EN_PASSANT:
board.values[en_passant_square] = 0
# Promotion
if flags & PROMOTION:
board.values[dest] = board.piece_value(promotion, color, dest)
# Castling
if piece == KING:
if flags & KINGSIDE:
castling_origin = dest + E
castling_dest = dest + W
board.values[castling_origin] = 0
board.values[castling_dest] = board.piece_value(
ROOK, color, castling_dest)
elif flags & QUEENSIDE:
castling_origin = dest + W + W
castling_dest = dest + E
board.values[castling_origin] = 0
board.values[castling_dest] = board.piece_value(
ROOK, color, castling_dest)
def undo(self, board):
current = self.color
dest = self._destination
origin = self._origin
piece = self.piece
flags = self.flags
captured = self.captured
en_passant_square = dest + (N if current == BLACK else S)
board.current_color = current
other = next_color(current)
if current == BLACK:
board.moves -= 1
board.half_moves = self.half_moves
board.en_passant_square = self.previous_en_passant
board.castling[WHITE] = self.white_castling
board.castling[BLACK] = self.black_castling
if piece == KING:
board.kings[current] = origin
if flags & KINGSIDE:
castling_origin = dest + E
castling_dest = dest + W
rook_piece = board.pieces[castling_dest]
board.remove(castling_dest)
board.add(rook_piece, current, castling_origin)
elif flags & QUEENSIDE:
castling_origin = dest + W + W
castling_dest = dest + E
rook_piece = board.pieces[castling_dest]
board.remove(castling_dest)
board.add(rook_piece, current, castling_origin)
board.remove(dest)
board.add(piece, current, origin)
if captured:
if flags & EN_PASSANT:
board.add(PAWN, other, en_passant_square)
else:
board.add(captured, other, dest)
board.hash = self.previous_hash
def undo_update(self, board):
self.undo(board)
color = self.color
other = next_color(color)
dest = self._destination
origin = self._origin
piece = self.piece
flags = self.flags
captured = self.captured
en_passant_square = dest + (N if color == BLACK else S)
# castling
if piece == KING:
if flags & KINGSIDE:
castling_origin = dest + E
castling_dest = dest + W
board.values[castling_dest] = 0
board.values[castling_origin] = board.piece_value(
ROOK, color, castling_origin)
elif flags & QUEENSIDE:
castling_origin = dest + W + W
castling_dest = dest + E
board.values[castling_dest] = 0
board.values[castling_origin] = board.piece_value(
ROOK, color, castling_origin)
        board.values[origin] = board.piece_value(piece, color, origin)
if captured:
if flags & EN_PASSANT:
board.values[en_passant_square] = board.piece_value(
PAWN, other, en_passant_square)
else:
board.values[dest] = board.piece_value(captured, other, dest)
else:
board.values[dest] = 0
def origin(self):
return self._origin
def destination(self):
return self._destination
def castle(self):
return (
(self.white_castling >> 4 >> 1) | (self.black_castling >> 2 >> 1)
)
def tuple(self):
return (
p0x88_to_tuple(self._origin),
p0x88_to_tuple(self._destination)
)
def score(self):
return self.captured
def get_flags(self):
return self.flags
def readable(self):
return "%s%s %c" % (
p0x88_to_chess_notation(self._origin),
p0x88_to_chess_notation(self._destination),
PRINT_ARRAY[self.color][self.piece]
)
def type(self):
flags = self.flags
if flags & KINGSIDE or flags & QUEENSIDE:
return 1
elif flags & EN_PASSANT:
return 2
elif flags & PROMOTION:
return 3
return 0
def set_promotion(self, new_piece):
if self.promotion != 0:
self.promotion = new_piece
def get_eliminated_pawn(self):
return p0x88_to_tuple(self._destination +
(N if self.color == BLACK else S))
def rook_from(self):
if self.flags & KINGSIDE:
return p0x88_to_tuple(self._destination + E)
else:
return p0x88_to_tuple(self._destination + W + W)
def rook_to(self):
if self.flags & KINGSIDE:
return p0x88_to_tuple(self._destination + W)
else:
            return p0x88_to_tuple(self._destination + E)
|
<reponame>JI411/fuzzy-doc-search
"""
Class for OCR scanned_pdf
"""
# pylint: disable=line-too-long
import tempfile
from pathlib import Path
from multiprocessing import Pool
from typing import Dict, List, Any
import datetime
import cv2
import numpy as np
from PIL import Image
import pytesseract
import fitz
import pdf2image
class Recognizer:
"""
Basic class to OCR scanned_pdf
"""
def __init__(self, dpi: int, log_path: Path, searchable_pdf_dir: Path, preprocess_config: Dict, lang: str) -> None:
"""
:param dpi: Dots per inch, dpi >= 300 recommended
:param log_path: path to log
:param searchable_pdf_dir: directory to save pdf after OCR
:param preprocess_config: config with keys 'resize', 'adaptiveThreshold', 'bilateralFilter'
        :param lang: language for tesseract-ocr
"""
        # pylint: disable=too-many-arguments
        # because it is necessary to define all params in __init__;
        # we could keep some in a config, but this variant is clearer
self.dpi = int(dpi)
self.preprocess_config = preprocess_config
self.lang = lang
self.log_path = log_path
self.searchable_pdf_dir = searchable_pdf_dir
self.log(f'Recognizer initialization: {datetime.datetime.now()}')
def log(self, *args, **kwargs) -> None:
"""
Write log to self.log file
"""
print(*args, **kwargs)
with open(self.log_path, 'a', encoding='utf-8') as file:
print(*args, **kwargs, file=file, flush=True)
@staticmethod
    def image_preprocess(image: Image.Image, config: Dict) -> Any:
        """
        Preprocess an image for better recognition
        :param image: PIL.Image from pdf2image.convert_from_path
        :param config: config with keys 'resize', 'adaptiveThreshold', 'bilateralFilter'
        :return: img as np.array
        """
img = np.array(image.convert('RGB'))[:, :, ::-1].copy()
if config.get('resize', False):
img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
if config.get('adaptiveThreshold', False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
if config.get('bilateralFilter', False):
img = cv2.bilateralFilter(img, 9, 75, 75)
return img
def scanned2searchable(self, pdf_path: Path) -> None:
"""
Use pdf2image and tesseract to convert scanned_pdf to searchable
:param pdf_path: path to one pdf file
        :return: None, the pdf is saved to the searchable_pdf dir
"""
with tempfile.TemporaryDirectory(prefix=pdf_path.name) as tmp:
self.log(f'Start pdf recognition: {pdf_path.name}')
pdf: fitz.Document = fitz.Document()
            images: List[Image.Image] = pdf2image.convert_from_path(pdf_path, output_folder=tmp, dpi=self.dpi)
for page_num, img in enumerate(images):
if self.preprocess_config:
img = self.image_preprocess(image=img, config=self.preprocess_config)
try:
                    page_bytes = pytesseract.image_to_pdf_or_hocr(img, lang=self.lang, config='--psm 6')
                    # noinspection PyUnresolvedReferences
                    with fitz.open('pdf', page_bytes) as page_pdf:
                        pdf.insert_pdf(page_pdf)
# pylint: disable=broad-except
# because it was created as a tool for people who can't write code - it should work anyway
except Exception as ex:
self.log(f'{ex} on page {page_num} in pdf file {pdf_path.name}')
self.log(f'Done pdf recognition: {pdf_path.name}')
pdf.save(self.searchable_pdf_dir / pdf_path.name)
if __name__ == '__main__':
# export TESSDATA_PREFIX='/usr/share/tesseract-ocr/4.00/tessdata'
pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'
project_dir = Path.cwd().parent
scanned_pdf_dir = project_dir / 'inp' / 'scanned_pdf'
recognizer = Recognizer(dpi=600, log_path=project_dir / 'log.txt', lang='ru',
searchable_pdf_dir=project_dir / 'inp' / 'searchable_pdf',
preprocess_config={'resize': False, 'adaptiveThreshold': False, 'bilateralFilter': False})
with Pool(processes=4) as pool:
pool.map(recognizer.scanned2searchable, scanned_pdf_dir.glob('*.pdf'))
recognizer.log(f'Recognizer finish: {datetime.datetime.now()}')
|
import sys
from django.contrib.admin.forms import forms
from django.conf import settings
from django.apps import apps
from muddery.worlddata import forms_base
class GameSettingsForm(forms_base.GameSettingsForm):
pass
class ClassCategoriesForm(forms_base.ClassCategoriesForm):
pass
class TypeclassesForm(forms_base.TypeclassesForm):
pass
class EquipmentTypesForm(forms_base.EquipmentTypesForm):
pass
class EquipmentPositionsForm(forms_base.EquipmentPositionsForm):
pass
class CharacterCareersForm(forms_base.CharacterCareersForm):
pass
class QuestObjectiveTypesForm(forms_base.QuestObjectiveTypesForm):
pass
class EventTypesForm(forms_base.EventTypesForm):
pass
class EventTriggerTypes(forms_base.EventTriggerTypes):
pass
class QuestDependencyTypesForm(forms_base.QuestDependencyTypesForm):
pass
class WorldAreasForm(forms_base.WorldAreasForm):
pass
class WorldRoomsForm(forms_base.WorldRoomsForm):
pass
class WorldExitsForm(forms_base.WorldExitsForm):
pass
class ExitLocksForm(forms_base.ExitLocksForm):
pass
class TwoWayExitsForm(forms_base.TwoWayExitsForm):
pass
class WorldObjectsForm(forms_base.WorldObjectsForm):
pass
class WorldNPCsForm(forms_base.WorldNPCsForm):
pass
class ObjectCreatorsForm(forms_base.ObjectCreatorsForm):
pass
class CreatorLootListForm(forms_base.CreatorLootListForm):
pass
class CharacterLootListForm(forms_base.CharacterLootListForm):
pass
class QuestRewardListForm(forms_base.QuestRewardListForm):
pass
class CommonObjectsForm(forms_base.CommonObjectsForm):
pass
class FoodsForm(forms_base.FoodsForm):
pass
class SkillBooksForm(forms_base.SkillBooksForm):
pass
class CharacterAttributesForm(forms_base.CharacterAttributesForm):
pass
class EquipmentAttributesForm(forms_base.EquipmentAttributesForm):
pass
class FoodAttributesForm(forms_base.FoodAttributesForm):
pass
class CharacterModelsForm(forms_base.CharacterModelsForm):
pass
class CommonCharacterForm(forms_base.CommonCharacterForm):
pass
class DefaultObjectsForm(forms_base.DefaultObjectsForm):
pass
class ShopsForm(forms_base.ShopsForm):
pass
class ShopGoodsForm(forms_base.ShopGoodsForm):
pass
class NPCShopsForm(forms_base.NPCShopsForm):
pass
class SkillsForm(forms_base.SkillsForm):
pass
class SkillTypesForm(forms_base.SkillTypesForm):
pass
class DefaultSkillsForm(forms_base.DefaultSkillsForm):
pass
class NPCDialoguesForm(forms_base.NPCDialoguesForm):
pass
class QuestsForm(forms_base.QuestsForm):
pass
class QuestObjectivesForm(forms_base.QuestObjectivesForm):
pass
class QuestDependenciesForm(forms_base.QuestDependenciesForm):
pass
class DialogueQuestDependenciesForm(forms_base.DialogueQuestDependenciesForm):
pass
class EquipmentsForm(forms_base.EquipmentsForm):
pass
class CareerEquipmentsForm(forms_base.CareerEquipmentsForm):
pass
class EventDataForm(forms_base.EventDataForm):
pass
class EventAttacksForm(forms_base.EventAttacksForm):
pass
class EventDialoguesForm(forms_base.EventDialoguesForm):
pass
class DialoguesForm(forms_base.DialoguesForm):
pass
class DialogueRelationsForm(forms_base.DialogueRelationsForm):
pass
class DialogueSentencesForm(forms_base.DialogueSentencesForm):
pass
class ConditionDescForm(forms_base.ConditionDescForm):
pass
class LocalizedStringsForm(forms_base.LocalizedStringsForm):
pass
class ImageResourcesForm(forms_base.ImageResourcesForm):
pass
class IconResourcesForm(forms_base.IconResourcesForm):
pass
class Manager:
relations = {}
@classmethod
def get_form(cls, model_name):
"""
Get form class by model's name.
Args:
model_name: model's name
Returns:
"""
return cls.relations[model_name]
@classmethod
def init_data(cls):
module = sys.modules[cls.__module__]
for name in dir(module):
            try:
                form_class = getattr(module, name)
                model_name = form_class.Meta.model.__name__
                cls.relations[model_name] = form_class
            except Exception:
                # Skip module attributes that are not form classes with a Meta.model
                pass
Manager.init_data()
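# A minimal illustration of the lookup. The model name 'GameSettings' is
# assumed to match forms_base.GameSettingsForm's Meta.model; substitute a
# model name that actually exists in your data:
#
# form_class = Manager.get_form('GameSettings')
# form = form_class()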
|
<gh_stars>1-10
#!/usr/local/bin/python2 -tt
from scapy.all import *
import struct
MESSAGETYPEOFFSETUDP = 17
MESSAGETYPEOFFSETTCP = 21
DEBUG = True
TGS_REP = chr(13)
def findkerbpayloads(packets, verbose=False):
kploads = []
i = 1
unfinished = {}
for p in packets:
# UDP
if p.haslayer(UDP) and p.sport == 88 and p[UDP].load[MESSAGETYPEOFFSETUDP] == TGS_REP:
if verbose: print "found UDP payload of size %i" % len(p[UDP].load)
kploads.append(p[UDP].load)
#TCP
        elif p.haslayer(TCP) and p.sport == 88 and p[TCP].flags & 23 == 16: # ACK only; ignore PSH (8), URG (32), and ECE/CWR (64+128)
# assumes that each TCP packet contains the full payload
try:
payload = p[TCP].load
except:
continue
if len(payload) > MESSAGETYPEOFFSETTCP and payload[MESSAGETYPEOFFSETTCP] == TGS_REP:
# found start of new TGS-REP
size = struct.unpack(">I", payload[:4])[0]
if size + 4 == len(payload):
kploads.append(payload[4:size+4]) # strip the size field
else:
#print 'ERROR: Size is incorrect: %i vs %i' % (size, len(payload))
unfinished[(p[IP].src, p[IP].dst, p[TCP].dport)] = (payload[4:size+4], size)
if verbose: print "found TCP payload of size %i" % size
elif unfinished.has_key((p[IP].src, p[IP].dst, p[TCP].dport)):
ticketdata, size = unfinished.pop((p[IP].src, p[IP].dst, p[TCP].dport))
ticketdata += payload
#print "cont: %i %i" % (len(ticketdata), size)
if len(ticketdata) == size:
kploads.append(ticketdata)
elif len(ticketdata) < size:
unfinished[(p[IP].src, p[IP].dst, p[TCP].dport)] = (ticketdata, size)
else:
# OH NO! Oversized!
print 'Too much data received! Source: %s Dest: %s DPort %i' % (p[IP].src, p[IP].dst, p[TCP].dport)
return kploads
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Find TGS_REP packets in a pcap file and write them for use cracking')
parser.add_argument('-f', '--pcap', dest='pcaps', action='append', required=True,
metavar='PCAPFILE', #type=file, #argparse.FileType('r'),
help='a file to search for Kerberos TGS_REP packets')
parser.add_argument('-w', '--outputfile', dest='outfile', action='store', required=True,
metavar='OUTPUTFILE', type=argparse.FileType('w'),
help='the output file')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,
help='display verbose messages')
args = parser.parse_args()
kploads = []
for f in args.pcaps:
packets = rdpcap(f)
kploads += findkerbpayloads(packets, args.verbose)
if len(kploads) == 0:
print 'no payloads found'
else:
print 'writing %i hex encoded payloads to %s' % (len(kploads), args.outfile.name)
for p in kploads:
args.outfile.write(p.encode('hex') + '\n')
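# Example invocation (file names are illustrative, not from the source):
#   python2 findkerbpayloads.py -f capture.pcap -w tgs_payloads.txt -v
# Each line of the output file is one hex-encoded TGS-REP payload.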
|
# Copyright 2017 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import horovod.tensorflow as hvd
import tensorflow as tf
from horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper
from horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager
from horovod.tensorflow.mpi_ops import rank
_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')
def create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse,
compression, sparse_as_dense, gradient_predivide_factor,
op, backward_passes_per_step=1,
average_aggregated_gradients=False,
num_groups=0):
class _DistributedOptimizer(keras.optimizers.Optimizer):
_HAS_AGGREGATE_GRAD = True
def __init__(self, **kwargs):
self._name = name or "Distributed%s" % self.__class__.__base__.__name__
self._aggregated_gradients = False
self._allreduce_grads = hvd._make_allreduce_grads_fn(
self._name,
device_dense,
device_sparse,
compression,
sparse_as_dense,
op,
gradient_predivide_factor,
num_groups)
self._agg_helper = None
if backward_passes_per_step > 1:
if hvd._executing_eagerly():
self._agg_helper = LocalGradientAggregationHelperEager(
backward_passes_per_step=backward_passes_per_step,
allreduce_func=self._allreduce_grads,
sparse_as_dense=sparse_as_dense,
average_aggregated_gradients=average_aggregated_gradients,
)
else:
self._agg_helper = LocalGradientAggregationHelper(
backward_passes_per_step=backward_passes_per_step,
allreduce_func=self._allreduce_grads,
sparse_as_dense=sparse_as_dense,
average_aggregated_gradients=average_aggregated_gradients,
rank=rank(),
optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS,
)
super(self.__class__, self).__init__(**kwargs)
def get_gradients(self, loss, params):
"""
Compute gradients of all trainable variables.
See Optimizer.get_gradients() for more info.
        In DistributedOptimizer, get_gradients() is overridden to also
allreduce the gradients before returning them.
"""
gradients = super(self.__class__, self).get_gradients(loss, params)
return self._allreduce(gradients)
def _aggregate_gradients(self, grads_and_vars):
grads, vars = list(zip(*grads_and_vars))
aggregated_grads = self._allreduce(grads)
if _PRE_TF_2_4_0:
# Prior to TF 2.4.0, this function was expected to return only a list of
# grads, not a list of (grad, var) tuples.
return aggregated_grads
return list(zip(aggregated_grads, vars))
def _allreduce(self, grads):
self._aggregated_gradients = True
if self._agg_helper:
return self._agg_helper.compute_gradients(tuple(grads))
else:
return self._allreduce_grads(grads)
def apply_gradients(self, *args, **kwargs):
if self._agg_helper:
if isinstance(args[0], zip):
# If grad_and_vars are passed in as a zip object
# convert to a list. This is necessary for TF2.4+
# b/c args[0] is used in both conditional branches
# inside _agg_helper.apply_gradients().
args = list(args)
args[0] = list(args[0])
args = tuple(args)
results = self._agg_helper.apply_gradients(
lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),
self,
*args,
**kwargs,
)
else:
results = super(self.__class__, self).apply_gradients(*args, **kwargs)
if not self._aggregated_gradients:
raise Exception('`apply_gradients()` was called without a call to '
'`get_gradients()` or `_aggregate_gradients`. If you\'re '
'using TensorFlow 2.0, please specify '
'`experimental_run_tf_function=False` in `compile()`.')
return results
# We dynamically create a new class that inherits from the optimizer that was passed in.
# The goal is to override get_gradients() method with an allreduce implementation.
# This class will have the same name as the optimizer it's wrapping, so that the saved
# model could be easily restored without Horovod.
cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
dict(_DistributedOptimizer.__dict__))
return cls.from_config(optimizer.get_config())
def _eval(backend, op_or_result):
if hvd._executing_eagerly():
return op_or_result
else:
return backend.get_session().run(op_or_result)
if hasattr(hvd, 'broadcast_global_variables'):
def broadcast_global_variables(backend, root_rank):
return _eval(backend, hvd.broadcast_global_variables(root_rank))
def allreduce(backend, value, name, average, prescale_factor, postscale_factor, op, compression):
return _eval(backend, hvd.allreduce(tf.constant(value, name=name), average=average,
prescale_factor=prescale_factor,
postscale_factor=postscale_factor,
op=op, compression=compression))
def allgather(backend, value, name):
return _eval(backend, hvd.allgather(tf.constant(value, name=name)))
def broadcast(backend, value, root_rank, name):
return _eval(backend, hvd.broadcast(tf.constant(value, name=name), root_rank))
def load_model(keras, wrap_optimizer, optimizer_modules, filepath, custom_optimizers, custom_objects):
horovod_objects = {
subclass.__name__.lower(): wrap_optimizer(subclass)
for subclass in keras.optimizers.Optimizer.__subclasses__()
if subclass.__module__ in optimizer_modules
}
if custom_optimizers is not None:
horovod_objects.update({
cls.__name__: wrap_optimizer(cls)
for cls in custom_optimizers
})
if custom_objects is not None:
horovod_objects.update(custom_objects)
return keras.models.load_model(filepath, custom_objects=horovod_objects)
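# Usage sketch (hedged): this module is normally reached through Horovod's public
# wrapper horovod.tensorflow.keras.DistributedOptimizer, roughly as follows.
# `experimental_run_tf_function=False` matches the error message raised above.
#   import horovod.tensorflow.keras as hvd
#   hvd.init()
#   opt = tf.keras.optimizers.SGD(0.01 * hvd.size())
#   opt = hvd.DistributedOptimizer(opt, backward_passes_per_step=4,
#                                  average_aggregated_gradients=True)
#   model.compile(optimizer=opt, loss='mse', experimental_run_tf_function=False)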
|
from github_webhook import Webhook
from flask import Flask, make_response, request
from json import JSONDecodeError
from protected_repo import ProtectedRepository
from threading import Thread
from mail import Email, FakeEmail
import json
import os
basepath = os.path.dirname(__file__)
CONFIG_FILE = "config.json"
def load_config(filename):
config = {
"secret": "",
"repos": [],
"commit_user": {
"name": "Branch Protection",
"email": "<EMAIL>"
},
"smtp": {
"enabled": False,
},
"notify_emails": []
}
try:
user_config = json.load(open(os.path.join(basepath, filename), mode='r'))
config.update(user_config)
return config
except JSONDecodeError as e:
print("cannot properly parse config file.\n", e)
exit(1)
except FileNotFoundError as e:
print("cannot find config file. create one in config.json\n", e)
exit(1)
def is_github_merge(config, commit):
committer = commit['committer']
bot_user = config['commit_user']
return ((committer['name'] == bot_user['name'] and committer['email'] == bot_user['email']) or
(committer['name'] == "GitHub" and committer['email'] == "<EMAIL>" and committer['username'] == "web-flow"))
def handle_bad_commit(config, request_id, commit, protected_repo, mail):
print("{:s}: Performing corrective actions...".format(request_id))
protected_repo.pull()
protected_repo.push(protected_repo.last_good_commit())
email_addresses = config['notify_emails']
subject = 'Branch Protection Notification'
body = """
Repo `{:s}` with Protected Branch `{:s}` was pushed to. The action has been automagically reverted.
Commit details
==============
SHA: \t{:s}
User: \t{:s}
Email: \t{:s}
Message: \t{:s}
""".format(protected_repo.name, protected_repo.branch, commit["id"], commit["author"]["name"], commit["author"]["email"], commit["message"])
mail.send_notification(email_addresses, subject, body)
config = load_config(CONFIG_FILE)
app = Flask(__name__)
webhook = Webhook(app, secret='' if 'secret' not in config else config['secret'])
repository_maps = [] if 'repos' not in config else config['repos']
mail = FakeEmail()
if config['smtp'] != {} and config['smtp']['enabled']:
host = config['smtp']['host']
port = config['smtp']['port']
username = config['smtp']['username']
    password = config['smtp']['password']
use_tls = False if 'use_tls' not in config['smtp'] else config['smtp']['use_tls']
use_ssl = False if 'use_ssl' not in config['smtp'] else config['smtp']['use_ssl']
mail = Email(host, port, username, password, smtp_tls=use_tls, smtp_ssl=use_ssl)
# Protected Repository object per repository_map
for repository_map in repository_maps:
name = repository_map['name']
path = os.path.join(basepath, '.cache/', name.lower().replace('/', '_'))
branch = repository_map['branch']
url = repository_map['url']
commit_user_name = config['commit_user']['name']
commit_user_email = config['commit_user']['email']
repo = ProtectedRepository(name, path, url, branch)
repo.set_committer(commit_user_name, commit_user_email)
repo.set_author(commit_user_name, commit_user_email)
repository_map['repo'] = repo
@webhook.hook()
def on_push(data):
# get the request id. if we can't get it, don't bother serving the request
request_id = request.headers.get('X-GitHub-Delivery')
if not request_id:
print("Request with no request ID")
return make_response("No request ID. Ignored.", 302)
try:
# Find a matching repository
repo = {}
for known_repo in repository_maps:
if data['repository']['full_name'] == known_repo['name']:
repo = known_repo
break
if repo == {}:
print("{:s} Remote repository {:s} not configured".format(request_id, data['repository']['full_name']))
return make_response("Unconfigured repository, no action taken", 400)
# Match the branches
ref = data['ref']
branch = ref.split(sep='/')[-1]
if branch != repo['branch']:
return make_response("Unaffected branch", 204)
# Get latest commit from webhook
if 'head_commit' not in data:
print("{:s}: request does not have head commit".format(request_id))
return make_response("No head commit", 400)
commit = data['head_commit']
# If the latest commit is a Github merge, we're good
if is_github_merge(config, commit):
print("Latest commit is a merge request. All good.")
return make_response("Is merge. All good.", 204)
        # Oh no, it's not a GitHub merge?
Thread(target=handle_bad_commit, args=(config, request_id, commit, repo['repo'], mail)).start()
except KeyError as e:
print("KeyError when processing hooks\n", e)
return make_response("Can't get the required fields from the data", 400)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
#!/usr/bin/env python3
#
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import inspect
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from cros.factory.tools import image_tool
DEBUG = False
"""Set DEBUG to True to debug this unit test itself.
The major difference is all output will be preserved in /tmp/t.
"""
class ImageToolRMATest(unittest.TestCase):
"""Unit tests for image_tool RMA related commands."""
UPDATER_CONTENT = ('#!/bin/sh\n'
'echo \'{"project": {"host": {"versions": '
'{"ro": "RO", "rw": "RW"}}}}\'\n')
LSB_CONTENT = 'CHROMEOS_RELEASE_VERSION=1.0\nCHROMEOS_RELEASE_BOARD=%s\n'
PARTITION_COMMANDS = [
'%(command)s create %(file)s',
'%(command)s boot -p %(file)s',
'%(command)s add -i 2 -s 1024 -b 34 -t kernel %(file)s',
'%(command)s add -i 3 -s 2048 -b 1058 -t rootfs %(file)s',
'%(command)s add -i 4 -s 1024 -b 3106 -t kernel %(file)s',
'%(command)s add -i 5 -s 2048 -b 4130 -t rootfs %(file)s',
'%(command)s add -i 6 -s 1 -b 6178 -t kernel %(file)s',
'%(command)s add -i 7 -s 1 -b 6179 -t rootfs %(file)s',
'%(command)s add -i 8 -s 1 -b 6180 -t data %(file)s',
'%(command)s add -i 9 -s 1 -b 6181 -t reserved %(file)s',
'%(command)s add -i 10 -s 1 -b 6182 -t reserved %(file)s',
'%(command)s add -i 11 -s 1 -b 6183 -t firmware %(file)s',
'%(command)s add -i 12 -s 1 -b 6184 -t efi %(file)s',
'%(command)s add -i 1 -s 16384 -b 6185 -t data %(file)s',
]
def CheckCall(self, command):
return subprocess.check_call(command, shell=True, cwd=self.temp_dir)
def ImageTool(self, *args):
command = args[0]
if command == image_tool.CMD_NAMESPACE_RMA:
command = args[1]
self.assertIn(command, self.rma_map, 'Unknown command: %s' % command)
cmd = self.rma_map[command](*self.rma_parsers)
else:
self.assertIn(command, self.cmd_map, 'Unknown command: %s' % command)
cmd = self.cmd_map[command](*self.cmd_parsers)
cmd.Init()
cmd_args = self.cmd_parsers[0].parse_args(args)
cmd_args.verbose = 0
cmd_args.subcommand.args = cmd_args
cmd_args.subcommand.Run()
def CreateDiskImage(self, name, lsb_content):
cgpt = image_tool.SysUtils.FindCGPT()
image_path = os.path.join(self.temp_dir, name)
dir_path = os.path.dirname(image_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
self.CheckCall('truncate -s %s %s' % (16 * 1048576, name))
for command in self.PARTITION_COMMANDS:
self.CheckCall(command % dict(command=cgpt, file=name))
with image_tool.GPT.Partition.MapAll(image_path) as f:
self.CheckCall('sudo mkfs -F %sp3' % f)
self.CheckCall('sudo mkfs -F %sp5' % f)
self.CheckCall('sudo mkfs -F %sp1 2048' % f)
with image_tool.Partition(image_path, 3).Mount(rw=True) as d:
fw_path = os.path.join(d, 'usr', 'sbin', 'chromeos-firmwareupdate')
self.CheckCall('sudo mkdir -p %s' % os.path.dirname(fw_path))
tmp_fw_path = os.path.join(self.temp_dir, 'chromeos-firmwareupdate')
with open(tmp_fw_path, 'w') as f:
f.write(self.UPDATER_CONTENT)
self.CheckCall('sudo mv %s %s' % (tmp_fw_path, fw_path))
self.CheckCall('sudo chmod a+rx %s' % fw_path)
common_sh_path = os.path.join(
d, 'usr', 'share', 'misc', 'chromeos-common.sh')
self.CheckCall('sudo mkdir -p %s' % os.path.dirname(common_sh_path))
self.CheckCall('echo "%s" | sudo dd of=%s' %
('#!/bin/sh', common_sh_path))
lsb_path = os.path.join(d, 'etc', 'lsb-release')
self.CheckCall('sudo mkdir -p %s' % os.path.dirname(lsb_path))
self.CheckCall('echo "%s" | sudo dd of=%s' %
(lsb_content.strip('\n'), lsb_path))
write_gpt_path = os.path.join(d, 'usr', 'sbin', 'write_gpt.sh')
self.CheckCall('sudo mkdir -p %s' % os.path.dirname(write_gpt_path))
tmp_write_gpt_path = os.path.join(self.temp_dir, 'write_gpt.sh')
write_command = '\n'.join(
cmd % dict(command=cgpt, file='$1')
for cmd in self.PARTITION_COMMANDS)
with open(tmp_write_gpt_path, 'w') as f:
f.write('\n'.join([
'#!/bin/sh',
'GPT=""',
'GPT="%s"' % cgpt, # Override for unit test.
'write_base_table() {',
write_command,
'}',
]))
self.CheckCall('sudo mv %s %s' % (tmp_write_gpt_path, write_gpt_path))
with image_tool.Partition(image_path, 1).Mount(rw=True) as d:
lsb_path = os.path.join(d, 'dev_image', 'etc', 'lsb-factory')
self.CheckCall('sudo mkdir -p %s' % os.path.dirname(lsb_path))
self.CheckCall('echo "%s" | sudo dd of=%s' %
(lsb_content.strip('\n'), lsb_path))
self.CheckCall('sudo mkdir -p %s' % os.path.join(
d, 'unencrypted', 'import_extensions'))
def SetupBundleEnvironment(self, image_path):
for dir_name in ['factory_shim', 'test_image', 'release_image',
'toolkit', 'hwid', 'complete', 'firmware']:
dir_path = os.path.join(self.temp_dir, dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for name in ['release_image', 'test_image', 'factory_shim']:
dest_path = os.path.join(self.temp_dir, name, 'image.bin')
shutil.copy(image_path, dest_path)
with image_tool.Partition(dest_path, 3).Mount(rw=True) as d:
self.CheckCall('echo "%s" | sudo dd of="%s"' %
(name, os.path.join(d, 'tag')))
with image_tool.Partition(dest_path, 1).Mount(rw=True) as d:
self.CheckCall('echo "%s" | sudo dd of="%s"' %
(name, os.path.join(d, 'tag')))
toolkit_path = os.path.join(self.temp_dir, 'toolkit', 'toolkit.run')
with open(toolkit_path, 'w') as f:
f.write('#!/bin/sh\necho Toolkit Version 1.0\n')
os.chmod(toolkit_path, 0o755)
def RemoveBundleEnvironment(self):
for dir_name in ['factory_shim', 'test_image', 'release_image',
'toolkit', 'hwid', 'complete', 'firmware']:
dir_path = os.path.join(self.temp_dir, dir_name)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def setUp(self):
if DEBUG:
self.temp_dir = '/tmp/t'
else:
self.temp_dir = tempfile.mkdtemp(prefix='image_tool_rma_ut_')
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
self.cmd_parsers = (parser, subparser)
self.cmd_map = dict(
(v.name, v) for v in image_tool.__dict__.values()
if inspect.isclass(v) and issubclass(v, image_tool.SubCommand)
and v.namespace is None)
rma_parser = subparser.add_parser(image_tool.CMD_NAMESPACE_RMA)
rma_subparser = rma_parser.add_subparsers()
self.rma_parsers = (rma_parser, rma_subparser)
self.rma_map = dict(
(v.name, v) for v in image_tool.__dict__.values()
if inspect.isclass(v) and issubclass(v, image_tool.SubCommand)
and v.namespace == image_tool.CMD_NAMESPACE_RMA)
def tearDown(self):
if not DEBUG:
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def testRMACommands(self):
"""Test RMA related commands.
To speed up execution time (CreateDiskImage takes ~2s while shutil.copy only
takes 0.1s) we are testing all commands in one single test case.
"""
self.CreateDiskImage('test1.bin', self.LSB_CONTENT % 'test1')
self.CreateDiskImage('test2.bin', self.LSB_CONTENT % 'test2')
image1_path = os.path.join(self.temp_dir, 'test1.bin')
image2_path = os.path.join(self.temp_dir, 'test2.bin')
os.chdir(self.temp_dir)
# `rma create` to create 2 RMA shims.
self.SetupBundleEnvironment(image1_path)
self.ImageTool('rma', 'create', '-o', 'rma1.bin')
self.SetupBundleEnvironment(image2_path)
self.ImageTool('rma', 'create', '-o', 'rma2.bin',
'--active_test_list', 'test')
self.RemoveBundleEnvironment()
# Verify content of RMA shim.
DIR_CROS_PAYLOADS = image_tool.CrosPayloadUtils.GetCrosPayloadsDir()
PATH_CROS_RMA_METADATA = image_tool.CrosPayloadUtils.GetCrosRMAMetadata()
image_tool.Partition('rma1.bin', 1).CopyFile('tag', 'tag.1')
image_tool.Partition('rma1.bin', 3).CopyFile('tag', 'tag.3')
image_tool.Partition('rma1.bin', 1).CopyFile(
os.path.join(DIR_CROS_PAYLOADS, 'test1.json'), self.temp_dir)
image_tool.Partition('rma1.bin', 1).CopyFile(
PATH_CROS_RMA_METADATA, self.temp_dir)
self.assertEqual(open('tag.1').read().strip(), 'factory_shim')
self.assertEqual(open('tag.3').read().strip(), 'factory_shim')
with open('test1.json') as f:
data = json.load(f)
self.assertEqual(data['toolkit']['version'], u'Toolkit Version 1.0')
with open(os.path.basename(PATH_CROS_RMA_METADATA)) as f:
data = json.load(f)
self.assertEqual(data, [{'board': 'test1', 'kernel': 2, 'rootfs': 3}])
# `rma merge` to merge 2 different shims.
self.ImageTool(
'rma', 'merge', '-f', '-o', 'rma12.bin', '-i', 'rma1.bin', 'rma2.bin')
image_tool.Partition('rma12.bin', 1).CopyFile(
PATH_CROS_RMA_METADATA, self.temp_dir)
with open(os.path.basename(PATH_CROS_RMA_METADATA)) as f:
data = json.load(f)
self.assertEqual(data, [{'board': 'test1', 'kernel': 2, 'rootfs': 3},
{'board': 'test2', 'kernel': 4, 'rootfs': 5}])
# `rma merge` to merge a single-board shim with a universal shim.
with image_tool.Partition('rma2.bin', 3).Mount(rw=True) as d:
self.CheckCall('echo "factory_shim_2" | sudo dd of="%s"' %
os.path.join(d, 'tag'))
self.ImageTool(
'rma', 'merge', '-f', '-o', 'rma12_new.bin',
'-i', 'rma12.bin', 'rma2.bin', '--auto_select')
image_tool.Partition('rma12_new.bin', 5).CopyFile('tag', 'tag.5')
self.assertEqual(open('tag.5').read().strip(), 'factory_shim_2')
# `rma extract` to extract a board from a universal shim.
self.ImageTool('rma', 'extract', '-f', '-o', 'extract.bin',
'-i', 'rma12.bin', '-s', '2')
image_tool.Partition('extract.bin', 1).CopyFile(
PATH_CROS_RMA_METADATA, self.temp_dir)
with open(os.path.basename(PATH_CROS_RMA_METADATA)) as f:
data = json.load(f)
self.assertEqual(data, [{'board': 'test2', 'kernel': 2, 'rootfs': 3}])
# `rma replace` to replace the factory shim and toolkit.
factory_shim2_path = os.path.join(self.temp_dir, 'factory_shim2.bin')
shutil.copy(image2_path, factory_shim2_path)
with image_tool.Partition(factory_shim2_path, 3).Mount(rw=True) as d:
self.CheckCall('echo "factory_shim_3" | sudo dd of="%s"' %
os.path.join(d, 'tag'))
toolkit2_path = os.path.join(self.temp_dir, 'toolkit2.run')
with open(toolkit2_path, 'w') as f:
f.write('#!/bin/sh\necho Toolkit Version 2.0\n')
os.chmod(toolkit2_path, 0o755)
self.ImageTool(
'rma', 'replace', '-i', 'rma12.bin', '--board', 'test2',
'--factory_shim', factory_shim2_path, '--toolkit', toolkit2_path)
image_tool.Partition('rma12.bin', 5).CopyFile('tag', 'tag.5')
self.assertEqual(open('tag.5').read().strip(), 'factory_shim_3')
image_tool.Partition('rma12.bin', 1).CopyFile(
os.path.join(DIR_CROS_PAYLOADS, 'test2.json'), self.temp_dir)
with open('test2.json') as f:
data = json.load(f)
self.assertEqual(data['toolkit']['version'], u'Toolkit Version 2.0')
if __name__ == '__main__':
# Support `cros_payload` in bin/ folder.
new_path = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', '..', 'bin'))
os.putenv('PATH', ':'.join(os.getenv('PATH', '').split(':') + [new_path]))
sys.path.append(new_path)
unittest.main()
|
from secml.ml.features.tests import CPreProcessTestCases
from sklearn.preprocessing import Normalizer
from secml.array import CArray
from secml.ml.features.normalization import CNormalizerUnitNorm
from secml.optim.function import CFunction
class TestCNormalizerUnitNorm(CPreProcessTestCases):
"""Unittest for CNormalizerUnitNorm."""
def test_norm_unitnorm(self):
"""Test for CNormalizerUnitNorm."""
norm_type_lst = ["l1", "l2", "max"]
def sklearn_comp(array, norm_type):
self.logger.info("Norm type: {:}".format(norm_type))
self.logger.info("Original array is: {:}".format(array))
# Sklearn normalizer (requires float dtype input)
target = CArray(Normalizer(norm=norm_type).fit_transform(
array.astype(float).get_data()))
# Create our normalizer
result = CNormalizerUnitNorm(norm=norm_type).fit_transform(array)
self.logger.info("Correct result is:\n{:}".format(target))
self.logger.info("Our result is:\n{:}".format(result))
self.assert_array_almost_equal(target, result)
for norm_type in norm_type_lst:
sklearn_comp(self.array_dense, norm_type)
sklearn_comp(self.array_sparse, norm_type)
sklearn_comp(self.row_dense.atleast_2d(), norm_type)
sklearn_comp(self.row_sparse, norm_type)
sklearn_comp(self.column_dense, norm_type)
sklearn_comp(self.column_sparse, norm_type)
def test_chain(self):
"""Test a chain of preprocessors."""
x_chain = self._test_chain(
self.array_dense,
['min-max', 'pca', 'unit-norm'],
[{'feature_range': (-5, 5)}, {}, {}]
)
# Expected shape is (3, 3), as pca max n_components is 4-1
self.assertEqual((self.array_dense.shape[0],
self.array_dense.shape[1] - 1), x_chain.shape)
def _test_gradient(self):
"""Check the normalizer gradient."""
norm_type_lst = ["l1", "l2", "max"]
def compare_analytical_and_numerical_grad(array, norm_type):
def _get_transform_component(x, y):
trans = norm.transform(x).todense()
return trans[y]
norm = CNormalizerUnitNorm(norm=norm_type).fit(array)
if norm_type == "l1":
# if the norm is one we are computing a sub-gradient
decimal = 1
else:
decimal = 4
# check if they are almost equal
self.logger.info("Norm: {:}".format(norm))
# check the gradient comparing it with the numerical one
n_feats = array.size
for f in range(n_feats):
self.logger.info(
"Compare the gradient of feature: {:}".format(f))
# compute analytical gradient
w = CArray.zeros(array.size)
w[f] = 1
an_grad = norm.gradient(array, w=w)
self.logger.info("Analytical gradient is: {:}".format(
an_grad.todense()))
num_grad = CFunction(_get_transform_component).approx_fprime(
array.todense(), epsilon=1e-5, y=f)
self.logger.info("Numerical gradient is: {:}".format(
num_grad.todense()))
self.assert_array_almost_equal(an_grad, num_grad,
decimal=decimal)
for norm_type in norm_type_lst:
compare_analytical_and_numerical_grad(
self.row_dense.ravel(), norm_type=norm_type)
compare_analytical_and_numerical_grad(
self.row_sparse, norm_type=norm_type)
compare_analytical_and_numerical_grad(
(100 * self.row_dense).ravel(), norm_type=norm_type)
compare_analytical_and_numerical_grad(
(100 * self.row_sparse), norm_type=norm_type)
if __name__ == '__main__':
CPreProcessTestCases.main()
|
"""
Oscillators.
"""
import souffle.datatypes as dtt
##### Default constants #####
# Brusselator, unstable regime
BRUSS_A = 1.0
BRUSS_B = 3.0
# Lotka-Volterra
LOTKA_ALPHA = 1.5
LOTKA_BETA = 1.0
LOTKA_GAMMA = 2.0
LOTKA_DELTA = 1.0
# van der Pol oscillator
VANDERPOL_MU = 5.0
VANDERPOL_OMEGA = 1.0
#############################
def brusselator(t, X, **kwargs):
"""
The Brusselator.
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = 1 - (BRUSS_B + 1) * x + BRUSS_A * x**2 * y
y_dot = BRUSS_B * x - BRUSS_A * x**2 * y
elif len(kwargs) != 2:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: a, b")
else:
x_dot = 1 - (kwargs["b"] + 1) * x + kwargs["a"] * x**2 * y
y_dot = kwargs["b"] * x - kwargs["a"] * x**2 * y
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
def lotka_volterra(t, X, **kwargs):
"""
The Lotka-Volterra ("predator-prey") equations.
We define the following constants:
alpha = growth rate of prey
beta = rate at which predators consume prey
gamma = death rate of predators
delta = rate at which predators increase by consuming prey
    The prey population, x, increases at a rate of dx/dt = alpha * x, but is
    consumed by predators at a rate of dx/dt = -beta * x * y.
    The predator population, y, decreases at a rate of dy/dt = -gamma * y, but
    increases at a rate of dy/dt = delta * x * y.
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = x * (LOTKA_ALPHA - LOTKA_BETA * y)
y_dot = - y * (LOTKA_GAMMA - LOTKA_DELTA * x)
elif len(kwargs) != 4:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: alpha, beta, gamma, delta")
else:
x_dot = x * (kwargs["alpha"] - kwargs["beta"] * y)
y_dot = - y * (kwargs["gamma"] - kwargs["delta"] * x)
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
def vanderpol(t, X, **kwargs):
"""
The van der Pol oscillator. This is a non-conservative oscillator, with
nonlinear damping, that shows up in laser physics and electronic circuits.
The system is described by
    d^2x/dt^2 - mu * (1 - x^2) * dx/dt + omega^2 * x = 0
    where mu and omega are some constants.
    Applying the transformation y = dx/dt, we have the equations of motion
    dx/dt = y
    dy/dt = mu * (1 - x^2) * y - omega^2 * x
@type t: number
@param t: current time
@type X: vector
@param X: current state
@rtype: vector
@return: derivative
"""
x = X[0]
y = X[1]
if len(kwargs) == 0:
x_dot = y
y_dot = VANDERPOL_MU * (1 - x**2) * y - VANDERPOL_OMEGA**2 * x
elif len(kwargs) != 2:
raise ValueError("Bad kwargs; please provide all of the "\
"following parameters: mu, omega")
else:
x_dot = y
y_dot = kwargs["mu"] * (1 - x**2) * y - kwargs["omega"]**2 * x
X_dot = [x_dot, y_dot]
return dtt.Vector(X_dot)
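# Minimal usage sketch (assumes dtt.Vector is iterable; scipy is used purely for
# illustration and is not a dependency of this module):
#   from scipy.integrate import solve_ivp
#   sol = solve_ivp(lambda t, X: list(vanderpol(t, X, mu=5.0, omega=1.0)),
#                   (0.0, 30.0), [1.0, 0.0])
#   x, y = sol.y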
|
<reponame>MaxTurchin/pycopy-lib
"""
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
try:
CodeType = type(compile("1", "", "eval"))
except:
CodeType = None # TODO: Add better sentinel which can't match anything
MappingProxyType = None # TODO: Add better sentinel which can't match anything
SimpleNamespace = None # TODO: Add better sentinel which can't match anything
def _g():
yield 1
__g = _g()
GeneratorType = type(__g)
# Prevent "generator never ran" warning.
next(__g)
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
# tb = sys.exc_info()[2]
TracebackType = None # TODO: Add better sentinel which can't match anything
FrameType = None # TODO: Add better sentinel which can't match anything
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = None # TODO: Add better sentinel which can't match anything
MemberDescriptorType = None # TODO: Add better sentinel which can't match anything
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
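# Example: building a class without a `class` statement (the namespace is filled
# in by exec_body):
#   Point = new_class('Point', (), exec_body=lambda ns: ns.update(x=0, y=0))
#   assert Point().x == 0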
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
|
import getpass
import requests
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('username')
parser.add_argument('-p', '--password')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass("Password")
auth = (args.username, args.password)
control_node = '192.168.3.11'
node_map = {
"10.247.164.74": '192.168.3.11',
"10.247.164.76": "192.168.3.11",
"10.247.164.75": "192.168.3.11",
"10.247.164.83": "172.16.58.3",
"": "172.16.31.10",
}
print('\nUPDATING MEMBERSHIP\n')
node_url = 'http://{}:15986/_nodes/couchdb@{{}}'.format(control_node)
for old_node, new_node in node_map.items():
if old_node:
res = requests.get(node_url.format(old_node), auth=auth)
if res.status_code == 200:
rev = res.json()['_rev']
url = node_url.format(old_node)
res = requests.delete('{}?rev={}'.format(url, rev), auth=auth)
print('DELETE node {}'.format(old_node), res.status_code)
res = requests.get(node_url.format(new_node), auth=auth)
if res.status_code != 200:
res = requests.put(node_url.format(new_node), data="{}", auth=auth)
print('ADD node {}'.format(new_node), res.status_code)
print('\nUPDATING DATABASE DOCS\n')
dbs = [
"commcarehq",
"commcarehq__apps",
"commcarehq__auditcare",
"commcarehq__domains",
"commcarehq__fixtures",
"commcarehq__fluff-bihar",
"commcarehq__m4change",
"commcarehq__meta",
"commcarehq__mvp-indicators",
"commcarehq__receiverwrapper",
"commcarehq__users",
]
dbs_url = 'http://{}:15986/_dbs/{{}}'.format(control_node)
for db in dbs:
res = requests.get(dbs_url.format(db), auth=auth)
db_doc = res.text
new_db_doc = db_doc
for old_node, new_node in node_map.items():
if old_node:
new_db_doc = new_db_doc.replace(old_node, new_node)
if db_doc != new_db_doc:
            res = requests.put(dbs_url.format(db), data=new_db_doc, auth=auth)
print('UPDATE DB {}'.format(db), res.status_code)
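    # For example, a "by_node" entry such as "couchdb@10.247.164.74" inside a
    # _dbs document becomes "couchdb@192.168.3.11" after the loop above.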
print('\nRE-CREATING SYSTEM DATABASES\n')
system_dbs = [
"_global_changes",
"_replicator",
"_users"
]
for db in system_dbs:
res = requests.get('http://{}:15986/_dbs/{}'.format(control_node, db), auth=auth)
create = res.status_code == 404
if res.status_code == 200:
db_doc = res.json()
create = 'couchdb@10.247.164.12' in db_doc['by_node']
if create:
rev = db_doc['_rev']
res = requests.delete('http://{}:15986/_dbs/{}{}'.format(control_node, db, '?rev={}'.format(rev)), auth=auth)
print('DELETE db {}'.format(db), res.status_code)
if create:
res = requests.put('http://{}:15984/{}'.format(control_node, db), data="{}", auth=auth)
print("CREATE db {}".format(db), res.status_code)
if __name__ == '__main__':
main()
|
# Generates RegisterCodegenUnboxedKernels.cpp, UnboxingFunctions.h and UnboxingFunctions.cpp.
import argparse
import os
import pathlib
from dataclasses import dataclass
from tools.codegen.api import unboxing
from tools.codegen.api.translate import translate
from tools.codegen.api.types import CppSignatureGroup
from tools.codegen.api.unboxing import convert_arguments
from tools.codegen.context import method_with_native_function
from tools.codegen.gen import parse_native_yaml, cpp_string
from tools.codegen.model import NativeFunction, NativeFunctionsGroup, Variant
from tools.codegen.utils import Target, FileManager, mapMaybe, make_file_manager
from typing import Union, Sequence
from typing_extensions import Literal
# Generates UnboxingFunctions.h & UnboxingFunctions.cpp.
@dataclass(frozen=True)
class ComputeUnboxingFunctions:
target: Union[Literal[Target.DECLARATION], Literal[Target.DEFINITION]]
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
if self.target is Target.DECLARATION:
# Note [The ATen Codegen Unboxing API]
# Similar to the ATen Operators API, ATen Codegen Unboxing API lives in the at::unboxing namespace, and
# will be used by codegen unboxing wrappers (CodegenUnboxingWrappers.cpp).
# The Wrappers will be registered into torch::jit::OperatorRegistry using RegisterOperators API.
#
# Important characteristics about the Codegen Unboxing API:
# (1) It follows the OperatorRegistry API.
# This is kind of necessary to avoid overhead.
# For example: if it followed the C++ API, then all of the faithful C++ factory functions
# would need to wrap their arguments into TensorOptions only to unwrap them again.
# (2) Under the hood it calls C++ API.
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack);
"""
else:
sig_group = CppSignatureGroup.from_native_function(
f, method=(Variant.method in f.variants)
)
sig = sig_group.most_faithful_signature()
# parse arguments into C++ code
binding_list, code_list = convert_arguments(f)
# for each C++ argument, generate the conversion code
code_connector = "\n\t"
arg_connector = ", "
# function call and push back to stack
prefix = "self_base." if sig.method else "at::"
translated_args = translate(binding_list, sig.arguments(), method=sig.method)
args_str = f"{arg_connector.join(e.expr for e in translated_args)}"
if len(f.func.returns) == 0:
ret_str = ""
push_str = ""
else:
ret_str = "auto result_ = "
push_str = """
pack(stack, std::move(result_));
"""
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack) {{
{code_connector.join(code_list)}
drop(stack, {len(binding_list)});
{ret_str}{prefix}{sig.name()}({args_str});
{push_str}
}}
"""
# Generates RegisterCodegenUnboxedKernels.cpp.
@dataclass(frozen=True)
class ComputeCodegenUnboxedKernels:
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
        # We unconditionally generate function wrappers.
sig_group = CppSignatureGroup.from_native_function(
f, method=(Variant.method in f.variants)
)
sig = sig_group.most_faithful_signature()
# escape double quote in schema, get rid of extra double quotes
schema = cpp_string(str(sig.func))[1:-1]
return f"""
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::{schema}"),
[](Stack & stack) {{
RECORD_FUNCTION("{sig.name()}", std::vector<c10::IValue>());
at::unboxing::{unboxing.name(f)}(stack);
}},
aliasAnalysisFromSchema()
),
"""
def gen_unboxing(
*,
native_functions: Sequence[NativeFunction],
cpu_fm: FileManager,
) -> None:
def key_func(fn: Union[NativeFunction, NativeFunctionsGroup]) -> str:
return fn.root_name
cpu_fm.write_sharded(
"UnboxingFunctions.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"definitions": [ComputeUnboxingFunctions(Target.DEFINITION)(fn)]
},
num_shards=5,
sharded_keys={"definitions"},
)
cpu_fm.write(
"UnboxingFunctions.h",
lambda: {
"declarations": list(
mapMaybe(ComputeUnboxingFunctions(Target.DECLARATION), native_functions)
),
},
)
cpu_fm.write_sharded(
"RegisterCodegenUnboxedKernels.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {"unboxed_ops": [ComputeCodegenUnboxedKernels()(fn)]},
num_shards=5,
sharded_keys={"unboxed_ops"},
)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate unboxing source files")
parser.add_argument(
"-s",
"--source-path",
help="path to source directory for ATen",
default="aten/src/ATen",
)
parser.add_argument(
"-d", "--install_dir", help="output directory", default="build/aten/src/ATen"
)
parser.add_argument(
'-o',
'--output-dependencies',
help='output a list of dependencies into the given file and exit')
parser.add_argument(
'--dry-run', action='store_true',
help='run without writing any files (still updates outputs)')
options = parser.parse_args()
native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
cpu_fm = make_file_manager(options=options)
gen_unboxing(native_functions=native_functions, cpu_fm=cpu_fm)
if options.output_dependencies:
depfile_path = pathlib.Path(options.output_dependencies).resolve()
depfile_name = depfile_path.name
depfile_stem = depfile_path.stem
path = depfile_path.parent / depfile_name
cpu_fm.write_outputs(depfile_stem, str(path))
if __name__ == "__main__":
main()
|
<reponame>hoffmannmatheus/eaZy<filename>apps/device_controller/zwave_controller.py
import sys, os
import openzwave
from openzwave.node import ZWaveNode
from openzwave.value import ZWaveValue
from openzwave.scene import ZWaveScene
from openzwave.controller import ZWaveController
from openzwave.network import ZWaveNetwork
from openzwave.option import ZWaveOption
from louie import dispatcher, All
from threading import Timer
class ZWaveController():
network = None
def setup(self, updateCallback):
dispatcher.connect(self.onNetworkReady, ZWaveNetwork.SIGNAL_NETWORK_READY)
dispatcher.connect(self.onNetworkStart, ZWaveNetwork.SIGNAL_NETWORK_STARTED)
dispatcher.connect(self.onNetworkFailed, ZWaveNetwork.SIGNAL_NETWORK_FAILED)
# TODO: make udev.symlink rule to a specific port (USB0/1)
# Uncomment this to run on PC (remember to update the zwave config path)
#options = ZWaveOption("/dev/ttyUSB0", \
# config_path="/home/<USER>/software/python-openzwave-0.2.6/openzwave/config", \
options = ZWaveOption("/dev/serial/by-path/platform-bcm2708_usb-usb-0:1.2:1.0-port0", \
config_path="/home/pi/software/python-openzwave-0.2.6/openzwave/config", \
user_path=".", cmd_line="")
options.set_append_log_file(False)
options.set_console_output(False)
options.set_save_log_level('Debug')
        options.set_poll_interval(30)
options.set_suppress_value_refresh(False)
options.addOptionBool("AssumeAwake", True)
options.set_logging(False)
options.lock()
self.network = ZWaveNetwork(options, autostart=False)
self.onDeviceUpdateCallback = updateCallback
self.network.start()
self.addedConnections = False
Timer(2*60, self.setupConnections).start()
def tearDown(self):
        self.network.stop()
def getDeviceList(self):
devices = []
for node in self.network.nodes:
if node == 1: continue # don't add the controller
devices.append(self.buildDevice(node))
return devices
def buildDevice(self, node):
dev = {}
dev['id'] = int(self.network.home_id)*1000 + node
dev['type'] = 'unknown'
dev['product_name'] = self.network.nodes[node].product_name
if self.getValueForLabel(node, 'Switch'):
dev['type'] = 'appliance'
            val = self.getValueForLabel(node, 'Energy')
            dev['consumption_accumulated'] = val if val is not None else 0
            val = self.getValueForLabel(node, 'Power')
            dev['consumption_current'] = val if val is not None else 0
if self.getValueForLabel(node, 'Switch') == 'True':
dev['state'] = 'on'
else:
dev['state'] = 'off'
if self.getValueForLabel(node, 'Sensor'):
dev['type'] = 'sensor'
dev['temperature'] = self.getValueForLabel(node, 'Temperature')
dev['luminance'] = self.getValueForLabel(node, 'Luminance')
dev['presence'] = "undetected"
dev['battery_level'] = self.getValueForLabel(node, 'Battery Level')
return dev
def getValueForLabel(self, node, label):
for v in self.network.nodes[node].values:
if self.network.nodes[node].values[v].label == label:
#self.network.nodes[node].refresh_value(v);
return str(self.network.nodes[node].values[v].data_as_string)
return None
def setDeviceState(self, device_id, state):
node = device_id%1000
if not self.network.nodes[node]: return
        for val in self.network.nodes[node].get_switches():
            self.network.nodes[node].set_switch(val, state == 'on')
def setupConnections(self):
self.addedConnections = True
dispatcher.connect(self.onNodeUpdate, ZWaveNetwork.SIGNAL_NODE)
dispatcher.connect(self.onNodeUpdateValue, ZWaveNetwork.SIGNAL_VALUE)
dispatcher.connect(self.onNodeUpdateValue, ZWaveNetwork.SIGNAL_NODE_EVENT)
dispatcher.connect(self.onNodeUpdateValue, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
dispatcher.connect(self.onNodeUpdateValue, ZWaveNetwork.SIGNAL_VALUE_REFRESHED)
# Event Handlers
def onNetworkStart(self, network):
print("network started : homeid %0.8x - %d nodes were found." % \
(network.home_id, network.nodes_count))
def onNetworkFailed(self, network):
print("network can't load :(")
def onNetworkReady(self, network):
print("network : I'm ready : %d nodes were found." % network.nodes_count)
print("network : my controller is : %s" % network.controller)
self.network = network
if not self.addedConnections:
self.setupConnections()
def onNodeUpdate(self, network, node):
print('node UPDAAAATEEE : %s.' % node)
self.network = network
def onNodeUpdateValue(self, network, node, value):
print('node : %s.' % node)
print('value: %s.' % value)
if node.node_id == 1: return # don't send controller notifications
dev = self.buildDevice(node.node_id)
if type(value) is int:
if dev['type'] == 'sensor' and value == 255:
dev['presence'] = 'detected'
self.network = network
self.onDeviceUpdateCallback(dev)
if type(value) is ZWaveValue:
if dev['type'] == 'appliance' and value.label == 'Switch':
                state = 'on' if value.data else 'off'
dev['state'] = state
self.network = network
self.onDeviceUpdateCallback(dev)
if dev['type'] == 'appliance' and value.label == 'Power':
power = str(value.data)
if dev['state'] == 'off' or (dev['state'] == 'on' and float(power) != 0):
dev['consumption_current'] = power
self.network = network
self.onDeviceUpdateCallback(dev)
else:
self.network = network
                    print('WHAATF do i do with this? %s' % power)
if dev['type'] == 'appliance' and value.label == 'Energy':
energy = str(value.data)
dev['consumption_accumulated'] = energy
self.network = network
self.onDeviceUpdateCallback(dev)
if dev['type'] == 'sensor' and value.label == 'Temperature':
temperature = str(value.data)
dev['temperature'] = temperature
self.network = network
self.onDeviceUpdateCallback(dev)
if dev['type'] == 'sensor' and value.label == 'Luminance':
luminance = str(value.data)
dev['luminance'] = luminance
self.network = network
self.onDeviceUpdateCallback(dev)
if value.label == 'Battery Level':
battery = str(value.data)
dev['battery_level'] = battery
self.network = network
self.onDeviceUpdateCallback(dev)
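# Minimal usage sketch (hypothetical callback; the `dev` dicts are those built by
# buildDevice above):
#   def on_update(dev):
#       print(dev['id'], dev.get('state'))
#   controller = ZWaveController()
#   controller.setup(on_update)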
|
<reponame>zizai/gym-electric-motor
import numpy as np
from scipy.optimize import root_scalar
from .conf import default_motor_parameter
import warnings
class _DcMotor(object):
"""
The _DcMotor and its subclasses implement the technical system of a DC motor. \n
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
The state array of the base motor is **[ omega, i_a, i_e ]**.
It has got two input voltages **[u_a, u_e]**.
"""
# Indices for array accesses
OMEGA_IDX = 0
I_A_IDX = 1
I_E_IDX = 2
U_A_IDX = 3
U_E_IDX = 4
@property
def motor_parameter(self):
return self._motor_parameter
def __init__(self, load_eq, motor_parameter=None, load_j=0, motor_type='DcExtEx'):
"""
Basic setting of all the common motor parameters.
Args:
load_eq: Load equation of the load model
motor_parameter(dict or int): Motor parameter set. This can either be an integer or a dictionary.
If an integer is passed the motor parameter file from the conf.py is loaded.
If a dict is passed this will become the parameter file.
"""
motor_param = motor_parameter if motor_parameter is not None else 0
try:
if type(motor_param) is dict:
self._motor_parameter = motor_param
elif type(motor_param) is int:
self._motor_parameter = default_motor_parameter[motor_type][motor_param]
else:
raise TypeError
except TypeError:
warnings.warn('Invalid Type for motor parameter, fell back to default set 0')
self._motor_parameter = default_motor_parameter[0]
except IndexError:
warnings.warn('Invalid Index for motor parameter, fell back to default set 0')
self._motor_parameter = default_motor_parameter[0]
self._model_constants = None
# :Matrix that contains the constant parameters of the systems jacobian for faster computation
self._jac_constants = None
# :The last input voltage at the motor
self.u_in = 0.0
# Write parameters to local variables for speedup
self.u_sup = self.motor_parameter['u_sup']
if 'l_e_prime' in self.motor_parameter:
self.l_e_prime = self.motor_parameter['l_e_prime']
if 'psi_e' in self.motor_parameter:
self.psi_e = self.motor_parameter['psi_e']
self._load_j = load_j
self._update_model()
self.u_in = 0.0
self.u_sup = self.motor_parameter['u_sup']
# Calculate maximum possible values for the state variables
self._update_limits(load_eq)
def induced_voltage(self, state):
"""
The induced voltage of the armature circuit.
Args:
state: The current state array of the motor
Returns:
The voltage induced by the armature in Volt
"""
return self.l_e_prime * state[self.I_E_IDX] * state[self.OMEGA_IDX]
def update_params(self, motor_parameter):
"""
Changes motor parameters and changes directly the internally model parameters, too.
Args:
motor_parameter: Motor parameters that shall be changed
"""
self._motor_parameter.update(motor_parameter)
self.l_e_prime = self._motor_parameter['l_e_prime']
if self._motor_parameter['torque_N'] == 0:
self._motor_parameter['torque_N'] = self.torque(
[self._motor_parameter['omega_N'], self._motor_parameter['i_a_N'], self._motor_parameter['i_e_N']])
self._update_model()
def _update_model(self):
"""
Update the motors model parameters with the motor parameters.
Called internally when the motor parameters are changed or the motor is initialized.
"""
self.l_e_prime = self.motor_parameter['l_e_prime']
self._model_constants = np.array([
[0, 0, self.motor_parameter['l_e_prime'], 0, 0, 0, -1],
[-self.motor_parameter['r_a'], 0, 0, -self.motor_parameter['l_e_prime'], 1, 0, 0],
[0, -self.motor_parameter['r_e'], 0, 0, 0, 1, 0]
])
self._model_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._model_constants[self.I_A_IDX] /= self._motor_parameter['l_a']
self._model_constants[self.I_E_IDX] /= self._motor_parameter['l_e']
self._jac_constants = np.array([
[self._motor_parameter['l_e_prime'], self._motor_parameter['l_e_prime'], -1],
[-self._motor_parameter['r_a'], -self._motor_parameter['l_e_prime'], - self._motor_parameter['l_e_prime']],
[0, -self._motor_parameter['r_e'], 0]
])
self._jac_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._jac_constants[self.I_A_IDX] /= self._motor_parameter['l_a']
self._jac_constants[self.I_E_IDX] /= self._motor_parameter['l_e']
def torque(self, state):
"""
The torque equation of the motor.
Args:
state: The current state array of the motor
Returns:
The current torque of the motor based on the state.
"""
return self.l_e_prime * state[self.I_A_IDX] * state[self.I_E_IDX]
def torque_max(self, omega):
"""
The speed-torque equation of the motor.
Calculate the maximum possible torque for a given omega for nominal currents.
Args:
omega: The speed of the motor in rad/s
Returns:
The maximal possible torque in Nm for the given speed
"""
return min(self.u_sup**2 / (4 * self.motor_parameter['r_a'] * omega), self.motor_parameter['torque_N'])
def i_a_max(self, omega):
"""
The maximum possible current through the armature circuit for a given speed omega for nominal supply voltage.
Args:
omega: The speed of the motor in rad/s
Returns:
The maximum possible current i_a in Ampere for the given speed
"""
return max(0, min(self.motor_parameter['i_a_N'], self.u_sup *
(1 - self.l_e_prime * omega / self.motor_parameter['r_e']) / self.motor_parameter['r_a']))
def i_in(self, state):
"""
The current flowing into the motor in Amperes.
Args:
state: The current state array of the motor
Returns:
The current flowing into the motor.
"""
return state[self.I_A_IDX], state[self.I_E_IDX]
def model(self, state, t_load, u_in):
"""
The differential system equation.
Args:
state: The state array of the motor.
t_load: The load calculated by the load model
u_in: The input voltages at the terminals [u_a, u_e]
Returns:
The derivative of the state variables omega, i_a, i_e
"""
self.u_in = u_in
return np.matmul(self._model_constants, np.array([
state[self.I_A_IDX],
state[self.I_E_IDX],
state[self.I_A_IDX] * state[self.I_E_IDX],
state[self.OMEGA_IDX] * state[self.I_E_IDX],
u_in[0],
u_in[1],
t_load
]))
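    # Reading aid (derived from the rows of _model_constants above): the matmul
    # in model() evaluates to the state equations
    #   d(omega)/dt = (l_e' * i_a * i_e - t_load) / (j + load_j)
    #   d(i_a)/dt   = (-r_a * i_a - l_e' * omega * i_e + u_a) / l_a
    #   d(i_e)/dt   = (-r_e * i_e + u_e) / l_e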
def jac(self, state, dtorque_domega):
"""
The jacobian of the model.
Args:
state: The state values of the motor
dtorque_domega: The derivative of the load by omega. Calculated by the load model
Returns:
The jacobian matrix of the current motor state
"""
return self._jac_constants * np.array([
[state[self.I_E_IDX], state[self.I_A_IDX], dtorque_domega],
[1, state[self.OMEGA_IDX], state[self.I_E_IDX]],
[0, 1, 0]
])
def bandwidth(self):
"""
Calculate the bandwidth of the circuits based on the motor parameters
Returns:
Tuple of the armature circuit and excitation circuit bandwidth
"""
return (self.motor_parameter['r_a'] / self.motor_parameter['l_a'],
self.motor_parameter['r_e'] / self.motor_parameter['l_e'])
def _update_limits(self, load_fct):
"""
Calculate for all the missing maximal values the physical maximal possible values considering nominal voltages.
Args:
load_fct: load function of the mechanical model. Must be of type load_fct(omega)
"""
if self._motor_parameter['i_a_N'] == 0.0:
self._motor_parameter['i_a_N'] = self._motor_parameter['u_a_N'] / self._motor_parameter['r_a']
if self.motor_parameter['i_e_N'] == 0.0:
self.motor_parameter['i_e_N'] = self.motor_parameter['u_e_N'] / self.motor_parameter['r_e']
if self.motor_parameter['torque_N'] == 0.0:
self.motor_parameter['torque_N'] = self.torque(
[0, self.motor_parameter['i_a_N'], self.motor_parameter['i_e_N']]
)
        # If the net torque never reaches zero, set omega max to the value where omega_dot equals 0.001 * omega
try:
max_omega = root_scalar(
lambda omega: self.torque_max(omega) - load_fct(omega),
bracket=[1e-4, 100000.0]).root
except ValueError:
max_omega = root_scalar(
                lambda omega: (self.torque_max(omega) - load_fct(omega))
/ (self.motor_parameter['J_rotor'] + self._load_j) - 0.001 * omega,
bracket=[1e-4, 100000.0]).root
if self._motor_parameter['omega_N'] == 0:
self._motor_parameter['omega_N'] = max_omega
else:
self.motor_parameter['omega_N'] = min(max_omega, self.motor_parameter['omega_N'])
class DcShuntMotor(_DcMotor):
"""
Class that models a DC shunt motor.
The state array of this motor is: [ omega, torque, i_a, i_e ].
It has got one input voltage: u_in
"""
def __init__(self, load_fct, motor_parameter=None, load_j=0):
super().__init__(load_fct, motor_parameter, load_j, motor_type='DcShunt')
self.u_in = 0
if self.motor_parameter['torque_N'] == 0:
self.motor_parameter['torque_N'] = self.torque(np.array([self.motor_parameter['omega_N'],
self.motor_parameter['i_a_N'],
self.motor_parameter['i_e_N']
]))
def bandwidth(self):
return max(super().bandwidth())
def torque(self, state):
return super().torque(state)
def i_in(self, state):
return state[self.I_A_IDX] + state[self.I_E_IDX]
def model(self, state, t_load, u_in):
dots = super().model(state, t_load, (u_in, u_in))
self.u_in = u_in
return dots
def jac(self, state, dtorque_domega):
return super().jac(state, dtorque_domega)
def torque_max(self, omega):
"""
The speed-torque equation of the motor.
Calculate the maximum possible torque for a given omega for nominal currents.
Args:
omega: The speed of the motor in rad/s
Returns:
The maximal possible torque in Nm for the given speed
"""
return min(self.l_e_prime * self.u_sup**2
* (
(1 - self.l_e_prime / self.motor_parameter['r_e'] * omega)
/ (self.motor_parameter['r_a'] + self.motor_parameter['r_e'])
),
self.motor_parameter['torque_N'])
def _update_limits(self, load_fct):
"""
Calculate for all the missing maximal values the physical maximal possible values considering nominal voltages.
Args:
load_fct: load function of the mechanical model. Must be of type load_fct(omega)
"""
if self.motor_parameter['i_a_N'] == 0.0:
self.motor_parameter['i_a_N'] = self.motor_parameter['u_N'] / self.motor_parameter['r_a']
if self.motor_parameter['i_e_N'] == 0.0:
self.motor_parameter['i_e_N'] = self.motor_parameter['u_N'] / self.motor_parameter['r_e']
if self.motor_parameter['torque_N'] == 0.0:
self.motor_parameter['torque_N'] = self.torque(
[0, self.motor_parameter['i_a_N'], self.motor_parameter['i_e_N']]
)
        # If the net torque never reaches zero, set omega max to the value where omega_dot equals 0.001 * omega
try:
max_omega = root_scalar(
lambda omega: self.torque_max(omega) - load_fct(omega),
bracket=[1e-4, 100000.0]).root
except ValueError:
max_omega = root_scalar(
lambda omega: (self.torque_max(omega) - load_fct(omega))
/ (self.motor_parameter['J_rotor'] + self._load_j) - 0.001 * omega,
bracket=[1e-4, 100000.0]).root
if self.motor_parameter['omega_N'] == 0:
self.motor_parameter['omega_N'] = max_omega
else:
self.motor_parameter['omega_N'] = min(max_omega, self.motor_parameter['omega_N'])
class DcSeriesMotor(_DcMotor):
"""
Class that models a DC series motor.
The state array of this motor is: [ omega, torque, i ].
It has got one input voltage: u_in
"""
I_IDX = 1
def __init__(self, load_fct, motor_parameter=None, load_j=0):
super().__init__(load_fct, motor_parameter, load_j, motor_type='DcSeries')
self.u_in = 0
self._update_model()
if self.motor_parameter['torque_N'] == 0:
self.motor_parameter['torque_N'] = self.torque(np.array([self.motor_parameter['omega_N'],
self.motor_parameter['i_N'],
self.motor_parameter['i_N']
]))
def induced_voltage(self, state):
return self.l_e_prime * state[self.I_IDX] * state[self.OMEGA_IDX]
def update_params(self, motor_params):
self._motor_parameter.update(motor_params)
self.l_e_prime = self.motor_parameter['l_e_prime']
if self._motor_parameter['torque_N'] == 0:
self._motor_parameter['torque_N'] = self.torque(
[self._motor_parameter['omega_N'], self._motor_parameter['i_N'], self._motor_parameter['i_N']]
)
self._update_model()
def _update_model(self):
self._model_constants = np.array([
[0, 0, self.motor_parameter['l_e_prime'], 0, -1],
[-self.motor_parameter['r_a'] - self.motor_parameter['r_e'], -self.motor_parameter['l_e_prime'], 0, 1, 0]
])
self._model_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._model_constants[self.I_IDX] /= (self._motor_parameter['l_a'] + self._motor_parameter['l_e'])
self._jac_constants = np.array([
[2 * self._motor_parameter['l_e_prime'], -1],
[-self._motor_parameter['l_e_prime'] - self._motor_parameter['r_a'] - self._motor_parameter['r_e'],
- self._motor_parameter['l_e_prime']]
])
self._jac_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._jac_constants[self.I_IDX] /= (self._motor_parameter['l_a'] + self._motor_parameter['l_e'])
def torque(self, state):
return super().torque([state[self.OMEGA_IDX], state[self.I_IDX], state[self.I_IDX]])
def torque_max(self, omega):
return min(
self.l_e_prime
* (self.u_sup / (self.motor_parameter['r_a'] + self.motor_parameter['r_e'] + self.l_e_prime * omega))**2,
self.motor_parameter['torque_N'])
def i_a_max(self, omega):
return self.u_sup / (self.motor_parameter['r_a'] + self.motor_parameter['r_e'] + self.l_e_prime * omega)
def model(self, state, t_load, u_in):
self.u_in = u_in
return np.matmul(self._model_constants, np.array([
state[self.I_IDX],
state[self.OMEGA_IDX] * state[self.I_IDX],
state[self.I_IDX] ** 2,
u_in,
t_load
]))
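    # Reading aid: the matmul in model() evaluates to
    #   d(omega)/dt = (l_e' * i**2 - t_load) / (j + load_j)
    #   d(i)/dt     = (-(r_a + r_e) * i - l_e' * omega * i + u_in) / (l_a + l_e)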
def i_in(self, state):
return state[self.I_IDX]
def jac(self, state, load_jac):
return self._jac_constants * np.array([
[1, load_jac],
[state[self.OMEGA_IDX], state[self.I_IDX]]
])
def bandwidth(self):
return ((self.motor_parameter['r_a'] + self.motor_parameter['r_e'])
/ (self.motor_parameter['l_a'] + self.motor_parameter['l_e']))
def _update_limits(self, load_fct):
"""
Calculate for all the missing maximal values the physical maximal possible values considering nominal voltages.
Args:
load_fct: load function of the mechanical model. Must be of type load_fct(omega)
"""
if self.motor_parameter['i_N'] == 0.0:
self.motor_parameter['i_N'] = \
self.motor_parameter['u_N'] / (self.motor_parameter['r_a'] + self.motor_parameter['r_e'])
if self.motor_parameter['torque_N'] == 0.0:
self.motor_parameter['torque_N'] = self.torque([0, self.motor_parameter['i_N']])
        # If the net torque never reaches zero, set omega max to the value where omega_dot equals 0.001 * omega
try:
max_omega = root_scalar(
lambda omega: self.torque_max(omega) - load_fct(omega),
bracket=[1e-4, 100000.0]).root
except ValueError:
max_omega = root_scalar(
lambda omega: (self.torque_max(omega) - load_fct(omega))
/ (self.motor_parameter['j'] + self._load_j) - 0.001 * omega,
bracket=[1e-4, 100000.0]).root
if self._motor_parameter['omega_N'] == 0:
self._motor_parameter['omega_N'] = max_omega
else:
self.motor_parameter['omega_N'] = min(max_omega, self.motor_parameter['omega_N'])
class DcExternallyExcited(_DcMotor):
"""
The externally excited motor is basically the same as the base motor
"""
def __init__(self, load_fct, motor_params=None, load_j=0):
super().__init__(load_fct, motor_params, load_j, motor_type='DcExtEx')
self.motor_parameter['torque_N'] = self.torque(np.array([self.motor_parameter['omega_N'],
self.motor_parameter['i_a_N'],
self.motor_parameter['i_e_N']
]))
class DcPermanentlyExcited(_DcMotor):
"""
Class that models a permanently excited DC motor.
The state array of this motor is: [ omega, torque, i ].
It has got one input voltage: u_in
"""
I_IDX = 1
def __init__(self, load_fct, motor_parameter=None, load_j=0):
super().__init__(load_fct, motor_parameter, load_j, motor_type='DcPermEx')
self.u_in = 0.0
def induced_voltage(self, state):
return self.psi_e * state[self.OMEGA_IDX]
def torque(self, state):
return self.psi_e * state[self.I_IDX]
def torque_max(self, omega):
return min(
self.motor_parameter['torque_N'],
self.psi_e * (self.u_sup - self.psi_e * omega) / self.motor_parameter['r_a']
)
def i_a_max(self, omega):
return min(self.motor_parameter['i_N'], (self.u_sup - self.psi_e * omega) / self.motor_parameter['r_a'])
def update_params(self, motor_params):
self._motor_parameter.update(motor_params)
self.l_e_prime = self.motor_parameter['l_e_prime']
if self._motor_parameter['torque_N'] == 0:
self._motor_parameter['torque_N'] = self.torque(
[self._motor_parameter['omega_N'], self._motor_parameter['i_N']])
self._update_model()
def _update_model(self):
self._model_constants = np.array([
[0.0, self.motor_parameter['psi_e'], -1.0, 0.0],
[-self.motor_parameter['psi_e'], -self.motor_parameter['r_a'], 0.0, 1.0]
])
self._model_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._model_constants[self.I_IDX] /= self._motor_parameter['l_a']
self._jac_constants = np.array([
[-1.0, self.motor_parameter['psi_e']],
[-self.motor_parameter['psi_e'], -self.motor_parameter['r_a']]
])
self._jac_constants[self.OMEGA_IDX] /= (self._motor_parameter['j'] + self._load_j)
self._jac_constants[self.I_IDX] /= self._motor_parameter['l_a']
def i_in(self, state):
return state[self.I_IDX]
def model(self, state, t_load, u_in):
self.u_in = u_in
return np.matmul(self._model_constants, np.array([state[self.OMEGA_IDX], state[self.I_IDX], t_load, u_in]))
def jac(self, state, dtorque_domega):
"""
The Jacobian equation of the motor.
Args:
state: the current motor state
dtorque_domega: the derivative of the load function over omega
Returns:
The solution for the Jacobian equation for the Motor at the given state
"""
return self._jac_constants * np.array([[1.0, dtorque_domega],
[1.0, 1.0]])
def bandwidth(self):
return self.motor_parameter['r_a'] / self.motor_parameter['l_a']
def _update_limits(self, load_fct):
"""
        Calculate the physically possible maximum values for all limits that are still missing, taking the nominal voltages into account.
Args:
load_fct: load function of the mechanical model. Must be of type load_fct(omega)
"""
if self.motor_parameter['i_N'] == 0.0:
self.motor_parameter['i_N'] = self.motor_parameter['u_N'] / self.motor_parameter['r_a']
if self.motor_parameter['torque_N'] == 0.0:
self.motor_parameter['torque_N'] = self.torque([0, self.motor_parameter['i_N']])
self.motor_parameter['omega_N'] = root_scalar(
lambda omega: self.motor_parameter['psi_e'] * self.motor_parameter['i_N']
* (self.motor_parameter['u_N'] - self.motor_parameter['psi_e'] * omega)
/ self.motor_parameter['r_a'] - load_fct(omega),
bracket=(1e-4, 10000.0)).root
        # If torque(omega) never reaches zero, set max omega to the value where omega_dot equals 0.001 * omega
try:
max_omega = root_scalar(
lambda omega: self.torque_max(omega) - load_fct(omega),
bracket=[1e-4, 100000.0]).root
except ValueError:
max_omega = root_scalar(
lambda omega: (self.torque_max(omega) - load_fct(omega))
                / (self.motor_parameter['j'] + self._load_j) - 0.001 * omega,
bracket=[1e-4, 100000.0]).root
if self._motor_parameter['omega_N'] == 0:
self._motor_parameter['omega_N'] = max_omega
else:
self.motor_parameter['omega_N'] = min(max_omega, self.motor_parameter['omega_N'])
def make(model='Series', load_fct=lambda omega: 0.0, motor_parameter=None, load_j=0):
"""
Dc Motor Factory function.
Args:
model: Define the Dc Motor Type ['Shunt', 'Series', 'ExtEx', 'PermEx']
load_fct: The load function of the load model
motor_parameter: Motor parameters to update
load_j: Loads Moment of inertia in kgm²
Returns:
An instantiated Dc Motor
"""
models = ['DcShunt', 'DcSeries', 'DcExtEx', 'DcPermEx']
assert model in models, "No Model: " + model + "\n Must be in: " + str(models)
typ = {
'DcSeries': DcSeriesMotor,
'DcShunt': DcShuntMotor,
'DcExtEx': DcExternallyExcited,
'DcPermEx': DcPermanentlyExcited
}[model]
return typ(load_fct, motor_parameter, load_j)
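# A minimal usage sketch of the factory above. The load function and parameter
# values are hypothetical; the accepted motor_parameter keys depend on the
# concrete motor class, and omitted keys are presumably filled from defaults:
#
#   motor = make(
#       model='DcPermEx',
#       load_fct=lambda omega: 0.01 * omega,  # linear friction load
#       motor_parameter={'psi_e': 0.165, 'r_a': 0.78, 'l_a': 6.3e-3},
#       load_j=0.025,
#   )
#   state_dot = motor.model(state=[omega, i], t_load=0.0, u_in=24.0)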
|
<gh_stars>0
# Copyright wavedrompy contributors.
# SPDX-License-Identifier: MIT
# Translated to Python from original file:
# https://github.com/drom/wavedrom/blob/master/src/WaveDrom.js
from math import floor
import svgwrite
from .base import SVGBase
from .tspan import TspanParser
class Options:
def __init__(self, vspace=80, hspace=640, lanes=2, bits=32, fontsize=14, bigendian=False, fontfamily='sans-serif',
fontweight='normal'):
self.vspace = vspace if vspace > 19 else 80
self.hspace = hspace if hspace > 39 else 640
self.lanes = lanes if lanes > 0 else 2
self.bits = bits if bits > 4 else 32
self.fontsize = fontsize if fontsize > 5 else 14
self.bigendian = bigendian
self.fontfamily = fontfamily
self.fontweight = fontweight
colors = {2: 0, 3: 80, 4: 170, 5: 45, 6: 126, 7: 215}
def type_style(t):
    if t in colors:
return ";fill:hsl({},100%,50%)".format(colors[t])
else:
return ''
class BitField(SVGBase):
def tspan_parse(self, text):
parser = TspanParser()
parser.feed(text)
return parser.get_text()
def hline(self, len, x=0, y=0):
return self.element.line(start=(x,y), end=(x+len,y))
def vline(self, len, x=0, y=0):
return self.element.line(start=(x,y), end=(x,y+len))
def get_text(self, body, x, y=None):
x_list = None
if x:
x_list = [x]
y_list = None
if y:
y_list = [y]
text = self.element.text('', x=x_list, y=y_list)
for t in self.tspan_parse(str(body)):
text.add(t)
return text
def get_label(self, attr, x, y, step=0, length=0):
        if isinstance(attr, int):
res = []
for i in range(length):
val = (attr >> i) & 1
xi = x + step * (length / 2 - i - 0.5)
res.append(self.get_text(val, xi, y))
return res
else:
if '\n' in attr:
names = attr.split('\n')
count = len(names)
return [
self.get_text(name, x, y + (-(count - 1) / 2 + i) * self.opt.fontsize)
for (i, name) in enumerate(names)
]
return [self.get_text(attr, x, y)]
def get_attrs(self, e, step, lsbm, msbm):
x = step * (self.mod - ((msbm + lsbm) / 2) - 1)
attr = e['attr']
bits = e['bits']
attrs = [attr]
# 'attr' supports both a scalar and a list.
if isinstance(attr, list):
attrs = attr
return [self.get_label(a, x, 16 * i, step, bits)
for (i, a) in enumerate(attrs)]
def labelArr(self, desc):
step = self.opt.hspace / self.mod
bits = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace/5))
names = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace/2+4))
attrs = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace))
blanks = self.container.g(transform="translate(0,{})".format(self.opt.vspace/4))
for e in desc:
lsbm = 0
msbm = self.mod - 1
lsb = self.index * self.mod
msb = (self.index + 1) * self.mod - 1
if floor(e["lsb"] / self.mod) == self.index:
lsbm = e["lsbm"]
lsb = e["lsb"]
if floor(e["msb"] / self.mod) == self.index:
msb = e["msb"]
msbm = e["msbm"]
else:
if floor(e["msb"] / self.mod) == self.index:
msb = e["msb"]
msbm = e["msbm"]
else:
continue
bits.add(self.get_text(lsb, x=[step*(self.mod-lsbm - 1)]))
if lsbm != msbm:
bits.add(self.get_text(msb, x=[step * (self.mod - msbm - 1)]))
if e.get('name'):
x = step*(self.mod-((msbm+lsbm)/2)-1)
for n in self.get_label(e['name'], x, 0):
names.add(n)
if not e.get('name') or e.get('type'):
style = 'fill-opacity:0.1' + type_style(e.get('type', 0))
insert = [step * (self.mod - msbm - 1), 0]
size = [step * (msbm - lsbm + 1), self.opt.vspace/2]
blanks.add(self.element.rect(insert=insert, size=size, style=style))
if e.get('attr') is not None:
for attr in self.get_attrs(e, step, lsbm, msbm):
for a in attr:
attrs.add(a)
g = self.container.g()
g.add(blanks)
g.add(bits)
g.add(names)
g.add(attrs)
return g
def labels(self, desc):
g = self.container.g(text_anchor='middle')
g.add(self.labelArr(desc))
return g
def cage(self, desc):
hspace = self.opt.hspace
vspace = self.opt.vspace
mod = self.mod
g = self.container.g(stroke='black', stroke_width=1, stroke_linecap='round', transform="translate(0,{})".format(vspace/4))
        g.add(self.hline(hspace))
        g.add(self.vline(vspace / 2))
        g.add(self.hline(hspace, 0, vspace / 2))
i = self.index * mod
for j in range(mod, 0, -1):
            if j == mod or any(e["lsb"] == i for e in desc):
                g.add(self.vline(vspace / 2, j * (hspace / mod)))
            else:
                g.add(self.vline(vspace / 16, j * (hspace / mod)))
                g.add(self.vline(vspace / 16, j * (hspace / mod), vspace * 7 / 16))
i += 1
return g
def lane(self, desc):
x = 4.5
y = (self.opt.lanes-self.index-1) * self.opt.vspace + 0.5
g = self.container.g(transform = "translate({},{})".format(x, y),
text_anchor = "middle",
font_size = self.opt.fontsize,
font_family = self.opt.fontfamily,
font_weight = self.opt.fontweight)
g.add(self.cage(desc))
g.add(self.labels(desc))
return g
def get_max_attrs(self, desc):
max_count = 0
for e in desc:
if 'attr' in e:
if isinstance(e['attr'], list):
max_count = max(max_count, len(e['attr']))
else:
max_count = max(max_count, 1)
return max_count
def render(self, desc, opt = Options()):
self.opt = opt
# Compute extra per-lane space needed if there are more than one attr
# for any field. This spaces all lanes uniformly, matching the lane
# with the most attr's.
extra_attrs = 0
max_attrs = self.get_max_attrs(desc)
if max_attrs > 1:
extra_attrs = max_attrs - 1
self.extra_attr_space = extra_attrs * 16
width = opt.hspace + 9
height = (opt.vspace + self.extra_attr_space) * opt.lanes + 5
viewbox = { 'viewBox': "0 0 {} {}".format(width, height)}
template = svgwrite.Drawing(id="svgcontent", size=[width, height], **viewbox)
lsb = 0
self.mod = int(opt.bits / opt.lanes)
for e in desc:
e["lsb"] = lsb
e["lsbm"] = lsb % self.mod
lsb += e['bits']
e['msb'] = lsb - 1
e['msbm'] = e['msb'] % self.mod
for i in range(opt.lanes):
self.index = i
template.add(self.lane(desc))
return template
def renderJson(self, source):
opt = Options()
if source.get("options"):
opt = Options(**source['options'])
if source.get("reg"):
return self.render(source['reg'], opt)
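# A minimal usage sketch of the renderer (assuming BitField() can be
# constructed without arguments; the field keys follow the code above:
# 'bits' is required, 'name', 'type' and 'attr' are optional):
#
#   source = {
#       "reg": [
#           {"bits": 8, "name": "opcode", "attr": 5},
#           {"bits": 8, "name": "imm", "type": 2},
#           {"bits": 16},
#       ],
#       "options": {"lanes": 1, "bits": 32},
#   }
#   drawing = BitField().renderJson(source)
#   drawing.saveas("bitfield.svg")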
|
from collections import defaultdict
import numpy as np
from day import Day
class BeaconVector:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self._facings = None
def __hash__(self):
return hash((self.x, self.y, self.z))
def dist(self, other):
return sum(map(abs, self.diff(other)))
def diff(self, other):
return self.x - other.x, self.y - other.y, self.z - other.z
def __repr__(self):
return str((self.x, self.y, self.z))
@property
def facings(self):
if not self._facings:
self._facings = list(self._make_facings())
return self._facings
def _make_facings(self):
# shamelessly stolen, because i suck at vector math.
vector = np.array([self.x, self.y, self.z])
rotations = list(map(np.array, [
(1, 0, 0),
(-1, 0, 0),
(0, 1, 0),
(0, -1, 0),
(0, 0, 1),
(0, 0, -1),
]))
for vec_x in rotations:
for vec_y in rotations:
if vec_x.dot(vec_y) == 0:
vec_z = np.cross(vec_x, vec_y)
vec = np.matmul(vector, np.array([vec_x, vec_y, vec_z]))
yield BeaconVector(*vec)
class Scanner:
def __init__(self, name, beacons):
self.beacons = beacons
self.name = name
self.facing = 0
self.offset = (0, 0, 0)
    def rotated(self, x, y, z):
        # appears unused here; BeaconVector defines no rotated(), the facings
        # property is used for orientation instead
        return [beacon.rotated(x, y, z) for beacon in self.beacons]
def relative_positions(self, face):
rel_pos = defaultdict(set)
for beacon in self.beacons:
for other in self.beacons:
if beacon is other:
continue
rel_pos[beacon].add(beacon.facings[face].diff(other.facings[face]))
return rel_pos
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __repr__(self):
return self.name
def overlap(self, other):
rel_pos = other.relative_positions(other.facing)
for facing in range(24):
other_pos = self.relative_positions(facing)
for key, value in rel_pos.items():
for key2, value2 in other_pos.items():
                    # >10 shared difference vectors means both beacons see the same 12-beacon neighborhood
if len(value & value2) > 10:
matches = key.facings[other.facing].diff(key2.facings[facing])
return matches, facing
return None, None
def normalized(self):
return [(
beacon.facings[self.facing].x + self.offset[0],
beacon.facings[self.facing].y + self.offset[1],
beacon.facings[self.facing].z + self.offset[2],
) for beacon in self.beacons]
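# Matching idea: relative_positions() collects, per beacon, the set of
# difference vectors to every other beacon the same scanner sees. These sets
# are translation invariant, so when two scanners share the guaranteed 12
# beacons, some beacon pair agrees on at least 11 difference vectors once the
# right facing is tried; overlap() searches all 24 facings for such a pair and
# derives the scanner offset from it.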
class Day19(Day):
def parse(self, content):
scanners = content.split('\n\n')
results = []
for scanner in scanners:
beacons = []
lines = scanner.split('\n')
for line in lines[1:]:
x, y, z = list(map(int, line.split(',')))
beacons.append(BeaconVector(x, y, z))
results.append(Scanner(lines[0], beacons))
return results
def get_unique(self):
all_points = set((beacon.x, beacon.y, beacon.z) for beacon in self.input[0].beacons)
queue = [self.input[0]]
found = {self.input[0]}
while queue:
start = queue.pop(0)
for item in self.input:
if item in found:
# already found the offset and beacons of this scanner
continue
offset, facing = item.overlap(start)
if not offset:
# no match found
continue
item.offset = (
start.offset[0] + offset[0],
start.offset[1] + offset[1],
start.offset[2] + offset[2],
)
item.facing = facing
for point in item.normalized():
all_points.add(point)
# now that we know the offset, we can test other scanners against this
queue.append(item)
found.add(item)
return all_points
def part1(self):
# takes about 10sec ...sorry
return len(self.get_unique())
def part2(self):
# note: requires part1 to be run first (because that sets the offsets on the input scanners)
scanners = [BeaconVector(*scan.offset) for scan in self.input]
return max([scanner.dist(other) for other in scanners for scanner in scanners])
|
<reponame>CryptoRichy/OctoBot<filename>tests/unit_tests/trading_tests/test_order.py
import random
import ccxt
from trading.exchanges.exchange_manager import ExchangeManager
from config.cst import TradeOrderSide, SIMULATOR_LAST_PRICES_TO_CHECK, TraderOrderType, OrderStatus
from tests.test_utils.config import load_test_config
from trading.trader.order import Order, OrderConstants
from trading.trader.trader_simulator import TraderSimulator
class TestOrder:
@staticmethod
def init_default():
config = load_test_config()
exchange_manager = ExchangeManager(config, ccxt.binance, is_simulated=True)
exchange_inst = exchange_manager.get_exchange()
trader_inst = TraderSimulator(config, exchange_inst, 2)
order_inst = Order(trader_inst)
return config, order_inst, trader_inst, exchange_inst
@staticmethod
def stop(trader):
trader.stop_order_manager()
def test_get_profitability(self):
_, order_inst, trader_inst, _ = self.init_default()
# Test filled_price > create_last_price
# test side SELL
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.SELL
order_filled_sup_side_sell_inst.filled_price = 10
order_filled_sup_side_sell_inst.created_last_price = 9
assert order_filled_sup_side_sell_inst.get_profitability() == (-(1 - 10 / 9))
# test side BUY
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.BUY
order_filled_sup_side_sell_inst.filled_price = 15.114778
order_filled_sup_side_sell_inst.created_last_price = 7.265
assert order_filled_sup_side_sell_inst.get_profitability() == (1 - 15.114778 / 7.265)
# Test filled_price < create_last_price
# test side SELL
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.SELL
order_filled_sup_side_sell_inst.filled_price = 11.556877
order_filled_sup_side_sell_inst.created_last_price = 20
assert order_filled_sup_side_sell_inst.get_profitability() == (1 - 20 / 11.556877)
# test side BUY
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.BUY
order_filled_sup_side_sell_inst.filled_price = 8
order_filled_sup_side_sell_inst.created_last_price = 14.35
assert order_filled_sup_side_sell_inst.get_profitability() == (-(1 - 14.35 / 8))
# Test filled_price == create_last_price
# test side SELL
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.SELL
order_filled_sup_side_sell_inst.filled_price = 1517374.4567
order_filled_sup_side_sell_inst.created_last_price = 1517374.4567
assert order_filled_sup_side_sell_inst.get_profitability() == 0
# test side BUY
order_filled_sup_side_sell_inst = Order(trader_inst)
order_filled_sup_side_sell_inst.side = TradeOrderSide.BUY
order_filled_sup_side_sell_inst.filled_price = 0.4275587387858527
order_filled_sup_side_sell_inst.created_last_price = 0.4275587387858527
assert order_filled_sup_side_sell_inst.get_profitability() == 0
self.stop(trader_inst)
def test_check_last_prices(self):
_, order_inst, trader_inst, _ = self.init_default()
# test price in last trades
# test inferior TRUE
max_price = 10
min_price = 4
recent_trades = [{"price": random.uniform(min_price, max_price)}
for _ in range(0, SIMULATOR_LAST_PRICES_TO_CHECK)]
# append validating trade
recent_trades.append({"price": min_price})
order_inst.last_prices = recent_trades
assert order_inst.check_last_prices(max_price, inferior=True)
# test inferior FALSE
max_price = 10.454677
min_price = 2.4273
recent_trades = [{"price": random.uniform(min_price, max_price)}
for _ in range(0, SIMULATOR_LAST_PRICES_TO_CHECK)]
# append validating trade
recent_trades.append({"price": max_price})
order_inst.last_prices = recent_trades
assert order_inst.check_last_prices(random.uniform(min_price, max_price - 1), inferior=False)
# test price not in last trades
# test inferior TRUE
max_price = 7456.15555632315
min_price = 1421.1488845
recent_trades = [{"price": random.uniform(min_price, max_price)}
for _ in range(0, SIMULATOR_LAST_PRICES_TO_CHECK)]
order_inst.last_prices = recent_trades
assert not order_inst.check_last_prices(min_price, inferior=True)
# test inferior FALSE
max_price = 0.0001243753
min_price = 0.000012557753
recent_trades = [{"price": random.uniform(min_price, max_price)}
for _ in range(0, SIMULATOR_LAST_PRICES_TO_CHECK)]
order_inst.last_prices = recent_trades
assert not order_inst.check_last_prices(max_price, inferior=False)
self.stop(trader_inst)
def test_new(self):
config, order_inst, trader_inst, exchange_inst = self.init_default()
# with real trader
order_inst.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
"BTC/USDT",
10000,
1,
price=None,
stop_price=None,
order_notifier=None)
assert order_inst.get_order_type() == OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET]
assert order_inst.get_order_symbol() == "BTC/USDT"
assert order_inst.get_create_last_price() == 10000
assert order_inst.get_origin_quantity() == 1
assert order_inst.get_creation_time() != 0
assert order_inst.get_currency_and_market() == ('BTC', 'USDT')
assert order_inst.get_side() is None
assert order_inst.get_status() == OrderStatus.OPEN
order_inst.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.STOP_LOSS_LIMIT],
"ETH/BTC",
0.1,
5.2,
price=0.12,
stop_price=0.9,
order_notifier=None)
assert order_inst.origin_stop_price == 0.9
assert order_inst.last_prices is None
assert order_inst.origin_price == 0.12
# with simulated trader
trader_sim_inst = TraderSimulator(config, exchange_inst, 1)
order_sim_inst = Order(trader_sim_inst)
order_sim_inst.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_MARKET],
"LTC/USDT",
100,
3.22,
price=None,
stop_price=None,
order_notifier=None)
assert order_sim_inst.get_status() == OrderStatus.OPEN
self.stop(trader_inst)
self.stop(trader_sim_inst)
|
<reponame>gonrin/gatco_kafka<gh_stars>0
import asyncio
import confluent_kafka
from confluent_kafka import KafkaException
from time import time
from threading import Thread
__version__ = '0.1.0'
class AIOProducer:
def __init__(self, configs, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._producer = confluent_kafka.Producer(configs)
self._cancelled = False
self._poll_thread = Thread(target=self._poll_loop)
self._poll_thread.start()
def _poll_loop(self):
while not self._cancelled:
self._producer.poll(0.1)
def close(self):
self._cancelled = True
self._poll_thread.join()
def produce(self, topic, value):
"""
An awaitable produce method.
"""
result = self._loop.create_future()
def ack(err, msg):
if err:
self._loop.call_soon_threadsafe(result.set_exception, KafkaException(err))
else:
self._loop.call_soon_threadsafe(result.set_result, msg)
self._producer.produce(topic, value, on_delivery=ack)
return result
def produce2(self, topic, value, on_delivery):
"""
A produce method in which delivery notifications are made available
via both the returned future and on_delivery callback (if specified).
"""
result = self._loop.create_future()
def ack(err, msg):
if err:
self._loop.call_soon_threadsafe(
result.set_exception, KafkaException(err))
else:
self._loop.call_soon_threadsafe(
result.set_result, msg)
if on_delivery:
self._loop.call_soon_threadsafe(
on_delivery, err, msg)
self._producer.produce(topic, value, on_delivery=ack)
return result
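# A minimal usage sketch of AIOProducer (broker address and topic are
# hypothetical; assumes it is created inside a running asyncio event loop):
#
#   async def main():
#       producer = AIOProducer({"bootstrap.servers": "localhost:9092"})
#       try:
#           msg = await producer.produce("demo-topic", b"hello")
#           print("delivered to partition", msg.partition())
#       finally:
#           producer.close()
#
#   asyncio.run(main())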
class Producer:
def __init__(self, configs):
self._producer = confluent_kafka.Producer(configs)
self._cancelled = False
self._poll_thread = Thread(target=self._poll_loop)
self._poll_thread.start()
def _poll_loop(self):
while not self._cancelled:
self._producer.poll(0.1)
def close(self):
self._cancelled = True
self._poll_thread.join()
def produce(self, topic, value, on_delivery=None):
self._producer.produce(topic, value, on_delivery=on_delivery)
class _KafkaState(object):
"""Remembers configuration for the (db, app) tuple."""
def __init__(self, kafka):
self.kafka = kafka
class Kafka(object):
app = None
producer = None
config = {}
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app, config=None):
self.app = app
if config is None:
self.config = {
"bootstrap.servers": app.config.get("KAFKA_BOOTSTRAP_SERVERS")
}
else:
self.config = config
@app.listener('after_server_start')
async def notify_server_started(app, loop):
self.producer = AIOProducer(self.config, loop=loop)
print('Server successfully started!')
@app.listener('before_server_stop')
async def notify_server_stopping(app, loop):
self.producer.close()
print('Server shutting down!')
if (not hasattr(app, 'extensions')) or (app.extensions is None):
app.extensions = {}
app.extensions['kafka'] = _KafkaState(self)
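    # Minimal wiring sketch (assuming a Sanic-style app exposing `config` and
    # the `listener` decorator used above; names are illustrative only):
    #
    #   app = Sanic(__name__)
    #   app.config["KAFKA_BOOTSTRAP_SERVERS"] = "localhost:9092"
    #   kafka = Kafka(app)
    #   # inside a request handler, after the server has started:
    #   #   await kafka.producer.produce("topic", "payload")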
def get_app(self, reference_app=None):
"""Helper method that implements the logic to look up an
application."""
if reference_app is not None:
return reference_app
if self.app is not None:
return self.app
raise RuntimeError(
'No application found. Either work inside a view function or push'
' an application context.'
        )
|
import numpy as np
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from torch import sigmoid
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModelWithWandb(Callback):
"""Make WandbLogger watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class ImagePredictionLogger(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
# print("_________________________-")
# print(vars(trainer))
# get a validation batch from the validation dat loader
val_samples = next(iter(trainer.val_dataloaders[0]))
val_imgs, _val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs).to("cpu")
# preds = torch.argmax(logits, axis=-1)
preds = sigmoid(logits).numpy() # .squeeze()
_label_list = pl_module.labels
preds[preds >= 0.5] = 1
preds[preds < 0.5] = 0
preds = np.argwhere(preds)
pred_labels = {i: [] for i in range(len(val_imgs))}
for _pred in preds:
pred_labels[_pred[0]].append(_label_list[_pred[1]])
pred_labels = [pred_labels[i] for i in range(len(val_imgs))]
_val_labels = _val_labels.to("cpu").numpy()
_val_labels = np.argwhere(_val_labels)
val_labels = {i: [] for i in range(len(val_imgs))}
for _label in _val_labels:
val_labels[_label[0]].append(_label_list[_label[1]])
val_labels = [val_labels[i] for i in range(len(val_imgs))]
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
pred_labels[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
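# Hookup sketch (assuming a standard Lightning training script; a WandbLogger
# must be attached to the Trainer for get_wandb_logger to find it):
#
#   trainer = Trainer(
#       logger=WandbLogger(project="demo"),
#       callbacks=[WatchModelWithWandb(), ImagePredictionLogger(num_samples=8)],
#   )
#   trainer.fit(model, datamodule=datamodule)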
|
<filename>dps/tf/train.py
import tensorflow as tf
import time
from dps import cfg
from dps.train import TrainingLoop, TrainingLoopData
from dps.utils import flush_print as _print, gen_seed
from dps.utils.tf import (
uninitialized_variables_initializer, trainable_variables, walk_variable_scopes
)
class TensorFlowTrainingLoopData(TrainingLoopData):
def store_scalar_summaries(self, mode, path, record, n_global_experiences):
if mode not in self.writers:
self.writers[mode] = tf.summary.FileWriter(path, flush_secs=cfg.reload_interval)
# Build a summary using the Summary protocol buffer
# See https://stackoverflow.com/questions/37902705/how-to-manually-create-a-tf-summary
summary_values = [tf.Summary.Value(tag="all/"+k, simple_value=float(v)) for k, v in record.items()]
summary = tf.Summary(value=summary_values)
self.writers[mode].add_summary(summary, n_global_experiences)
class TensorFlowTrainingLoop(TrainingLoop):
training_loop_data_class = TensorFlowTrainingLoopData
def framework_initialize_stage(self, stack):
# Configure and create session and graph for stage.
session_config = tf.ConfigProto()
session_config.intra_op_parallelism_threads = cfg.get('intra_op_parallelism_threads', 0)
session_config.inter_op_parallelism_threads = cfg.get('inter_op_parallelism_threads', 0)
session_config.log_device_placement = cfg.get('log_device_placement', 0)
if cfg.use_gpu:
per_process_gpu_memory_fraction = getattr(cfg, 'per_process_gpu_memory_fraction', None)
if per_process_gpu_memory_fraction:
session_config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
gpu_allow_growth = getattr(cfg, 'gpu_allow_growth', None)
if gpu_allow_growth:
session_config.gpu_options.allow_growth = gpu_allow_growth
_print("Using GPU if available.")
_print("Using {}% of GPU memory.".format(
100 * session_config.gpu_options.per_process_gpu_memory_fraction))
_print("Allowing growth of GPU memory: {}".format(session_config.gpu_options.allow_growth))
graph = tf.Graph()
sess = tf.Session(graph=graph, config=session_config)
# This HAS to come after the creation of the session, otherwise
# it allocates all GPU memory if using the GPU.
_print("\nAvailable devices: ")
from tensorflow.python.client import device_lib
_print(device_lib.list_local_devices())
if not cfg.use_gpu:
_print("Not using GPU.")
stack.enter_context(graph.device("/cpu:0"))
stack.enter_context(graph.as_default())
stack.enter_context(sess)
stack.enter_context(sess.as_default())
# Set the seed for the stage.
tf_seed = gen_seed()
_print("Setting tensorflow seed to generated seed: {}\n".format(tf_seed))
tf.set_random_seed(tf_seed)
tf.logging.set_verbosity(tf.logging.ERROR)
def framework_finalize_stage_initialization(self):
self.framework_print_variables()
self.framework_load_weights()
sess = tf.get_default_session()
tf_step = tf.train.get_or_create_global_step()
if cfg.initial_step is not None and cfg.initial_step > 0:
sess.run(tf_step.assign(cfg.initial_step))
sess.run(uninitialized_variables_initializer())
sess.run(tf.assert_variables_initialized())
# Prevent memory leaks, no ops can be added to the graph after this point
tf.get_default_graph().finalize()
def framework_print_variables(self):
walk_variable_scopes(max_depth=cfg.variable_scope_depth)
def framework_load_weights(self):
for var_scope, path in self.get_load_paths():
_print("Loading var scope \"{}\" from {}.".format(var_scope, path))
start = time.time()
variables = {v.name: v for v in trainable_variables(var_scope, for_opt=False)}
if not variables:
_print("No variables to load in scope {}.".format(str(var_scope)))
continue
saver = tf.train.Saver(variables)
saver.restore(tf.get_default_session(), path)
_print("Done loading var scope, took {} seconds.".format(time.time() - start))
|
<filename>userbot/modules/sudo.py
# Copyright 2021 (C) FaridDadashzade.
#
# CyberUserBot - Faridxz
#
# whoever steals this is a scumbag #
import os
import re
from userbot.cmdhelp import CmdHelp
from userbot.events import register
from userbot import (
HEROKU_APPNAME,
HEROKU_APIKEY,
SUDO_VERSION,
SUDO_ID,
bot,
)
import heroku3
from telethon.tl.functions.users import GetFullUserRequest
Heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_api = "https://api.heroku.com"
cybersudo = os.environ.get("SUDO_ID", None)
sudosiyahisi = os.environ.get("SUDO_ID", None)
@register(outgoing=True,
pattern=r"^.addsudo")
async def sudoelave(event):
await event.edit("C Y B Ξ R\nİstifadəçi sudo olaraq qeyd edilir...")
cyber = "SUDO_ID"
if HEROKU_APPNAME is not None:
app = Heroku.app(HEROKU_APPNAME)
else:
await event.edit("`C Y B Ξ R:" "\nXahiş edirəm` **HEROKU_APPNAME** dəyərini əlavə edin.")
return
heroku_var = app.config()
if event is None:
return
try:
cybert = await get_user(event)
except Exception:
await event.edit("Xahiş edirəm hir istifadəçiyə cavab verin.")
if cybersudo:
yenisudo = f"{cybersudo} {cybert}"
else:
yenisudo = f"{cybert}"
await event.edit("İstifadəçi sudo olaraq qeyd edildi!\nC Y B Ξ R yenidən başladılır...")
heroku_var[cyber] = yenisudo
@register(outgoing=True,
pattern=r"^.sudosil")
async def sudosil(event):
Heroku = heroku3.from_key(HEROKU_APIKEY)
app = Heroku.app(HEROKU_APPNAME)
heroku_var = app.config()
if not event.is_reply:
return await event.edit("Xahiş edirəm bir istifadəçinin mesajını cavablandırın.")
if event.is_reply:
id = (await event.get_reply_message()).sender_id
ad = (await bot.get_entity(id)).first_name
        op = re.search(r"\b{}\b".format(id), str(sudosiyahisi))
if op:
i = ""
faridxz = sudosiyahisi.split(" ")
faridxz.remove(str(id))
i += str(faridxz)
x = i.replace("[", "")
xx = x.replace("]", "")
xxx = xx.replace(",", "")
hazir = xxx.replace("'", "")
heroku_var["SUDO_ID"] = hazir
await event.edit(f"`{ad}` adlı istifadəçinin icazəsi alındı.\nC Y B Ξ R yenidən başladılır...")
else:
await event.edit(f"Bağışlayın, `{ad}` istifadəçi sudo olaraq qeyd olunmayıb!")
if heroku_var["SUDO_ID"] == None:
await event.edit(f"`Sudo siyahısı boşdur!`")
async def get_user(event):
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.forward:
replied_user = await event.client(
GetFullUserRequest(previous_message.forward.sender_id)
)
else:
replied_user = await event.client(
GetFullUserRequest(previous_message.sender_id)
)
cybert = replied_user.user.id
return cybert
Help = CmdHelp('sudo')
Help.add_command('addsudo', None, 'Makes the user you replied to an admin of your bot.')
Help.add_command('sudosil', None, 'Revokes the admin rights the replied user has on your bot.')
Help.add()
|
"""
Test for RFlink sensor components.
Test setup of rflink sensor component/platform. Verify manual and
automatic sensor creation.
"""
from datetime import timedelta
from homeassistant.components.rflink import CONF_RECONNECT_INTERVAL
from homeassistant.const import (
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.rflink.test_init import mock_rflink
DOMAIN = "binary_sensor"
CONFIG = {
"rflink": {
"port": "/dev/ttyABC0",
"ignore_devices": ["ignore_wildcard_*", "ignore_sensor"],
},
DOMAIN: {
"platform": "rflink",
"devices": {
"test": {"name": "test", "device_class": "door"},
"test2": {
"name": "test2",
"device_class": "motion",
"off_delay": 30,
"force_update": True,
},
},
},
}
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink sensor component."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
# test default state of sensor loaded from config
config_sensor = hass.states.get("binary_sensor.test")
assert config_sensor
assert config_sensor.state == STATE_OFF
assert config_sensor.attributes["device_class"] == "door"
# test on event for config sensor
event_callback({"id": "test", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_ON
# test off event for config sensor
event_callback({"id": "test", "command": "off"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_OFF
# test allon event for config sensor
event_callback({"id": "test", "command": "allon"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_ON
# test alloff event for config sensor
event_callback({"id": "test", "command": "alloff"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_OFF
async def test_entity_availability(hass, monkeypatch):
"""If Rflink device is disconnected, entities should become unavailable."""
# Make sure Rflink mock does not 'recover' to quickly from the
# disconnect or else the unavailability cannot be measured
config = CONFIG
failures = [True, True]
config[CONF_RECONNECT_INTERVAL] = 60
# Create platform and entities
_, _, _, disconnect_callback = await mock_rflink(
hass, config, DOMAIN, monkeypatch, failures=failures
)
# Entities are available by default
assert hass.states.get("binary_sensor.test").state == STATE_OFF
# Mock a disconnect of the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entity should be unavailable
assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE
# Reconnect the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entities should be available again
assert hass.states.get("binary_sensor.test").state == STATE_OFF
async def test_off_delay(hass, monkeypatch):
"""Test off_delay option."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
events = []
on_event = {"id": "test2", "command": "on"}
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
now = dt_util.utcnow()
# fake time and turn on sensor
future = now + timedelta(seconds=0)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
event_callback(on_event)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 1
# fake time and turn on sensor again
future = now + timedelta(seconds=15)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
event_callback(on_event)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 2
# fake time and verify sensor still on (de-bounce)
future = now + timedelta(seconds=35)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 2
# fake time and verify sensor is off
future = now + timedelta(seconds=45)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_OFF
assert len(events) == 3
|
<gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from models.BaseModel import BaseModel
class DownsamplerBlock(nn.Module):
def __init__(self, ninput, noutput):
super(DownsamplerBlock, self).__init__()
self.ninput = ninput
self.noutput = noutput
if self.ninput < self.noutput:
self.conv = nn.Conv2d(ninput, noutput-ninput,
kernel_size=3, stride=2, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
else:
self.conv = nn.Conv2d(ninput, noutput,
kernel_size=3, stride=2, padding=1)
self.bn = nn.BatchNorm2d(noutput)
def forward(self, input):
if self.ninput < self.noutput:
output = torch.cat([self.conv(input), self.pool(input)], 1)
else:
output = self.conv(input)
output = self.bn(output)
return F.relu(output)
class EDABlock(nn.Module):
def __init__(self, ninput, dilated, k=40, dropprob=0.02):
super(EDABlock, self).__init__()
self.conv1x1 = nn.Conv2d(ninput, k, kernel_size=1)
self.bn0 = nn.BatchNorm2d(k)
self.conv3x1_1 = nn.Conv2d(k, k, kernel_size=(3,1), padding=(1,0))
self.conv1x3_1 = nn.Conv2d(k, k, kernel_size=(1,3), padding=(0,1))
self.bn1 = nn.BatchNorm2d(k)
        # dilated 3x1/1x3 convs: padding = dilation * (kernel_size - 1) / 2 along
        # the convolved axis, i.e. `dilated` for kernel 3 (and 1 for the undilated pair above)
self.conv3x1_2 = nn.Conv2d(k, k, kernel_size=(3,1), padding=(dilated,0), dilation=dilated)
self.conv1x3_2 = nn.Conv2d(k, k, kernel_size=(1,3), padding=(0,dilated), dilation=dilated)
self.bn2 = nn.BatchNorm2d(k)
self.dropout = nn.Dropout2d(dropprob)
def forward(self, input):
x = input
output = self.conv1x1(input)
output = self.bn0(output)
output = F.relu(output)
output = self.conv3x1_1(output)
output = self.conv1x3_1(output)
output = self.bn1(output)
output = F.relu(output)
output = self.conv3x1_2(output)
output = self.conv1x3_2(output)
output = self.bn2(output)
output = F.relu(output)
if self.dropout.p != 0:
output = self.dropout(output)
output = torch.cat([output, x], 1)
return output
class EDANet(BaseModel):
def __init__(self, config):
super(EDANet, self).__init__()
self.name='EDANet'
self.nb_classes = config.nb_classes
self.layers = nn.ModuleList()
# for stage1
self.dilation1 = [1,1,1,2,2]
# for stage2
self.dilation2 = [2,2,4,4,8,8,16,16]
self.layers.append(DownsamplerBlock(3,15))
self.layers.append(DownsamplerBlock(15,60))
for i in range(5):
self.layers.append(EDABlock(60+40*i, self.dilation1[i]))
self.layers.append(DownsamplerBlock(260,130))
for j in range(8):
self.layers.append(EDABlock(130+40*j, self.dilation2[j]))
# projection layer
self.project_layer = nn.Conv2d(450, self.nb_classes, kernel_size=1)
self.weights_init()
def weights_init(self):
for idx, m in enumerate(self.modules()):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self,x):
output = x
for layer in self.layers:
output = layer(output)
output = self.project_layer(output)
# bilinear interpolation x8
output = F.interpolate(output, scale_factor=8, mode='bilinear', align_corners=True)
# bilinear interpolation x2
#if not self.training:
# output = F.interpolate(output, scale_factor=2, mode='bilinear', align_corners=True)
return output
if __name__ == '__main__':
input = torch.randn(1,3,512,512)
    # inference-only smoke test; EDANet requires a config object carrying
    # nb_classes, so build a minimal stand-in here (19 classes as an arbitrary example)
    class _Config:
        nb_classes = 19
    model = EDANet(_Config()).eval()
print(model)
output = model(input)
print(output.shape)
|
<gh_stars>0
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from time import time as _time
import threading
import multiprocessing
from paddle_serving_client import MultiLangClient, Client
from concurrent import futures
import logging
import func_timeout
import os
import sys
import numpy as np
from numpy import *
from .proto import pipeline_service_pb2
from .channel import (ThreadChannel, ProcessChannel, ChannelDataEcode,
ChannelData, ChannelDataType, ChannelStopError,
ChannelTimeoutError)
from .util import NameGenerator
from .profiler import UnsafeTimeProfiler as TimeProfiler
_LOGGER = logging.getLogger()
_op_name_gen = NameGenerator("Op")
class Op(object):
def __init__(self,
name=None,
input_ops=[],
server_endpoints=[],
fetch_list=[],
client_config=None,
concurrency=1,
timeout=-1,
retry=1,
batch_size=1,
auto_batching_timeout=None):
if name is None:
name = _op_name_gen.next()
self.name = name # to identify the type of OP, it must be globally unique
self.concurrency = concurrency # amount of concurrency
self.set_input_ops(input_ops)
self._server_endpoints = server_endpoints
self.with_serving = False
if len(self._server_endpoints) != 0:
self.with_serving = True
self._client_config = client_config
self._fetch_names = fetch_list
if timeout > 0:
self._timeout = timeout / 1000.0
else:
self._timeout = -1
self._retry = max(1, retry)
self._input = None
self._outputs = []
self._batch_size = batch_size
self._auto_batching_timeout = auto_batching_timeout
if self._auto_batching_timeout is not None:
if self._auto_batching_timeout <= 0 or self._batch_size == 1:
_LOGGER.warning(
self._log(
"Because auto_batching_timeout <= 0 or batch_size == 1,"
" set auto_batching_timeout to None."))
self._auto_batching_timeout = None
else:
self._auto_batching_timeout = self._auto_batching_timeout / 1000.0
if not isinstance(self, RequestOp) and not isinstance(self, ResponseOp):
_LOGGER.info(
self._log("\n\tinput_ops: {},"
"\n\tserver_endpoints: {}"
"\n\tfetch_list: {}"
"\n\tclient_config: {}"
"\n\tconcurrency: {},"
"\n\ttimeout(s): {},"
"\n\tretry: {},"
"\n\tbatch_size: {},"
"\n\tauto_batching_timeout(s): {}".format(
", ".join([op.name for op in input_ops
]), self._server_endpoints,
self._fetch_names, self._client_config,
self.concurrency, self._timeout, self._retry,
self._batch_size, self._auto_batching_timeout)))
self._server_use_profile = False
# only for thread op
self._for_init_op_lock = threading.Lock()
self._for_close_op_lock = threading.Lock()
self._succ_init_op = False
self._succ_close_op = False
def use_default_auto_batching_config(self):
if self._batch_size != 1:
_LOGGER.warning("Op({}) reset batch_size=1 (original: {})"
.format(self.name, self._batch_size))
self._batch_size = 1
        if self._auto_batching_timeout is not None:
_LOGGER.warning(
"Op({}) reset auto_batching_timeout=None (original: {})"
.format(self.name, self._auto_batching_timeout))
self._auto_batching_timeout = None
def use_profiler(self, use_profile):
self._server_use_profile = use_profile
def init_client(self, client_type, client_config, server_endpoints,
fetch_names):
if self.with_serving == False:
_LOGGER.info("Op({}) has no client (and it also do not "
"run the process function".format(self.name))
return None
if client_type == 'brpc':
client = Client()
client.load_client_config(client_config)
elif client_type == 'grpc':
client = MultiLangClient()
else:
raise ValueError("Failed to init client: unknow client "
"type {}".format(client_type))
client.connect(server_endpoints)
self._fetch_names = fetch_names
return client
def get_input_ops(self):
return self._input_ops
def set_input_ops(self, ops):
if not isinstance(ops, list):
ops = [] if ops is None else [ops]
self._input_ops = []
for op in ops:
if not isinstance(op, Op):
_LOGGER.critical(
self._log("Failed to set input_ops: input op "
"must be Op type, not {}".format(type(op))))
os._exit(-1)
self._input_ops.append(op)
def add_input_channel(self, channel):
if not isinstance(channel, (ThreadChannel, ProcessChannel)):
_LOGGER.critical(
self._log("Failed to set input_channel: input "
"channel must be Channel type, not {}".format(
type(channel))))
os._exit(-1)
channel.add_consumer(self.name)
self._input = channel
def clean_input_channel(self):
self._input = None
def _get_input_channel(self):
return self._input
def add_output_channel(self, channel):
if not isinstance(channel, (ThreadChannel, ProcessChannel)):
_LOGGER.critical(
self._log("Failed to add output_channel: output channel "
"must be Channel type, not {}".format(type(channel))))
os._exit(-1)
channel.add_producer(self.name)
self._outputs.append(channel)
def clean_output_channels(self):
self._outputs = []
def _get_output_channels(self):
return self._outputs
def preprocess(self, input_dicts):
# multiple previous Op
if len(input_dicts) != 1:
_LOGGER.critical(
self._log(
"Failed to run preprocess: this Op has multiple previous "
"inputs. Please override this func."))
os._exit(-1)
(_, input_dict), = input_dicts.items()
return input_dict
def process(self, feed_batch):
err, err_info = ChannelData.check_batch_npdata(feed_batch)
if err != 0:
_LOGGER.critical(
self._log("Failed to run process: {}. Please override "
"preprocess func.".format(err_info)))
os._exit(-1)
call_result = self.client.predict(
feed=feed_batch, fetch=self._fetch_names)
if isinstance(self.client, MultiLangClient):
if call_result is None or call_result["serving_status_code"] != 0:
return None
call_result.pop("serving_status_code")
return call_result
def postprocess(self, input_dict, fetch_dict):
return fetch_dict
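    # Override sketch: a user-defined Op customizes the three hooks above.
    # The field names here are illustrative, not part of this module:
    #
    #   class MyOp(Op):
    #       def preprocess(self, input_dicts):
    #           (_, input_dict), = input_dicts.items()
    #           return {"x": np.array(input_dict["x"], dtype="float32")}
    #
    #       def postprocess(self, input_dict, fetch_dict):
    #           return {"y": fetch_dict["y"]}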
def _parse_channeldata(self, channeldata_dict):
data_id, error_channeldata = None, None
client_need_profile, profile_set = False, set()
parsed_data = {}
key = list(channeldata_dict.keys())[0]
data_id = channeldata_dict[key].id
client_need_profile = channeldata_dict[key].client_need_profile
for name, data in channeldata_dict.items():
if data.ecode != ChannelDataEcode.OK.value:
error_channeldata = data
break
parsed_data[name] = data.parse()
if client_need_profile:
profile_set |= data.profile_data_set
return (data_id, error_channeldata, parsed_data, client_need_profile,
profile_set)
def _push_to_output_channels(self,
data,
channels,
name=None,
profile_str=None,
client_need_profile=False,
profile_set=None):
if name is None:
name = self.name
# add profile into channeldata
if client_need_profile and profile_set is not None:
if profile_str is not None:
profile_set.add(profile_str)
data.add_profile(profile_set)
for channel in channels:
channel.push(data, name)
def start_with_process(self, client_type):
        processes = []
for concurrency_idx in range(self.concurrency):
p = multiprocessing.Process(
target=self._run,
args=(concurrency_idx, self._get_input_channel(),
self._get_output_channels(), client_type, False))
p.start()
            processes.append(p)
        return processes
def start_with_thread(self, client_type):
threads = []
for concurrency_idx in range(self.concurrency):
t = threading.Thread(
target=self._run,
args=(concurrency_idx, self._get_input_channel(),
self._get_output_channels(), client_type, True))
# When a process exits, it attempts to terminate
# all of its daemonic child processes.
t.daemon = True
t.start()
threads.append(t)
return threads
def init_op(self):
pass
def _run_preprocess(self, parsed_data_dict, op_info_prefix):
_LOGGER.debug("{} Running preprocess".format(op_info_prefix))
preped_data_dict = {}
err_channeldata_dict = {}
for data_id, parsed_data in parsed_data_dict.items():
preped_data, error_channeldata = None, None
try:
preped_data = self.preprocess(parsed_data)
except TypeError as e:
# Error type in channeldata.datatype
error_info = "(logid={}) {} Failed to preprocess: {}".format(
data_id, op_info_prefix, e)
_LOGGER.error(error_info, exc_info=True)
error_channeldata = ChannelData(
ecode=ChannelDataEcode.TYPE_ERROR.value,
error_info=error_info,
data_id=data_id)
except Exception as e:
error_info = "(logid={}) {} Failed to preprocess: {}".format(
data_id, op_info_prefix, e)
_LOGGER.error(error_info, exc_info=True)
error_channeldata = ChannelData(
ecode=ChannelDataEcode.UNKNOW.value,
error_info=error_info,
data_id=data_id)
if error_channeldata is not None:
err_channeldata_dict[data_id] = error_channeldata
else:
preped_data_dict[data_id] = preped_data
_LOGGER.debug("{} Succ preprocess".format(op_info_prefix))
return preped_data_dict, err_channeldata_dict
def _run_process(self, preped_data_dict, op_info_prefix):
_LOGGER.debug("{} Running process".format(op_info_prefix))
midped_data_dict = {}
err_channeldata_dict = {}
if self.with_serving:
data_ids = preped_data_dict.keys()
feed_batch = [preped_data_dict[data_id] for data_id in data_ids]
midped_batch = None
ecode = ChannelDataEcode.OK.value
if self._timeout <= 0:
try:
midped_batch = self.process(feed_batch)
except Exception as e:
ecode = ChannelDataEcode.UNKNOW.value
error_info = "{} Failed to process(batch: {}): {}".format(
op_info_prefix, data_ids, e)
_LOGGER.error(error_info, exc_info=True)
else:
for i in range(self._retry):
try:
midped_batch = func_timeout.func_timeout(
self._timeout, self.process, args=(feed_batch, ))
except func_timeout.FunctionTimedOut as e:
if i + 1 >= self._retry:
ecode = ChannelDataEcode.TIMEOUT.value
error_info = "{} Failed to process(batch: {}): " \
"exceeded retry count.".format(
op_info_prefix, data_ids)
_LOGGER.error(error_info)
else:
_LOGGER.warning(
"{} Failed to process(batch: {}): timeout, and retrying({}/{})"
.format(op_info_prefix, data_ids, i + 1,
self._retry))
except Exception as e:
ecode = ChannelDataEcode.UNKNOW.value
error_info = "{} Failed to process(batch: {}): {}".format(
op_info_prefix, data_ids, e)
_LOGGER.error(error_info, exc_info=True)
break
else:
break
if ecode != ChannelDataEcode.OK.value:
for data_id in data_ids:
_LOGGER.error("(logid={}) {}".format(data_id, error_info))
err_channeldata_dict[data_id] = ChannelData(
ecode=ecode, error_info=error_info, data_id=data_id)
elif midped_batch is None:
# op client return None
error_info = "{} Failed to predict, please check if PaddleServingService" \
" is working properly.".format(op_info_prefix)
for data_id in data_ids:
_LOGGER.error("(logid={}) {}".format(data_id, error_info))
err_channeldata_dict[data_id] = ChannelData(
ecode=ChannelDataEcode.CLIENT_ERROR.value,
error_info=error_info,
data_id=data_id)
else:
# transform np format to dict format
for idx, data_id in enumerate(data_ids):
midped_data_dict[data_id] = {
k: v[idx]
for k, v in midped_batch.items()
}
else:
midped_data_dict = preped_data_dict
_LOGGER.debug("{} Succ process".format(op_info_prefix))
return midped_data_dict, err_channeldata_dict
def _run_postprocess(self, parsed_data_dict, midped_data_dict,
op_info_prefix):
_LOGGER.debug("{} Running postprocess".format(op_info_prefix))
postped_data_dict = {}
err_channeldata_dict = {}
for data_id, midped_data in midped_data_dict.items():
postped_data, err_channeldata = None, None
try:
postped_data = self.postprocess(parsed_data_dict[data_id],
midped_data)
except Exception as e:
error_info = "(logid={}) {} Failed to postprocess: {}".format(
data_id, op_info_prefix, e)
_LOGGER.error(error_info, exc_info=True)
err_channeldata = ChannelData(
ecode=ChannelDataEcode.UNKNOW.value,
error_info=error_info,
data_id=data_id)
if err_channeldata is not None:
err_channeldata_dict[data_id] = err_channeldata
continue
else:
if not isinstance(postped_data, dict):
error_info = "(logid={}) {} Failed to postprocess: " \
"output of postprocess funticon must be " \
"dict type, but get {}".format(
data_id, op_info_prefix,
type(postped_data))
_LOGGER.error(error_info)
err_channeldata = ChannelData(
ecode=ChannelDataEcode.UNKNOW.value,
error_info=error_info,
data_id=data_id)
err_channeldata_dict[data_id] = err_channeldata
continue
output_data = None
err, _ = ChannelData.check_npdata(postped_data)
if err == 0:
output_data = ChannelData(
ChannelDataType.CHANNEL_NPDATA.value,
npdata=postped_data,
data_id=data_id)
else:
output_data = ChannelData(
ChannelDataType.DICT.value,
dictdata=postped_data,
data_id=data_id)
postped_data_dict[data_id] = output_data
_LOGGER.debug("{} Succ postprocess".format(op_info_prefix))
return postped_data_dict, err_channeldata_dict
def _auto_batching_generator(self, input_channel, op_name, batch_size,
timeout, op_info_prefix):
while True:
batch = []
while len(batch) == 0:
endtime = None
if timeout is not None:
endtime = _time() + timeout
for idx in range(batch_size):
try:
channeldata_dict = None
if timeout is not None:
remaining = endtime - _time()
if remaining <= 0.0:
_LOGGER.debug("{} Failed to generate batch: "
"timeout".format(op_info_prefix))
break
channeldata_dict = input_channel.front(op_name,
timeout)
else:
channeldata_dict = input_channel.front(op_name)
batch.append(channeldata_dict)
except ChannelTimeoutError:
_LOGGER.debug("{} Failed to generate batch: "
"timeout".format(op_info_prefix))
break
_LOGGER.debug("{} Got actual batch_size: {}".format(op_info_prefix,
len(batch)))
yield batch
def _parse_channeldata_batch(self, batch, output_channels):
parsed_data_dict = {}
need_profile_dict = {}
profile_dict = {}
for channeldata_dict in batch:
(data_id, error_channeldata, parsed_data,
client_need_profile, profile_set) = \
self._parse_channeldata(channeldata_dict)
if error_channeldata is None:
parsed_data_dict[data_id] = parsed_data
need_profile_dict[data_id] = client_need_profile
profile_dict[data_id] = profile_set
else:
# error data in predecessor Op
# (error_channeldata with profile info)
self._push_to_output_channels(error_channeldata,
output_channels)
return parsed_data_dict, need_profile_dict, profile_dict
def _run(self, concurrency_idx, input_channel, output_channels, client_type,
is_thread_op):
op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
tid = threading.current_thread().ident
# init op
profiler = None
try:
profiler = self._initialize(is_thread_op, client_type,
concurrency_idx)
except Exception as e:
_LOGGER.critical(
"{} Failed to init op: {}".format(op_info_prefix, e),
exc_info=True)
os._exit(-1)
_LOGGER.info("{} Succ init".format(op_info_prefix))
batch_generator = self._auto_batching_generator(
input_channel=input_channel,
op_name=self.name,
batch_size=self._batch_size,
timeout=self._auto_batching_timeout,
op_info_prefix=op_info_prefix)
start_prep, end_prep = None, None
start_midp, end_midp = None, None
start_postp, end_postp = None, None
while True:
try:
channeldata_dict_batch = next(batch_generator)
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
# parse channeldata batch
try:
parsed_data_dict, need_profile_dict, profile_dict \
= self._parse_channeldata_batch(
channeldata_dict_batch, output_channels)
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
if len(parsed_data_dict) == 0:
# data in the whole batch is all error data
continue
# preprecess
start_prep = profiler.record("prep#{}_0".format(op_info_prefix))
preped_data_dict, err_channeldata_dict \
= self._run_preprocess(parsed_data_dict, op_info_prefix)
end_prep = profiler.record("prep#{}_1".format(op_info_prefix))
_LOGGER.log(level=1,
msg="(logid={}) {} prep[{} ms]".format(
parsed_data_dict.keys(), op_info_prefix,
(end_prep - start_prep) / 1e3))
try:
for data_id, err_channeldata in err_channeldata_dict.items():
self._push_to_output_channels(
data=err_channeldata,
channels=output_channels,
client_need_profile=need_profile_dict[data_id],
profile_set=profile_dict[data_id])
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
if len(parsed_data_dict) == 0:
continue
# process
start_midp = profiler.record("midp#{}_0".format(op_info_prefix))
midped_data_dict, err_channeldata_dict \
= self._run_process(preped_data_dict, op_info_prefix)
end_midp = profiler.record("midp#{}_1".format(op_info_prefix))
_LOGGER.log(level=1,
msg="(logid={}) {} midp[{} ms]".format(
preped_data_dict.keys(), op_info_prefix,
(end_midp - start_midp) / 1e3))
try:
for data_id, err_channeldata in err_channeldata_dict.items():
self._push_to_output_channels(
data=err_channeldata,
channels=output_channels,
client_need_profile=need_profile_dict[data_id],
profile_set=profile_dict[data_id])
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
if len(midped_data_dict) == 0:
continue
# postprocess
start_postp = profiler.record("postp#{}_0".format(op_info_prefix))
postped_data_dict, err_channeldata_dict \
= self._run_postprocess(
parsed_data_dict, midped_data_dict, op_info_prefix)
end_postp = profiler.record("postp#{}_1".format(op_info_prefix))
_LOGGER.log(level=1,
msg="(logid={}) {} postp[{} ms]".format(
midped_data_dict.keys(), op_info_prefix,
                            (end_postp - start_postp) / 1e3))
try:
for data_id, err_channeldata in err_channeldata_dict.items():
self._push_to_output_channels(
                        data=err_channeldata,
channels=output_channels,
client_need_profile=need_profile_dict[data_id],
profile_set=profile_dict[data_id])
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
if len(postped_data_dict) == 0:
continue
# push data to channel (if run succ)
try:
profile_str = profiler.gen_profile_str()
for data_id, postped_data in postped_data_dict.items():
if self._server_use_profile:
sys.stderr.write(profile_str)
self._push_to_output_channels(
data=postped_data,
channels=output_channels,
profile_str=profile_str,
client_need_profile=need_profile_dict[data_id],
profile_set=profile_dict[data_id])
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
def _initialize(self, is_thread_op, client_type, concurrency_idx):
if is_thread_op:
with self._for_init_op_lock:
if not self._succ_init_op:
# for the threaded version of Op, each thread cannot get its concurrency_idx
self.concurrency_idx = None
# init client
self.client = self.init_client(
client_type, self._client_config,
self._server_endpoints, self._fetch_names)
# user defined
self.init_op()
self._succ_init_op = True
self._succ_close_op = False
else:
self.concurrency_idx = concurrency_idx
# init client
self.client = self.init_client(client_type, self._client_config,
self._server_endpoints,
self._fetch_names)
# user defined
self.init_op()
# use a separate TimeProfiler per thread or process
profiler = TimeProfiler()
profiler.enable(True)
return profiler
def _finalize(self, is_thread_op):
if is_thread_op:
with self._for_close_op_lock:
if not self._succ_close_op:
self._profiler = None
self.client = None
self._succ_init_op = False
self._succ_close_op = True
def _log(self, info):
return "{} {}".format(self.name, info)
class RequestOp(Op):
""" RequestOp do not run preprocess, process, postprocess. """
def __init__(self):
# PipelineService.name = "@G"
super(RequestOp, self).__init__(name="@G", input_ops=[])
# init op
try:
self.init_op()
except Exception as e:
_LOGGER.critical("Op(Request) Failed to init: {}".format(e))
os._exit(-1)
def unpack_request_package(self, request):
dictdata = {}
for idx, key in enumerate(request.key):
data = request.value[idx]
            try:
                # NOTE: eval on request payloads executes arbitrary expressions
                # and is unsafe with untrusted input; fall back to the raw
                # string if evaluation fails (see the hedged sketch after this
                # class for a safer literal-only alternative).
                data = eval(data)
            except Exception:
                pass
dictdata[key] = data
return dictdata
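
# Hedged alternative sketch (not part of the original pipeline): for payloads
# that are plain Python literals, `ast.literal_eval` avoids executing arbitrary
# expressions. The helper name below is made up for illustration and is not
# used elsewhere in this module.
import ast

def _safe_parse_literal(raw):
    """Parse a Python literal from `raw`, falling back to the raw value."""
    try:
        return ast.literal_eval(raw)
    except (ValueError, SyntaxError, TypeError):
        return raw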
class ResponseOp(Op):
""" ResponseOp do not run preprocess, process, postprocess. """
def __init__(self, input_ops):
super(ResponseOp, self).__init__(name="@R", input_ops=input_ops)
# init op
try:
self.init_op()
except Exception as e:
_LOGGER.critical("Op(ResponseOp) Failed to init: {}".format(
e, exc_info=True))
os._exit(-1)
def pack_response_package(self, channeldata):
resp = pipeline_service_pb2.Response()
resp.ecode = channeldata.ecode
if resp.ecode == ChannelDataEcode.OK.value:
if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
feed = channeldata.parse()
# ndarray to string:
# https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                # `np.nan` is no longer accepted as a threshold by newer NumPy
                np.set_printoptions(threshold=sys.maxsize)
for name, var in feed.items():
resp.value.append(var.__repr__())
resp.key.append(name)
elif channeldata.datatype == ChannelDataType.DICT.value:
feed = channeldata.parse()
for name, var in feed.items():
if not isinstance(var, str):
resp.ecode = ChannelDataEcode.TYPE_ERROR.value
resp.error_info = self._log(
"fetch var type must be str({}).".format(
type(var)))
_LOGGER.error("(logid={}) Failed to pack RPC "
"response package: {}".format(
channeldata.id, resp.error_info))
break
resp.value.append(var)
resp.key.append(name)
else:
resp.ecode = ChannelDataEcode.TYPE_ERROR.value
resp.error_info = self._log(
"error type({}) in datatype.".format(channeldata.datatype))
_LOGGER.error("(logid={}) Failed to pack RPC response"
" package: {}".format(channeldata.id,
resp.error_info))
else:
resp.error_info = channeldata.error_info
return resp
class VirtualOp(Op):
''' For connecting two channels. '''
def __init__(self, name, concurrency=1):
super(VirtualOp, self).__init__(
name=name, input_ops=None, concurrency=concurrency)
self._virtual_pred_ops = []
def add_virtual_pred_op(self, op):
self._virtual_pred_ops.append(op)
def _actual_pred_op_names(self, op):
# can use disjoint-set, but it's not necessary
if not isinstance(op, VirtualOp):
return [op.name]
names = []
for x in op._virtual_pred_ops:
names.extend(self._actual_pred_op_names(x))
return names
def add_output_channel(self, channel):
if not isinstance(channel, (ThreadChannel, ProcessChannel)):
_LOGGER.critical(
self._log("Failed to add output_channel: output_channel"
" must be Channel type, not {}".format(
type(channel))))
os._exit(-1)
for op in self._virtual_pred_ops:
for op_name in self._actual_pred_op_names(op):
channel.add_producer(op_name)
self._outputs.append(channel)
def _run(self, concurrency_idx, input_channel, output_channels, client_type,
is_thread_op):
op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        tid = threading.current_thread().ident
        # match the signature used by Op._auto_batching_generator above
        # (get_log_func/log_func appear to be leftovers from an older version)
        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=1,
            timeout=None,
            op_info_prefix=op_info_prefix)
while True:
try:
channeldata_dict_batch = next(batch_generator)
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
try:
for channeldata_dict in channeldata_dict_batch:
for name, data in channeldata_dict.items():
self._push_to_output_channels(
data, channels=output_channels, name=name)
except ChannelStopError:
_LOGGER.debug("{} Stop.".format(op_info_prefix))
self._finalize(is_thread_op)
break
|
import numpy as np
def alpha_pass(OBS, A, B, Pi):
'''
OBS = (1 x k)
A = (n x n)
B = (n x m)
Pi = (1 x n)
alpha = (n x k)
'''
alpha = np.zeros((A.shape[0], OBS.shape[1]))
    alpha[:, [0]] = Pi.transpose() * B[:, OBS[:, 0]]  # Use Pi to calculate the first column (in accordance with the equation)
for testInd in range(1, OBS.shape[1]):
for stateInd in range(A.shape[0]):
alpha[stateInd, testInd] = (alpha[:, [testInd-1]].T).dot(A[:, [stateInd]]) * B[stateInd, OBS[:, testInd]] # Apply Dynamic Programming Equation
return alpha
def beta_pass(OBS, A, B):
'''
OBS = (1 x k)
A = (n x n)
B = (n x m)
beta = (n x k)
'''
beta = np.zeros((A.shape[0], OBS.shape[1]))
beta[:, OBS.shape[1]-1] = np.ones((A.shape[0])) # Set the values for the last state to 1
for testInd in range(OBS.shape[1]-2, -1, -1):
for stateInd in range(A.shape[0]):
beta[stateInd, testInd] = (beta[:, testInd+1] * B[:, OBS[:, testInd+1]].T).dot(A[stateInd, :].T)
return beta
def baum_welch(OBS, A, B, Pi, iterNum=100):
'''
    Expectation-Maximization (EM) algorithm to optimize the parameters of the HMM so that the model best explains the observed data.
OBS = (1 x k)
A = (n x n)
B = (n x m)
Pi = (1 x n)
alpha = (n x k)
beta = (n x k)
'''
stateNum = A.shape[0] # n
testNum = OBS.shape[1] # k
for i in range(iterNum):
alpha = alpha_pass(OBS, A, B, Pi)
beta = beta_pass(OBS, A, B)
xi = np.zeros((stateNum, stateNum, testNum-1))
        for t in range(testNum-1):
            # normalization term: total probability over all state pairs at time t
            normalize = np.dot(
                np.dot(alpha[:, [t]].T, A) * B[:, OBS[0, t+1]].T,
                beta[:, t+1]
            )
            for s in range(stateNum):
                update = alpha[s, t] * A[s, :] * B[:, OBS[0, t+1]].T * beta[:, t+1]
                xi[s, :, t] = update / normalize
        gamma = np.sum(xi, axis=1)
        A = np.sum(xi, 2) / np.sum(gamma, axis=1).reshape((-1, 1))
        # append the expected state counts for the final time step
        gamma = np.hstack((gamma, np.sum(xi[:, :, testNum-2], axis=0).reshape((-1, 1))))
        obsNum = B.shape[1]
        denom = np.sum(gamma, axis=1)
        for j in range(obsNum):
            B[:, j] = np.sum(gamma[:, OBS[0, :] == j], axis=1)
        B = np.divide(B, denom.reshape((-1, 1)))
return {
"A": A,
"B": B
    }
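
# --- Hedged usage sketch (toy 2-state HMM; all numbers are made up) ---
if __name__ == '__main__':
    OBS = np.array([[0, 1, 0, 1]])            # 1 x k observation indices
    A = np.array([[0.7, 0.3], [0.4, 0.6]])    # n x n transition matrix
    B = np.array([[0.9, 0.1], [0.2, 0.8]])    # n x m emission matrix
    Pi = np.array([[0.6, 0.4]])               # 1 x n initial distribution
    alpha = alpha_pass(OBS, A, B, Pi)
    beta = beta_pass(OBS, A, B)
    print(alpha.shape, beta.shape)            # (2, 4) (2, 4)
    # baum_welch(OBS, A, B, Pi, iterNum=10) returns re-estimated A and B.
 |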
"""Catalyst class and its metaclass."""
import inspect
from collections import namedtuple
from typing import Iterable, Callable, Any, Mapping
from functools import wraps, partial
from .base import CatalystABC
from .fields import BaseField, FieldDict, Field
from .groups import FieldGroup
from .exceptions import ValidationError, ExceptionType
from .utils import (
missing, assign_attr_or_item_getter, assign_item_getter,
LoadResult, DumpResult, BaseResult, no_processing,
bind_attrs, bind_not_ellipsis_attrs,
)
# type hints
PartialFields = namedtuple('PartialFields', [
'field', 'source', 'target', 'required', 'default', 'field_method'])
PartialGroups = namedtuple('PartialGroups', [
'group_method', 'error_key', 'source_target_pairs'])
def _override_fields(fields: FieldDict, attrs: dict):
"""Collect fields from dict, override fields and remove non fields."""
for name, obj in attrs.items():
if isinstance(obj, type) and issubclass(obj, BaseField):
raise TypeError(
f'Field for "{name}" must be declared as a Field instance, '
f'not a class. Did you mean "{obj.__name__}()"?')
if isinstance(obj, BaseField):
fields[name] = obj # override Field
elif name in fields:
del fields[name] # remove non Field
return fields
def _get_fields_from_classes(fields: FieldDict, classes: Iterable[type]):
"""Collect fields from base classes, following method resolution order."""
for klass in reversed(classes):
if issubclass(klass, CatalystABC):
_override_fields(fields, klass.fields)
else:
# reverse and ignore <class 'object'>
for base in klass.mro()[-2::-1]:
_override_fields(fields, base.__dict__)
return fields
def _get_fields_from_instance(fields: FieldDict, obj):
"""Collect fields from instance."""
if isinstance(obj, CatalystABC):
attrs = obj.fields
else:
attrs = {attr: getattr(obj, attr) for attr in dir(obj)}
return _override_fields(fields, attrs)
def _set_fields(cls_or_obj, fields: FieldDict):
"""Set fields for `Catalyst` class or its instance.
Generate `Field.name` or `Field.key` if it is None.
"""
for name, field in fields.items():
if field.name is None:
field.name = cls_or_obj._format_field_name(name)
if field.key is None:
field.key = cls_or_obj._format_field_key(name)
# inject fields that FieldGroup declared, after all fields are formatted
for field in fields.values():
if isinstance(field, FieldGroup):
field.set_fields(fields)
cls_or_obj.fields = fields
class CatalystMeta(type):
"""Metaclass for `Catalyst` class. Binds fields to `fields` attribute."""
def __new__(cls, name, bases, attrs):
new_cls = super().__new__(cls, name, bases, attrs)
fields = {}
_get_fields_from_classes(fields, bases)
_override_fields(fields, attrs)
_set_fields(new_cls, fields)
return new_cls
class Catalyst(CatalystABC, metaclass=CatalystMeta):
"""Base Catalyst class for converting complex datatypes to and from
native Python datatypes.
Some instantiation params can set default values by class variables.
The available params are `schema`, `raise_error`, `all_errors`,
`except_exception`, `process_aliases`, `DumpResult` and `LoadResult`.
:param schema: A dict or instance or class which contains fields. This
is a convenient way to avoid name clashes when fields are Python
keywords or conflict with other attributes.
:param dump_required: Raise error if the field value doesn't exist.
The `Field.dump_required` will take priority, if it is not `None`.
:param load_required: Similar to `dump_required`.
:param dump_default: The default value when the field value doesn't exist.
If set, `dump_required` has no effect.
Particularly, the `missing` object means that this field will not exist
in result, and `None` means that default value is `None`.
The `Field.dump_default` will take priority, if it is not `missing`.
:param load_default: Similar to `dump_default`.
    :param raise_error: Whether to raise an error if any error occurs when
        processing data. Errors are collected into an error dict whose keys
        are field names, indices of items in an iterable, or process names.
    :param all_errors: Whether to collect every error of the data and
        of the processes.
    :param except_exception: Which types of errors should be collected
        into the process result. Usage is the same as a `try/except` statement.
    :param process_aliases: A dict mapping process names to process aliases.
        When a process goes wrong and its name is in the `process_aliases`
        dict, the alias is used as a key in the error dict, with the error
        as the value.
Available process names are 'pre_dump', 'dump', 'post_dump',
'pre_dump_many', 'dump_many', 'post_dump_many', 'load', etc.
:param include: The fields to include in both dump and load fields.
If None, all fields are used.
If `dump_include` or `load_include` is passed, `include` will
not be used for dump or load fields.
:param exclude: The fields to exclude from both dump and load fields.
If a field appears in both `include` and `exclude`, it is not used.
If `dump_exclude` or `load_exclude` is passed, `exclude` will
not be used for dump or load fields.
`Field.no_dump` and `Field.no_load` are also used to filter fields.
The fields filtering works like set operation, for example::
used_fields = original_fields & include - exclude
:param dump_include: The fields to include in dump fields.
:param dump_exclude: The fields to exclude from dump fields.
:param load_include: The fields to include in load fields.
    :param load_exclude: The fields to exclude from load fields.
"""
schema: Any = None
raise_error = False
all_errors = True
except_exception: ExceptionType = Exception
process_aliases = {}
dump_required = True
load_required = False
dump_default = missing
load_default = missing
dump_result_class = DumpResult
load_result_class = LoadResult
fields: FieldDict = {}
# assign getter for dumping & loading
_assign_dump_getter = staticmethod(assign_attr_or_item_getter)
_assign_load_getter = staticmethod(assign_item_getter)
# generate field name and key and custom naming style
_format_field_key = staticmethod(no_processing)
_format_field_name = staticmethod(no_processing)
def __init__(
self,
schema: Any = None,
raise_error: bool = None,
all_errors: bool = None,
except_exception: ExceptionType = None,
process_aliases: Mapping[str, str] = None,
dump_required: bool = None,
load_required: bool = None,
dump_default: Any = ...,
load_default: Any = ...,
include: Iterable[str] = None,
exclude: Iterable[str] = None,
dump_include: Iterable[str] = None,
dump_exclude: Iterable[str] = None,
load_include: Iterable[str] = None,
load_exclude: Iterable[str] = None):
bind_attrs(
self,
schema=schema,
raise_error=raise_error,
all_errors=all_errors,
except_exception=except_exception,
process_aliases=process_aliases,
dump_required=dump_required,
load_required=load_required,
)
# `None` is meaningful to `dump_default` and `load_default`,
# use `...` to represent that the arguments are not given
# which also provides type hints.
bind_not_ellipsis_attrs(
self,
dump_default=dump_default,
load_default=load_default,
)
# set fields from a dict or instance or class
schema = self.schema
if schema:
fields = self.fields.copy()
if isinstance(schema, Mapping):
_override_fields(fields, schema)
elif isinstance(schema, type):
_get_fields_from_classes(fields, [schema])
else:
_get_fields_from_instance(fields, schema)
_set_fields(self, fields)
# include fields
if include is None:
include = self.fields.keys()
if dump_include is None:
dump_include = include
if load_include is None:
load_include = include
# exclude fields
exclude = set() if exclude is None else set(exclude)
dump_exclude = exclude if dump_exclude is None else set(dump_exclude)
load_exclude = exclude if load_exclude is None else set(load_exclude)
if dump_exclude:
dump_include = (field for field in dump_include if field not in dump_exclude)
if load_exclude:
load_include = (field for field in load_include if field not in load_exclude)
try:
self._dump_fields = self._copy_fields(
self.fields, dump_include,
lambda key: not self.fields[key].no_dump)
self._load_fields = self._copy_fields(
self.fields, load_include,
lambda key: not self.fields[key].no_load)
except KeyError as error:
raise ValueError(f'Field "{error.args[0]}" does not exist.') from error
# make processors when initializing for shorter run time
self._do_dump = self._make_processor('dump', False)
self._do_load = self._make_processor('load', False)
self._do_dump_many = self._make_processor('dump', True)
self._do_load_many = self._make_processor('load', True)
@staticmethod
def _copy_fields(
fields: FieldDict, keys: Iterable[str],
is_copying: Callable[[str], bool]) -> FieldDict:
new_fields = {}
for key in keys:
if is_copying(key):
new_fields[key] = fields[key]
return new_fields
@staticmethod
def _process_one(
data: Any,
all_errors: bool,
assign_getter: Callable,
partial_fields: Iterable[PartialFields],
partial_groups: Iterable[PartialGroups],
except_exception: ExceptionType):
"""Process one object using fields and catalyst options."""
# According to the type of `data`, assign a function to get field value from `data`
get_value = assign_getter(data)
valid_data, errors, invalid_data = {}, {}, {}
        # process data for each field
for field, source, target, required, default, field_method in partial_fields:
value = missing
try:
value = get_value(data, source, missing)
if value is missing:
value = default() if callable(default) else default
if value is not missing:
value = field_method(value)
if value is missing:
if required:
raise field.error('required')
else:
valid_data[target] = value
except except_exception as e:
if isinstance(e, ValidationError) and isinstance(e.detail, BaseResult):
detail: BaseResult = e.detail
# distribute nested data in BaseResult
valid_data[target] = detail.valid_data
errors[source] = detail.errors
invalid_data[source] = detail.invalid_data
else:
# collect errors and invalid data
errors[source] = e
if value is not missing:
invalid_data[source] = value
if not all_errors:
break
# field groups depend on fields, if error occurs, do not continue
if errors:
return valid_data, errors, invalid_data
# process data for each field groups
for group_method, error_key, source_target_pairs in partial_groups:
try:
valid_data = group_method(valid_data, original_data=data)
except except_exception as e:
if isinstance(e, ValidationError) and isinstance(e.detail, BaseResult):
detail: BaseResult = e.detail
# distribute nested data in BaseResult
try:
valid_data.update(detail.valid_data)
errors.update(detail.errors)
invalid_data.update(detail.invalid_data)
except (ValueError, TypeError):
errors[error_key] = detail.format_errors()
else:
# collect errors and invalid data
errors[error_key] = e
for source, target in source_target_pairs:
if target in valid_data:
invalid_data[source] = valid_data.pop(target)
if not all_errors:
break
return valid_data, errors, invalid_data
@staticmethod
def _process_many(data: Iterable, all_errors: bool, process_one: Callable):
"""Process multiple objects using fields and catalyst options."""
valid_data, errors, invalid_data = [], {}, {}
for i, item in enumerate(data):
result = process_one(item, raise_error=False)
valid_data.append(result.valid_data)
if not result.is_valid:
errors[i] = result.errors
invalid_data[i] = result.invalid_data
if not all_errors:
break
return valid_data, errors, invalid_data
def _make_processor(self, name: str, many: bool) -> Callable:
"""Create processor for dumping and loading processes. And wrap basic
main process with pre and post processes. Determine parameters for
different processes in advance to reduce processing time.
"""
if name == 'dump':
result_class = self.dump_result_class
elif name == 'load':
result_class = self.load_result_class
else:
raise ValueError('Argument "name" must be "dump" or "load".')
all_errors = self.all_errors
except_exception = self.except_exception
if many:
main_process = partial(
self._process_many,
all_errors=all_errors,
process_one=getattr(self, name))
method_name = name + '_many'
else:
method_name = name
if name == 'dump':
assign_getter = self._assign_dump_getter
field_dict = self._dump_fields
source_attr = 'name'
target_attr = 'key'
default_attr = 'dump_default'
required_attr = 'dump_required'
else:
assign_getter = self._assign_load_getter
field_dict = self._load_fields
source_attr = 'key'
target_attr = 'name'
default_attr = 'load_default'
required_attr = 'load_required'
# the required options for all fields
general_required = getattr(self, required_attr)
general_default = getattr(self, default_attr)
partial_fields, partial_groups = [], []
for field in field_dict.values():
if isinstance(field, FieldGroup):
# get partial arguments from FieldGroup
group: FieldGroup = field
group_method = getattr(group, method_name)
group_method = self._modify_processer_parameters(group_method)
error_key = getattr(group, source_attr)
source_target_pairs = []
for f in group.fields.values():
source = getattr(f, source_attr)
target = getattr(f, target_attr)
source_target_pairs.append((source, target))
partial_groups.append(
PartialGroups(group_method, error_key, source_target_pairs))
elif isinstance(field, Field):
# get partial arguments from Field
field_method = getattr(field, method_name)
source = getattr(field, source_attr)
target = getattr(field, target_attr)
required = getattr(field, required_attr)
if required is None:
required = general_required
default = getattr(field, default_attr)
if default is ...:
default = general_default
partial_fields.append(
PartialFields(field, source, target, required, default, field_method))
main_process = partial(
self._process_one,
all_errors=all_errors,
assign_getter=assign_getter,
partial_fields=partial_fields,
partial_groups=partial_groups,
except_exception=except_exception)
# assign params as closure variables for processor
pre_process_name = f'pre_{method_name}'
post_process_name = f'post_{method_name}'
pre_process = getattr(self, pre_process_name)
post_process = getattr(self, post_process_name)
post_process = self._modify_processer_parameters(post_process)
process_aliases = self.process_aliases
default_raise_error = self.raise_error
def integrated_process(data, raise_error):
"""The actual execution function to do dumping and loading."""
if raise_error is None:
raise_error = default_raise_error
try:
# pre process
process_name = pre_process_name
valid_data = pre_process(data)
# main process
process_name = method_name
valid_data, errors, invalid_data = main_process(valid_data)
# post process
if not errors:
process_name = post_process_name
valid_data = post_process(valid_data, original_data=data)
except except_exception as e:
# handle error which raised during processing
key = process_aliases.get(process_name, process_name)
errors = {key: e}
invalid_data = data
if many:
valid_data = []
else:
valid_data = {}
result = result_class(valid_data, errors, invalid_data)
if errors and raise_error:
raise ValidationError(msg=result.format_errors(), detail=result)
return result
return integrated_process
def _modify_processer_parameters(self, func):
"""Modify the parameters of the processer function.
Ignore `original_data` if it's not one of the parameters.
"""
sig = inspect.signature(func)
if 'original_data' not in sig.parameters:
@wraps(func)
def wrapper(data, original_data=None):
return func(data)
return wrapper
return func
def _process_args(self, func: Callable, processor: Callable) -> Callable:
"""Decorator for handling args by catalyst before function is called.
        The wrapper function takes the same args as the raw function.
        If args are invalid, an error will be raised. In general, `*args` should
        be handled by `ListField`, and `**kwargs` should be handled by `NestedField`.
"""
sig = inspect.signature(func)
@wraps(func)
def wrapper(*args, **kwargs):
ba = sig.bind(*args, **kwargs)
result = processor(ba.arguments, raise_error=True)
ba.arguments.update(result.valid_data)
return func(*ba.args, **ba.kwargs)
return wrapper
def dump(self, data: Any, raise_error: bool = None) -> DumpResult:
"""Serialize `data` according to defined fields."""
return self._do_dump(data, raise_error)
def load(self, data: Any, raise_error: bool = None) -> LoadResult:
"""Deserialize `data` according to defined fields."""
return self._do_load(data, raise_error)
def dump_many(self, data: Iterable, raise_error: bool = None) -> DumpResult:
"""Serialize multiple objects."""
return self._do_dump_many(data, raise_error)
def load_many(self, data: Iterable, raise_error: bool = None) -> LoadResult:
"""Deserialize multiple objects."""
return self._do_load_many(data, raise_error)
def dump_args(self, func: Callable) -> Callable:
"""Decorator for serializing arguments of the function."""
return self._process_args(func, self.dump)
def load_args(self, func: Callable = None) -> Callable:
"""Decorator for deserializing arguments of the function."""
return self._process_args(func, self.load)
# pre and post processes
def pre_dump(self, data):
return data
def post_dump(self, data, original_data=None):
return data
def pre_load(self, data):
return data
def post_load(self, data, original_data=None):
return data
def pre_dump_many(self, data):
return data
def post_dump_many(self, data, original_data=None):
return data
def pre_load_many(self, data):
return data
def post_load_many(self, data, original_data=None):
return data
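
# --- Hedged usage sketch (illustrative only): assumes the generic `Field`
# imported above is constructible with no arguments and passes values through;
# the concrete field classes in `.fields` may require options. ---
if __name__ == '__main__':  # pragma: no cover
    class DemoCatalyst(Catalyst):
        name = Field()
        age = Field()

    result = DemoCatalyst().load({'name': 'Ann', 'age': 30})
    print(result.is_valid, result.valid_data, result.errors)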
|
# The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: <NAME>
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, XArrayWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries, convertToStr
# 3rd party imports
import pandas as pd
import numpy as np
import xarray as xr
import pygrib
# Standard library imports
from collections import OrderedDict
from http.cookiejar import LWPCookieJar
import os
from urllib.parse import urlencode
from urllib.request import HTTPCookieProcessor, build_opener
from datetime import datetime
def _extractParamters(data_list, data_names, levels):
'''
Extract data from grib2 era-I data
Written for ERA Interim atmospheric model analysis interpolated to pressure levels
@param data_list: List of opened files in pygrib
@param data_names: Names of data to pull from pygrib files
    @param levels: Pressure levels of the data
'''
# This list contains redundant data
skip_list = ['latLonValues', 'distinctLatitudes','distinctLongitudes',
'values', 'longitudes','latitudes', 'level', 'year',
'month','day','hour','minute','second']
# These records can change every level and date
multi_level_records = ['referenceValue', 'maximum', 'minimum','average',
'standardDeviation', 'skewness', 'kurtosis',
'binaryScaleFactor', 'packingError', 'unpackedError',
'referenceValueError']
# These records can change every date, but not every level
multi_date_records = ['yearOfCentury','dataDate', 'validityTime',
'julianDay', 'validityDate', 'dataTime']
class MetadataMismatch(Exception):
''' Raised if Metadata from different levels doesn't match '''
lat = None
lon = None
data_dict = OrderedDict()
meta_dict = OrderedDict()
data_time_list = []
date_range = []
meta_time_dict = OrderedDict()
meta_only_time_dict = OrderedDict()
# Loop over every data file
for weather in data_list:
weather.rewind()
# Need to determine the date of each file once
measured_date = False
# Loop over every requested data product
for data_name in data_names:
# If no meta data exists yet, add it
if data_name not in meta_dict:
meta_dict[data_name] = OrderedDict()
# List to hold data
data_list = []
prev_level = None
meta_multi_dict = OrderedDict()
meta_only_time_temp_dict = OrderedDict()
for index, record in enumerate(weather.select(name=data_name)):
# Need to record latitude and longitude the first time through
if lat is None:
lat, lon = record.latlons()
# If date hasn't been recorded yet, save it
if measured_date == False:
date = datetime(record['year'], record['month'], record['day'],
record['hour'], record['minute'], record['second'])
date_range.append(pd.to_datetime(date))
measured_date = True
# If the data is iterated through out of order
# throw an exception
if record['level'] != levels[index]:
raise RuntimeError('Level mismatch')
# loop over key in the record
for label in record.keys():
if label not in skip_list:
try:
# This covers if the record can change
# each level/date
if label in multi_level_records:
if label not in meta_multi_dict:
meta_multi_dict[label] = []
meta_multi_dict[label].append(record[label])
                            # This covers metadata that changes in time but not in levels
elif label in multi_date_records:
if label not in meta_only_time_temp_dict:
meta_only_time_temp_dict[label] = OrderedDict()
meta_only_time_temp_dict[label]['index'] = date_range[-1]
meta_only_time_temp_dict[label]['data'] = record[label]
else:
if meta_only_time_temp_dict[label]['index'] != date_range[-1] or \
meta_only_time_temp_dict[label]['data'] != record[label]:
raise MetadataMismatch
# If the metadata doesn't exist yet, add it to the metadata dictionary
elif label not in meta_dict[data_name]:
meta_dict[data_name][label] = record[label]
# If the metadata already exists, check to make sure it
# hasn't changed
elif np.all(meta_dict[data_name][label] != record[label]):
print(label, meta_dict[data_name][label], record[label])
raise MetadataMismatch('Levels have different metadata')
                        # Sometimes there is no value for a key; a RuntimeError
                        # is raised in this case
                        except RuntimeError:
                            if label not in meta_dict[data_name] or meta_dict[data_name][label] is None:
                                meta_dict[data_name][label] = None
else:
raise MetadataMismatch('Levels have different metadata')
# Record the data for this level
data_list.append(record['values'])
# If data hasn't been added to the dictionary yet, add it
if data_name not in data_dict:
data_dict[data_name] = []
# Create a stack from each level
data_dict[data_name].append(np.stack(data_list))
# Need to save metadata that changes each level/date
for label, data in meta_multi_dict.items():
# Check that it has the correct number of data points
if len(data) != len(levels):
raise RuntimeError('Missing metadata')
# If no key is in meta_time_dict for the data, add it
# This dictionary stores metadata that changes with
# levels / time
if data_name not in meta_time_dict:
meta_time_dict[data_name] = OrderedDict()
# If this particular metadata hasn't been saved before,
# add an entry for it
if label not in meta_time_dict[data_name]:
meta_time_dict[data_name][label] = OrderedDict()
# Finally, add the metadata as a pandas series to the metadata
meta_time_dict[data_name][label][date_range[-1]] = pd.Series(data,index=levels)
meta_time_dict[data_name][label][date_range[-1]].index.name = 'Level'
# Need to save metadata that changes for date only
for label, data in meta_only_time_temp_dict.items():
if data_name not in meta_only_time_dict:
meta_only_time_dict[data_name] = OrderedDict()
if label not in meta_only_time_dict[data_name]:
meta_only_time_dict[data_name][label] = OrderedDict()
meta_only_time_dict[data_name][label]['index'] = []
meta_only_time_dict[data_name][label]['data'] = []
meta_only_time_dict[data_name][label]['index'].append(data['index'])
meta_only_time_dict[data_name][label]['data'].append(data['data'])
# Convert sequence of data cubes to a 4d array
for label in data_dict.keys():
data_dict[label] = (['time','z','y','x'], np.stack(data_dict[label]))
# Create dataframes for metadata that changes in levels and time
for data_name, labels in meta_time_dict.items():
for label, dates in labels.items():
meta_dict[data_name][label] = pd.DataFrame.from_dict(dates)
# Create pandas series for metadata that changes in time
for data_name, labels in meta_only_time_dict.items():
for label, data in labels.items():
meta_dict[data_name][label] = pd.Series(data['data'], index=data['index'])
meta_dict[data_name][label].index.name = 'Date'
# Create dataset
ds = xr.Dataset(data_dict, coords={'lat': (['y','x'], lat),
'lon': (['y','x'], lon),
'pressure': (['z'], levels),
'time': date_range})
# Set metadata
for label, metadata in meta_dict.items():
ds[label].attrs = metadata
return ds
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving ERA-I data '''
def __init__(self, date_list, data_names, username, password):
'''
Initialize Data Fetcher
@param date_list: list of dates
@param data_names: list of data names
@param username: UCAR username
@param password: <PASSWORD>
'''
self.date_list = date_list
self.data_names = data_names
self.username = username
self.password = password
super(DataFetcher, self).__init__()
def output(self):
'''
Generate data wrapper
@return Era-I weather in a data wrapper
'''
class ExpiredCookieError(Exception):
''' Exception to use if cookie is expired '''
class IncorrectNumberOfCookies(Exception):
''' Exception to use if the number of cookies loaded is incorrect '''
def getCookies(cookies):
request = OrderedDict()
request['email'] = self.username
request['passwd'] = self.password
request['action'] = 'login'
data = urlencode(request).encode()
url_opener = build_opener(HTTPCookieProcessor(cookies))
with url_opener.open('https://rda.ucar.edu/cgi-bin/login', data) as myurl:
cookies.save()
# Get absolute path to data directory
data_location = DataFetcherCache.getDataLocation('era_interim')
# Create cookiejar
cookiejar = LWPCookieJar(os.path.join(data_location, 'cookies.txt'))
try:
cookiejar.load()
if len(cookiejar) != 3:
raise IncorrectNumberOfCookies
            # pd.datetime was removed in newer pandas; use datetime directly
            current_time = pd.to_datetime(datetime.utcnow())
for cookie in cookiejar:
expiration_time = pd.to_datetime(cookie.expires, unit='s')
# If cookie has less than a week left, recreate all cookies
if (expiration_time - current_time) < pd.to_timedelta('7D'):
raise ExpiredCookieError
# No cookie file
except (FileNotFoundError, IncorrectNumberOfCookies):
cookiejar.clear()
getCookies(cookiejar)
# Cookies will expire soon or have already expired
except ExpiredCookieError:
cookiejar.clear()
getCookies(cookiejar)
base_url = 'https://rda.ucar.edu/data/ds627.0/ei.oper.an.pl/'
url_list = [ date.strftime('%Y%m/ei.oper.an.pl.regn128sc.%Y%m%d%H')
for date in self.date_list ]
url_list = [ base_url + url for url in url_list ]
file_list = self.cacheData('era_interim', url_list, cookiejar=cookiejar)
pygrib_files = [pygrib.open(filename) for filename in file_list]
levels = [
1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100,
125, 150, 175, 200, 225, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 775, 800, 825, 850, 875, 900,
925, 950, 975, 1000
]
data = _extractParamters(pygrib_files, self.data_names, levels)
wrapped_data = XArrayWrapper(data, self.data_names)
return wrapped_data
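
# --- Hedged usage sketch: requires UCAR RDA credentials and network access;
# the credential values and the 'Temperature' record name below are
# placeholders/assumptions, not verified against the RDA catalog. ---
if __name__ == '__main__':
    dates = pd.date_range('2015-01-01', periods=2, freq='6H')
    fetcher = DataFetcher(date_list=dates, data_names=['Temperature'],
                          username='user@example.com',
                          password='not-a-real-password')
    # wrapper = fetcher.output()  # downloads GRIB files, returns an XArrayWrapper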
|
<filename>deeplearning1/nbs/Homework_2/organize_data.py
#!/home/ubuntu/anaconda2/bin/python
from collections import defaultdict
import os
import sys
import numpy as np
import shutil
def read_label_mapping(l_file):
l_dict = defaultdict(list)
with open(l_file) as ifh:
for line in ifh:
(f, l) = line.strip().split(",")
l_dict[l].append(f)
return l_dict
def move_to_label_dirs(l_map, dirname):
for k in l_map.keys():
k_dir = os.path.join(dirname, k)
# create a directory for every label
mkdir_safe(k_dir)
# For every img with this label, attempt to move it
for f in l_map[k]:
src_path = os.path.join(dirname, f)
dst_path = os.path.join(k_dir, f)
if os.path.exists(src_path):
os.rename(src_path, dst_path)
def reorganize_dir(l_map, dirname, val_frac):
    if val_frac > 0.5:
        print "validation fraction should be smaller than 0.5"
        sys.exit(-1)
# Create test/val/train dirs
test = os.path.join(dirname, "test")
train = os.path.join(dirname, "train")
val = os.path.join(dirname, "val")
mkdir_safe(test)
mkdir_safe(train)
mkdir_safe(val)
labelled_imgs = []
for v in l_map.values():
labelled_imgs.extend(v)
imgs = find_jpgs(dirname)
# Move unlabelled imgs to test directory
for f in imgs:
if not f in labelled_imgs:
src_path = os.path.join(dirname, f)
dst_path = os.path.join(test, f)
os.rename(src_path, dst_path)
    # For all remaining (i.e., labelled) images, randomly select per label
    # which ones go to validation and which to train. This is important
    # because we want at least 1 whale per label during training.
move_to_label_dirs(l_map, dirname)
# In the sample set, some whale types are not present
# keep track of that here
no_whales_found = []
for label in l_map.keys():
label_dir = os.path.join(dirname, label)
if not os.path.exists(label_dir):
continue
imgs = find_jpgs(label_dir)
img_cnt = len(imgs)
if img_cnt == 0:
no_whales_found.append(label)
shutil.rmtree(label_dir)
continue
val_cnt = int(val_frac * img_cnt)
trn_cnt = img_cnt - val_cnt
print img_cnt, val_cnt, trn_cnt
assert(trn_cnt >= 1)
# Randomly pick images for the validation set of this label
if val_cnt > 0:
label_val = os.path.join(val, label)
mkdir_safe(label_val)
shuf = np.random.permutation(imgs)
for i in range(val_cnt): os.rename(os.path.join(label_dir, shuf[i]), os.path.join(label_val, shuf[i]))
# Move all remaining images to the training set
shutil.move(label_dir, os.path.join(train))
print "Reorganized directory. Did not find the following whales: "
print no_whales_found
def find_jpgs(dirname):
imgs = []
for f in os.listdir(dirname):
if f.endswith(".jpg"):
imgs.append(f)
return imgs
def mkdir_safe(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
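
# --- Hedged demo (not part of the original homework script): builds a tiny
# labelled image set with made-up file and label names, then runs the
# reorganization on it. ---
def demo_split(tmp_dir="demo_imgs", val_frac=0.2):
    mkdir_safe(tmp_dir)
    sample = {"whale_1": ["a.jpg", "b.jpg"], "whale_2": ["c.jpg"]}
    label_path = os.path.join(tmp_dir, "labels.csv")
    with open(label_path, "w") as ofh:
        for label, files in sample.items():
            for f in files:
                # touch an empty stand-in image and record its label
                open(os.path.join(tmp_dir, f), "w").close()
                ofh.write("%s,%s\n" % (f, label))
    labels = read_label_mapping(label_path)
    reorganize_dir(labels, tmp_dir, val_frac)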
if __name__ == "__main__":
print "Hello Texas"
label_file = "train.csv"
labels = read_label_mapping(label_file)
reorganize_dir(labels, "imgs_subset", val_frac = 0.5)
|
from Pipeline.Authentication import Authentication
import requests
import json
import copy
from pprint import pprint
import time
def find_snomedct_server(term):
apikey = "ca310f05-53e6-4984-82fd-8691dc30174e"
AuthClient = Authentication(apikey)
version = "2017AB"
tgt = AuthClient.gettgt()
query = {'ticket': AuthClient.getst(tgt), 'targetSource': 'SNOMEDCT_US'}
base_uri = "https://uts-ws.nlm.nih.gov/rest"
search_uri = "/search/current?string="
content_uri = "/content/current/CUI/"
source = "&sabs=SNOMEDCT_US"
search_type = '&searchType=words'
path = base_uri + search_uri + term + search_type + source
r = requests.get(path, params=query)
code, name, semantic = "", "", ""
try:
items = json.loads(r.text)
pprint(items['result']['results'])
code, name = select_code(items['result']['results'], term)
if code != "":
path2 = base_uri + content_uri + code
tgt2 = AuthClient.gettgt()
query2 = {'ticket': AuthClient.getst(tgt2), 'targetSource': 'SNOMEDCT_US'}
r2 = requests.get(path2, params=query2)
try:
items2 = json.loads(r2.text)
semantic = items2['result']['semanticTypes'][0]['name']
except json.decoder.JSONDecodeError:
semantic = "UNKNOWN"
except json.decoder.JSONDecodeError:
code, name = "", ""
return code, name, semantic
def find_snomed(snomed_term, snomed_code, term):
match_code = []
local = time.time()
for description, code in zip(snomed_term, snomed_code):
if term in description:
sub = {
'name': description,
'ui': code
}
match_code.append(sub)
# print(match_code)
code, name = select_code(match_code, term)
return code, name
# Select the best-scoring code and name that match the searched term
def select_code(results, term):
    # Initialize the minimum match-score thresholds we accept
    score = 0.6
    def_score = 0.4
    code, name = "", ""
for result in results:
title = result['name']
temp_score, temp_def_score = calculate_score(title, term)
if temp_score > score and temp_def_score > def_score:
score = temp_score
def_score = temp_def_score
code = result['ui']
name = title
return code, name
# Calculate the similarity score between SNOMED CT name and the term to be searched
def calculate_score(name, term):
score, score_name = 0, 0
separate = str(term).lower().split(' ')
separate_copy = copy.deepcopy(separate)
number = len(separate)
definitions = str(name).lower().split(' (')[0].split(' ')
definitions_copy = copy.deepcopy(definitions)
number_of_definitions = len(definitions)
for word in definitions:
        if separate_copy is not None:
if word.lower() in separate_copy:
score_name = score_name + 1
separate_copy.remove(word.lower())
elif len(word) > 1 and word[-1] == 's' and word[:-1].lower() in separate_copy:
score_name = score_name + 1
separate_copy.remove(word[:-1].lower())
elif word.lower() == 'centimeter' and separate_copy[0] == 'cm':
score_name = score_name + 1
separate_copy.remove(separate_copy[0])
else:
for sep in separate_copy:
if word.lower() in sep:
score_name = score_name + 1
separate_copy.remove(sep)
break
# term = str(term).replace(word.lower(), "")
for word in separate:
        if definitions_copy is not None:
if word.lower() != 'x' and word.replace('.', '', 1).isdigit() == False \
and word.lower() in definitions_copy:
score = score + 1
definitions_copy.remove(word.lower())
elif len(word) >= 1 and word[-1] == 's' and word[:-1].lower() in definitions_copy:
score = score + 1
definitions_copy.remove(word[:-1].lower())
elif word.replace('.', '', 1).isdigit() and len(definitions_copy) == 1 \
and definitions_copy[0].replace('.','',1).isdigit():
score = score + 1
definitions_copy.remove(word.lower())
elif word.lower() == 'cm' and definitions_copy[0] == 'centimeter':
score = score + 1
definitions_copy.remove(definitions_copy[0])
elif word.lower() != 'x' and word.replace('.', '', 1).isdigit() == False:
for defi in definitions_copy:
if word.lower() in defi:
score = score + 1
definitions_copy.remove(defi)
break
# name = str(name).replace(word.lower(), '')
return score/number, score_name/number_of_definitions
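
# --- Hedged sanity check for the scoring helpers (illustrative only; the
# result entries below are fabricated and are not real UMLS lookups) ---
if __name__ == '__main__':
    fake_results = [
        {'name': 'Pulmonary nodule (finding)', 'ui': 'C0000001'},
        {'name': 'Nodule (morphologic abnormality)', 'ui': 'C0000002'},
    ]
    print(calculate_score('Pulmonary nodule (finding)', 'pulmonary nodule'))  # (1.0, 1.0)
    print(select_code(fake_results, 'pulmonary nodule'))  # picks the first entry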
|
<filename>tests/test_code_snippets.py
import os
import glob
def test_code_snippets(parser):
this_file = os.path.realpath(os.path.dirname(__file__))
root_path = os.path.split(os.path.abspath(os.path.join(this_file)))[0]
snippets_path = os.path.join(root_path, "tests", "code_snippets", "*.c")
example_idx = 0
for example in glob.iglob(snippets_path):
print("Parsing: %s" % example)
parser.parse_file(example)
example_idx += 1
def test_decl_after_if(parser):
code = """
int main(int argc, const char **argv){
int a = 1;
if (a > 5) {
}
int n;
return 0;
}
"""
parser.parse(code)
def test_for_loop_v1(parser):
code = """
int main(int argc, const char **argv){
int i;
for(i = 0; i < 10; i++) {
}
}
"""
parser.parse(code)
def test_for_loop_v2(parser):
"""Tests parsing of for-loop without initialization."""
code = """
int main(int argc, const char **argv){
int i = 0;
for(; i < 10; i++) {
}
}
"""
parser.parse(code)
def test_for_loop_v3(parser):
"""Tests parsing of for-loop without initialization and condition."""
code = """
int main(int argc, const char **argv){
int i = 0;
for(;; i++) {
if ( i < 10 ) {
break;
}
}
}
"""
parser.parse(code)
def test_for_loop_v4(parser):
"""Tests parsing of the infinite for-loop."""
code = """
int main(int argc, const char **argv){
int i = 0;
for(;;) {
if ( i < 10 ) {
break;
}
i++;
}
}
"""
parser.parse(code)
def test_for_without_braces(parser):
"""Tests parsing of the for-loop without braces."""
code = """
int main(int argc, const char **argv){
int i;
int a = 0;
for(i = 0; i < 10; i++)
a = i * 2;
}
"""
parser.parse(code)
def test_for_loop_c99(parser):
"""Tests parsing of C99 compatible for-loop."""
code = """
int main(int argc, const char **argv){
int j;
for(int i = 0, j = 0;;) {
if ( i < 10 ) {
break;
}
i++;
}
}
"""
parser.parse(code)
def test_pp_in_loop(parser):
code = """
void main(){
int sum = 0;
for(int i = 0; i < 10; i++){
# 2 "some fictive path"
sum =+ 1;
}
}
"""
parser.parse(code)
def test_uint(parser):
code = """
unsigned int state = 362436069U;
"""
parser.parse(code)
def test_long(parser):
code = """
int state = 362436069L;
"""
parser.parse(code)
def test_unsigned_long(parser):
code = """
unsigned long ul65536 = 65536UL;
"""
parser.parse(code)
def test_float(parser):
code = """
float state = 0.0f;
"""
parser.parse(code)
def test_hex(parser):
code = """
int state = 0x0;
"""
parser.parse(code)
def test_hex_with_u_suffix(parser):
code = """
int state = 0xFFu;
"""
parser.parse(code)
def test_unsigned_int(parser):
code = """
unsigned truncUint(int sizeOfWord, unsigned n)
{
if (sizeOfWord == 2)
n &= ~(~0u << 8 << 8);
else if (sizeOfWord == 4)
n &= ~(~0u << 8 << 12 << 12);
return n;
}
"""
parser.parse(code)
def test_pp_line_in_stat(parser):
code = """
void loop(int n) {
for(int i = 0; i < n; i++){
}
# 1 "C:\\cparser\\utils\\math.h"
}
"""
parser.parse(code)
def test_adjacent_strings(parser):
code = """
void main(){
printf("DEAD""BEEF");
}
"""
parser.parse(code)
def test_compound_expressions(parser):
code = r"""
int escape(char* in){
int slashes = 0;
while ((c = *in++) != '\0')
{
if (c == '\\')
{
do {
slashes++;
} while ((c = *in++) == '\\');
}
}
return slashes;
}
"""
parser.parse(code)
def test_array(parser):
code = """
int cases[] = {
# 1 "C:\\some\\path\\scanf.c"
{ 1, 2, 3, 4 },
# 2 "C:\\some\\path\\math.h"
{ 5, 6, 7, 8 },
};
"""
parser.parse(code)
def test_slash_in_char(parser):
code = r"""
char a = '\'';
char b = '"';
"""
parser.parse(code)
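
def test_ternary(parser):
    """Hedged extra snippet in the same style as the tests above; assumes the
    grammar under test supports the conditional operator."""
    code = """
    int main(int argc, const char **argv){
        int a = 1;
        int b = a > 0 ? 2 : 3;
        return b;
    }
    """
    parser.parse(code)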
|
<filename>tk_builder/widgets/image_canvas.py
# -*- coding: utf-8 -*-
"""
This module provides functionality for displaying images on a tkinter canvas,
with zooming, panning, and vector-shape drawing support.
"""
import PIL.Image
from PIL import ImageTk
import platform
import time
import tkinter
import tkinter.colorchooser as colorchooser
from typing import Union, Tuple, List, Dict
import numpy
from scipy.linalg import norm
from tk_builder.base_elements import BooleanDescriptor, IntegerDescriptor, \
IntegerTupleDescriptor, StringDescriptor, TypedDescriptor, FloatDescriptor
from tk_builder.widgets import basic_widgets
from tk_builder.utils.color_utils.hex_color_palettes import SeabornHexPalettes
from tk_builder.utils.color_utils import color_utils
from tk_builder.image_readers.image_reader import ImageReader
from tk_builder.utils.geometry_utils import polygon_utils
if platform.system() == "Linux":
import pyscreenshot as ImageGrab
else:
from PIL import ImageGrab
class CanvasImage(object):
"""
The canvas image object.
"""
image_reader = TypedDescriptor(
'image_reader', ImageReader,
docstring='The image reader object.') # type: ImageReader
canvas_decimated_image = TypedDescriptor(
'canvas_decimated_image', numpy.ndarray,
docstring='The canvas decimated image data.') # type: numpy.ndarray
display_image = TypedDescriptor(
'display_image', numpy.ndarray,
docstring='The display image data.') # type: numpy.ndarray
decimation_factor = IntegerDescriptor(
'decimation_factor', default_value=1,
docstring='The decimation factor.') # type: int
display_rescaling_factor = FloatDescriptor(
'display_rescaling_factor', default_value=1.0,
        docstring='The display rescaling factor.') # type: float
canvas_full_image_upper_left_yx = IntegerTupleDescriptor(
'canvas_full_image_upper_left_yx', length=2, default_value=(0, 0),
docstring='The upper left corner of the full image canvas in '
'yx order.') # type: tuple
canvas_ny = IntegerDescriptor(
'canvas_ny',
        docstring='The canvas size in the y direction, in pixels.') # type: int
canvas_nx = IntegerDescriptor(
'canvas_nx',
        docstring='The canvas size in the x direction, in pixels.') # type: int
scale_to_fit_canvas = BooleanDescriptor(
'scale_to_fit_canvas', default_value=True,
docstring='Scale the image to fit the canvas?') # type: bool
def __init__(self, image_reader, canvas_nx, canvas_ny):
"""
Parameters
----------
image_reader : ImageReader
canvas_nx : int
canvas_ny : int
"""
self.drop_bands = [] # type: List
self.image_reader = image_reader
self.canvas_nx = canvas_nx
self.canvas_ny = canvas_ny
self.update_canvas_display_image_from_full_image()
def get_decimated_image_data_in_full_image_rect(self, full_image_rect, decimation):
"""
Get decimated data.
Parameters
----------
full_image_rect : Tuple|List
decimation : int
Returns
-------
numpy.ndarray
"""
y_start = full_image_rect[0]
y_end = full_image_rect[2]
x_start = full_image_rect[1]
x_end = full_image_rect[3]
decimated_data = self.image_reader[y_start:y_end:decimation, x_start:x_end:decimation]
return decimated_data
def get_scaled_display_data(self, decimated_image):
"""
Gets scaled data for display.
Parameters
----------
decimated_image : numpy.ndarray
Returns
-------
numpy.ndarray
"""
scale_factor = self.compute_display_scale_factor(decimated_image)
new_nx = int(decimated_image.shape[1] * scale_factor)
new_ny = int(decimated_image.shape[0] * scale_factor)
if new_nx > self.canvas_nx:
new_nx = self.canvas_nx
if new_ny > self.canvas_ny:
new_ny = self.canvas_ny
if len(self.drop_bands) != 0:
zeros_image = numpy.zeros_like(decimated_image[:, :, 0])
for drop_band in self.drop_bands:
decimated_image[:, :, drop_band] = zeros_image
pil_image = PIL.Image.fromarray(decimated_image)
display_image = pil_image.resize((new_nx, new_ny))
return numpy.array(display_image)
def decimated_image_coords_to_display_image_coords(self, decimated_image_yx_cords):
"""
Convert from decimated image coordinates to display coordinates.
Parameters
----------
decimated_image_yx_cords : List[Tuple[float, float]]
Returns
-------
List[Tuple[float, float]]
"""
scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image)
return [(coord[0]*scale_factor, coord[1]*scale_factor) for coord in decimated_image_yx_cords]
def display_image_coords_to_decimated_image_coords(self, display_image_yx_coords):
"""
Convert from display coordinates to decimated image coordinates.
Parameters
----------
display_image_yx_coords : List[Tuple[float, float]]
Returns
-------
List[Tuple[float, float]]
"""
scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image)
return [(coord[0]/scale_factor, coord[1]/scale_factor) for coord in display_image_yx_coords]
@staticmethod
def display_image_coords_to_canvas_coords(display_image_yx_coords):
"""
        Converts display image coordinates to canvas coordinates. This is just
        an axis-switch operation.
Parameters
----------
display_image_yx_coords : List[Tuple[float, float]]
Returns
-------
List[Tuple[float, float]]
"""
return [(yx[1], yx[0]) for yx in display_image_yx_coords]
def compute_display_scale_factor(self, decimated_image):
"""
Computes the nominal scale factor.
Parameters
----------
decimated_image : numpy.ndarray
Returns
-------
float
"""
# TODO: division may not work as expected in Python 2 (int versus float)
# what is the intent here?
decimated_image_nx = decimated_image.shape[1]
decimated_image_ny = decimated_image.shape[0]
scale_factor_1 = self.canvas_nx/decimated_image_nx
scale_factor_2 = self.canvas_ny/decimated_image_ny
scale_factor = min(scale_factor_1, scale_factor_2)
return scale_factor
def get_decimated_image_data_in_canvas_rect(self, canvas_rect, decimation=None):
"""
Gets the decimated image from the image rectangle.
Parameters
----------
canvas_rect : Tuple|List
decimation : None|int
Returns
-------
numpy.ndarray
"""
full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)
if decimation is None:
decimation = self.get_decimation_from_canvas_rect(canvas_rect)
return self.get_decimated_image_data_in_full_image_rect(full_image_rect, decimation)
def update_canvas_display_image_from_full_image(self):
"""
Update the image in the canvas.
Returns
-------
None
"""
full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx)
self.update_canvas_display_image_from_full_image_rect(full_image_rect)
def update_canvas_display_image_from_full_image_rect(self, full_image_rect):
"""
Update the canvas to the given image rectangle.
Parameters
----------
full_image_rect : Tuple|List
Returns
-------
None
"""
self.set_decimation_from_full_image_rect(full_image_rect)
decimated_image_data = self.get_decimated_image_data_in_full_image_rect(full_image_rect, self.decimation_factor)
self.update_canvas_display_from_numpy_array(decimated_image_data)
self.canvas_full_image_upper_left_yx = (full_image_rect[0], full_image_rect[1])
def update_canvas_display_image_from_canvas_rect(self, canvas_rect):
"""
        Update the canvas to the given canvas rectangle.
Parameters
----------
canvas_rect : Tuple|List
Returns
-------
None
"""
full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)
full_image_rect = (int(round(full_image_rect[0])),
int(round(full_image_rect[1])),
int(round(full_image_rect[2])),
int(round(full_image_rect[3])))
self.update_canvas_display_image_from_full_image_rect(full_image_rect)
def update_canvas_display_from_numpy_array(self, image_data):
"""
Update the canvas from a numpy array.
Parameters
----------
image_data : numpy.ndarray
Returns
-------
None
"""
if len(self.drop_bands) > 0:
zeros_image = numpy.zeros_like(image_data[:, :, 0])
for drop_band in self.drop_bands:
image_data[:, :, drop_band] = zeros_image
self.canvas_decimated_image = image_data
if self.scale_to_fit_canvas:
scale_factor = self.compute_display_scale_factor(image_data)
self.display_rescaling_factor = scale_factor
self.display_image = self.get_scaled_display_data(image_data)
else:
self.display_image = image_data
def get_decimation_factor_from_full_image_rect(self, full_image_rect):
"""
Get the decimation factor from the rectangle size.
Parameters
----------
full_image_rect : Tuple|List
Returns
-------
int
"""
ny = full_image_rect[2] - full_image_rect[0]
nx = full_image_rect[3] - full_image_rect[1]
decimation_y = ny / self.canvas_ny
decimation_x = nx / self.canvas_nx
decimation_factor = max(decimation_y, decimation_x)
decimation_factor = int(decimation_factor)
if decimation_factor < 1:
decimation_factor = 1
return decimation_factor
def get_decimation_from_canvas_rect(self, canvas_rect):
"""
Get the decimation factor from the canvas rectangle size.
Parameters
----------
canvas_rect : Tuple|List
Returns
-------
int
"""
full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)
return self.get_decimation_factor_from_full_image_rect(full_image_rect)
def set_decimation_from_full_image_rect(self, full_image_rect):
"""
Sets the decimation from the image rectangle.
Parameters
----------
full_image_rect : Tuple|List
Returns
-------
None
"""
decimation_factor = self.get_decimation_factor_from_full_image_rect(full_image_rect)
self.decimation_factor = decimation_factor
def canvas_coords_to_full_image_yx(self, canvas_coords):
"""
Gets full coordinates in yx order from canvas coordinates.
Parameters
----------
canvas_coords : Tuple|List
Returns
-------
List
"""
decimation_factor = self.decimation_factor
if self.scale_to_fit_canvas:
decimation_factor = decimation_factor/self.display_rescaling_factor
siz = int(len(canvas_coords)/2)
out = []
for i in range(siz):
out.extend(
(canvas_coords[2*i+1]*decimation_factor + self.canvas_full_image_upper_left_yx[0],
canvas_coords[2 * i] * decimation_factor + self.canvas_full_image_upper_left_yx[1]))
return out
def canvas_rect_to_full_image_rect(self, canvas_rect):
"""
Gets the full image coordinates from the canvas coordinates.
Parameters
----------
canvas_rect : Tuple|List
Returns
-------
Tuple
"""
image_y1, image_x1 = self.canvas_coords_to_full_image_yx([canvas_rect[0], canvas_rect[1]])
image_y2, image_x2 = self.canvas_coords_to_full_image_yx([canvas_rect[2], canvas_rect[3]])
if image_x1 < 0:
image_x1 = 0
if image_y1 < 0:
image_y1 = 0
if image_x2 > self.image_reader.full_image_nx:
image_x2 = self.image_reader.full_image_nx
if image_y2 > self.image_reader.full_image_ny:
image_y2 = self.image_reader.full_image_ny
return image_y1, image_x1, image_y2, image_x2
def full_image_yx_to_canvas_coords(self, full_image_yx):
"""
Gets the canvas coordinates from full image coordinates in yx order.
Parameters
----------
full_image_yx : Tuple|List
Returns
-------
List
"""
decimation_factor = self.decimation_factor
if self.scale_to_fit_canvas:
decimation_factor = decimation_factor / self.display_rescaling_factor
siz = int(len(full_image_yx)/2)
out = []
for i in range(siz):
out.extend(
(float(full_image_yx[2*i+1] - self.canvas_full_image_upper_left_yx[1]) / decimation_factor,
float(full_image_yx[2 * i] - self.canvas_full_image_upper_left_yx[0]) / decimation_factor))
return out
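
# --- Hedged sketch (not part of the widget API): exercise the coordinate
# conversions without an ImageReader by bypassing __init__; the descriptor
# attributes are set directly, which may differ from normal construction. ---
if __name__ == '__main__':  # pragma: no cover
    _ci = CanvasImage.__new__(CanvasImage)
    _ci.scale_to_fit_canvas = False
    _ci.decimation_factor = 2
    _ci.canvas_full_image_upper_left_yx = (100, 200)
    # canvas (x, y) pairs map to full-image (y, x) pairs
    print(_ci.canvas_coords_to_full_image_yx([10, 20]))    # [140, 220]
    print(_ci.full_image_yx_to_canvas_coords([140, 220]))  # [10.0, 20.0]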
class VectorObject(object):
def __init__(self, vector_type,
tkinter_options,
image_drag_limits=None):
self.type = vector_type
self.tkinter_options = tkinter_options
self.image_coords = None
self.point_size = None
self.image_drag_limits = image_drag_limits
if vector_type == SHAPE_TYPES.RECT or vector_type == SHAPE_TYPES.POLYGON:
self.color = tkinter_options['outline']
elif vector_type == SHAPE_TYPES.LINE or vector_type == SHAPE_TYPES.ARROW:
self.color = tkinter_options['fill']
class AppVariables(object):
"""
The canvas image application variables.
"""
canvas_height = IntegerDescriptor(
'canvas_height', default_value=200,
docstring='The default canvas height, in pixels.') # type: int
canvas_width = IntegerDescriptor(
'canvas_width', default_value=300,
docstring='The default canvas width, in pixels.') # type: int
rect_border_width = IntegerDescriptor(
'rect_border_width', default_value=2,
docstring='The (margin) rectangular border width, in pixels.') # type: int
line_width = IntegerDescriptor(
'line_width', default_value=2,
docstring='The line width, in pixels.') # type: int
point_size = IntegerDescriptor(
'point_size', default_value=3,
docstring='The point size, in pixels.') # type: int
poly_border_width = IntegerDescriptor(
'poly_border_width', default_value=2,
docstring='The polygon border width, in pixels.') # type: int
poly_fill = StringDescriptor(
'poly_fill',
        docstring='The polygon fill color (named or hexadecimal string).') # type: Union[None, str]
foreground_color = StringDescriptor(
'foreground_color', default_value='red',
        docstring='The foreground color (named or hexadecimal string).') # type: str
image_id = IntegerDescriptor(
'image_id',
docstring='The image id.') # type: int
current_shape_id = IntegerDescriptor(
'current_shape_id',
docstring='The current shape id.') # type: int
current_shape_canvas_anchor_point_xy = IntegerTupleDescriptor(
'current_shape_canvas_anchor_point_xy', length=2,
docstring='The current shape canvas anchor point, in xy order.') # type: Union[None, tuple]
pan_anchor_point_xy = IntegerTupleDescriptor(
'pan_anchor_point_xy', length=2, default_value=(0, 0),
docstring='The pan anchor point, in xy order.') # type: Union[None, tuple]
canvas_image_object = TypedDescriptor(
'canvas_image_object', CanvasImage,
docstring='The canvas image object.') # type: CanvasImage
_tk_im = TypedDescriptor(
'_tk_im', ImageTk.PhotoImage,
docstring='The Tkinter Image.') # type: ImageTk.PhotoImage
# zoom rectangle properties
zoom_rect_id = IntegerDescriptor(
'zoom_rect_id',
docstring='The zoom rectangle id.') # type: int
zoom_rect_color = StringDescriptor(
'zoom_rect_color', default_value='cyan',
        docstring='The zoom rectangle color (named or hexadecimal).') # type: str
zoom_rect_border_width = IntegerDescriptor(
'zoom_rect_border_width', default_value=2,
docstring='The zoom rectangle border width, in pixels.') # type: int
# selection rectangle properties
select_rect_id = IntegerDescriptor(
'select_rect_id',
docstring='The select rectangle id.') # type: int
select_rect_color = StringDescriptor(
'select_rect_color', default_value='red',
        docstring='The select rectangle color (named or hexadecimal).') # type: str
select_rect_border_width = IntegerDescriptor(
'select_rect_border_width', default_value=2,
docstring='The select rectangle border width, in pixels.') # type: int
# animation properties
animate_zoom = BooleanDescriptor(
'animate_zoom', default_value=True,
docstring='Specifies whether to animate zooming.') # type: bool
n_zoom_animations = IntegerDescriptor(
'n_zoom_animations', default_value=5,
docstring='The number of zoom frames.') # type: int
animate_pan = BooleanDescriptor(
'animate_pan', default_value=False,
docstring='Specifies whether to animate panning.') # type: bool
animation_time_in_seconds = FloatDescriptor(
'animation_time_in_seconds', default_value=0.3,
docstring='The animation time in seconds.') # type: float
# tool identifiers
active_tool = StringDescriptor(
'active_tool',
docstring='The active tool name.') # type: str
current_tool = StringDescriptor(
'current_tool',
docstring='The current tool name.') # type: str
# some configuration properties
vertex_selector_pixel_threshold = FloatDescriptor(
'vertex_selector_pixel_threshold', default_value=10.0,
docstring='The pixel threshold for vertex selection.') # type: float
mouse_wheel_zoom_percent_per_event = FloatDescriptor(
'mouse_wheel_zoom_percent_per_event', default_value=1.5,
docstring='The percent to zoom in/out on mouse wheel detection.') # type: float
highlight_n_colors_cycle = IntegerDescriptor(
'highlight_n_colors_cycle', default_value=10,
docstring='The length of highlight colors cycle.') # type: int
zoom_on_wheel = BooleanDescriptor(
'zoom_on_wheel', default_value=True,
docstring='Zoom on the mouse wheel operation?') # type: bool
rescale_image_to_fit_canvas = BooleanDescriptor(
'rescale_image_to_fit_canvas', default_value=True,
docstring='Rescale the image to fit the canvas?') # type: bool
scale_dynamic_range = BooleanDescriptor(
'scale_dynamic_range', default_value=False,
docstring='Scale the dynamic range of the image?') # type: bool
# some state properties
the_canvas_is_currently_zooming = BooleanDescriptor(
'the_canvas_is_currently_zooming', default_value=False,
docstring='Is the canvas object currently zooming?') # type: bool
actively_drawing_shape = BooleanDescriptor(
'actively_drawing_shape', default_value=False,
docstring='Is the canvas object actively drawing a shape?') # type: bool
tmp_closest_coord_index = IntegerDescriptor(
'tmp_closest_coord_index', default_value=0,
        docstring='The index of the shape vertex closest to the mouse, used while editing shape coordinates.') # type: int
def __init__(self):
        self.shape_ids = []  # type: List[int]
        self.vector_objects = {}  # type: Dict[str, VectorObject]
self.shape_properties = {}
self.shape_drag_image_coord_limits = {} # type: dict
self.highlight_color_palette = SeabornHexPalettes.blues # type: List[str]
        self.tmp_points = None  # type: Union[None, List[int]]
class ToolConstants:
ZOOM_IN_TOOL = "zoom in"
ZOOM_OUT_TOOL = "zoom out"
DRAW_RECT_BY_DRAGGING = "draw rect by dragging"
DRAW_RECT_BY_CLICKING = "draw rect by clicking"
DRAW_ELLIPSE_BY_DRAGGING = "draw ellipse by dragging"
DRAW_LINE_BY_DRAGGING = "draw line by dragging"
DRAW_LINE_BY_CLICKING = "draw line by clicking"
DRAW_ARROW_BY_DRAGGING = "draw arrow by dragging"
DRAW_ARROW_BY_CLICKING = "draw arrow by clicking"
DRAW_POLYGON_BY_CLICKING = "draw polygon by clicking"
DRAW_POINT_BY_CLICKING = "draw point by clicking"
SELECT_TOOL = "select tool"
SELECT_CLOSEST_SHAPE_TOOL = "select closest shape"
PAN_TOOL = "pan tool"
TRANSLATE_SHAPE_TOOL = "translate shape tool"
EDIT_SHAPE_COORDS_TOOL = "edit shape coords tool"
EDIT_SHAPE_TOOL = "edit shape tool"
class ShapePropertyConstants:
SHAPE_TYPE = "shape type"
CANVAS_COORDS = "canvas coords"
IMAGE_COORDS = "image coords"
POINT_SIZE = "point size"
COLOR = "color"
class ShapeTypeConstants:
POINT = "point"
LINE = "line"
RECT = "rect"
ELLIPSE = "ellipse"
ARROW = "arrow"
POLYGON = "polygon"
TEXT = "text"
SHAPE_PROPERTIES = ShapePropertyConstants()
SHAPE_TYPES = ShapeTypeConstants()
TOOLS = ToolConstants()
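# module level instances of the constants classes above; these are used
# throughout this module in the style of enumerations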
class ImageCanvas(basic_widgets.Canvas):
def __init__(self, primary):
"""
Parameters
----------
primary
The primary widget.
"""
osplat = platform.system()
if osplat == "Windows":
import ctypes
user32 = ctypes.windll.user32
user32.SetProcessDPIAware()
basic_widgets.Canvas.__init__(self, primary, highlightthickness=0)
self.pack(fill=tkinter.BOTH, expand=tkinter.NO)
self.variables = AppVariables()
self.variables.zoom_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.zoom_rect_color, width=self.variables.zoom_rect_border_width)
self.variables.select_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.select_rect_color, width=self.variables.select_rect_border_width)
# hide the shapes we initialize
self.hide_shape(self.variables.select_rect_id)
self.hide_shape(self.variables.zoom_rect_id)
self.on_left_mouse_click(self.callback_handle_left_mouse_click)
self.on_left_mouse_motion(self.callback_handle_left_mouse_motion)
self.on_left_mouse_release(self.callback_handle_left_mouse_release)
self.on_right_mouse_click(self.callback_handle_right_mouse_click)
self.on_mouse_motion(self.callback_handle_mouse_motion)
self.on_mouse_wheel(self.callback_mouse_zoom)
self.variables.active_tool = None
self.variables.current_shape_id = None
def _set_image_reader(self, image_reader):
"""
Set the image reader.
Parameters
----------
image_reader : ImageReader
Returns
-------
None
"""
self.variables.canvas_image_object = CanvasImage(
image_reader, self.variables.canvas_width, self.variables.canvas_height)
if self.variables.rescale_image_to_fit_canvas:
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
else:
self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)
def get_vector_object(self, vector_id):
"""
Parameters
----------
vector_id : int
Returns
-------
VectorObject
"""
return self.variables.vector_objects[str(vector_id)]
def get_canvas_line_length(self, line_id):
"""
Gets the canvas line length.
Parameters
----------
line_id : int
Returns
-------
        float
"""
line_coords = self.coords(line_id)
x1 = line_coords[0]
y1 = line_coords[1]
x2 = line_coords[2]
y2 = line_coords[3]
length = numpy.sqrt(numpy.square(x2-x1) + numpy.square(y2-y1))
return length
def get_image_line_length(self, line_id):
"""
        Gets the image line length.
Parameters
----------
line_id : int
Returns
-------
        float
"""
canvas_line_length = self.get_canvas_line_length(line_id)
return canvas_line_length * self.variables.canvas_image_object.decimation_factor
def hide_shape(self, shape_id):
"""
Hide the shape specified by the provided id.
Parameters
----------
shape_id : int
Returns
-------
None
"""
if shape_id:
self.itemconfigure(shape_id, state="hidden")
def show_shape(self, shape_id):
"""
Show or un-hide the shape specified by the provided id.
Parameters
----------
shape_id : int
Returns
-------
None
"""
if shape_id:
self.itemconfigure(shape_id, state="normal")
def callback_mouse_zoom(self, event):
"""
The mouse zoom callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.zoom_on_wheel:
delta = event.delta
single_delta = 120
# handle case where platform is linux:
if platform.system() == "Linux":
delta = single_delta
if event.num == 5:
delta = delta*-1
zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)
x = event.x
y = event.y
after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event
after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event
x_offset_point = x + after_zoom_x_offset
y_offset_point = y + after_zoom_y_offset
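            # the zoom box is centered part way between the cursor and the canvas
            # center, intended so that repeated wheel events drift the view toward
            # the cursor position while zooming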
zoom_in_box = (x_offset_point - zoom_in_box_half_width,
y_offset_point - zoom_in_box_half_height,
x_offset_point + zoom_in_box_half_width,
y_offset_point + zoom_in_box_half_height)
zoom_out_box = (x_offset_point - zoom_out_box_half_width,
y_offset_point - zoom_out_box_half_height,
x_offset_point + zoom_out_box_half_width,
y_offset_point + zoom_out_box_half_height)
            if not self.variables.the_canvas_is_currently_zooming:
                if delta > 0:
                    self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)
                else:
                    self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)
def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):
"""
Animate with a sequence of numpy arrays.
Parameters
----------
numpy_frame_sequence : List[numpy.ndarray]
frames_per_second : float
Returns
-------
None
"""
sleep_time = 1/frames_per_second
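        # sleep only for the remainder of each frame's time budget, so slow frame
        # rendering does not drop the effective frame rate more than necessary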
for animation_frame in numpy_frame_sequence:
tic = time.time()
self.set_image_from_numpy_array(animation_frame)
self.update()
toc = time.time()
frame_generation_time = toc-tic
if frame_generation_time < sleep_time:
new_sleep_time = sleep_time - frame_generation_time
time.sleep(new_sleep_time)
def animate_with_pil_frame_sequence(self, pil_frame_sequence, frames_per_second=15):
"""
Animate with a sequence of PIL images.
Parameters
----------
pil_frame_sequence : List[PIL.Image]
frames_per_second : float
Returns
-------
None
"""
sleep_time = 1/frames_per_second
for animation_frame in pil_frame_sequence:
tic = time.time()
self._set_image_from_pil_image(animation_frame)
self.update()
toc = time.time()
frame_generation_time = toc-tic
if frame_generation_time < sleep_time:
new_sleep_time = sleep_time - frame_generation_time
time.sleep(new_sleep_time)
def callback_handle_left_mouse_click(self, event):
"""
Left mouse click callback.
Parameters
----------
event
Returns
-------
"""
if self.variables.active_tool == TOOLS.PAN_TOOL:
self.variables.pan_anchor_point_xy = event.x, event.y
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:
closest_coord_index = self.find_closest_shape_coord(self.variables.current_shape_id, event.x, event.y)
self.variables.tmp_closest_coord_index = closest_coord_index
elif self.variables.active_tool == TOOLS.SELECT_CLOSEST_SHAPE_TOOL:
closest_shape_id = self.find_closest_shape(event.x, event.y)
self.variables.current_shape_id = closest_shape_id
self.highlight_existing_shape(self.variables.current_shape_id)
else:
start_x = self.canvasx(event.x)
start_y = self.canvasy(event.y)
self.variables.current_shape_canvas_anchor_point_xy = (start_x, start_y)
if self.variables.current_shape_id not in self.variables.shape_ids:
coords = (start_x, start_y, start_x + 1, start_y + 1)
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:
self.create_new_line(coords)
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.create_new_line(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:
self.create_new_arrow(coords)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.create_new_arrow(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:
self.create_new_rect(coords)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
self.create_new_rect(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:
self.create_new_ellipse(coords)
elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:
self.create_new_point((start_x, start_y))
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.create_new_polygon(coords)
self.variables.actively_drawing_shape = True
else:
print("no tool selected")
else:
if self.variables.current_shape_id in self.variables.shape_ids:
vector_object = self.get_vector_object(self.variables.current_shape_id)
if vector_object.type == SHAPE_TYPES.POINT:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,
(start_x, start_y))
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.event_click_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.event_click_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.event_click_polygon(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
if self.variables.actively_drawing_shape:
self.variables.actively_drawing_shape = False
else:
self.variables.actively_drawing_shape = True
def callback_handle_left_mouse_release(self, event):
"""
Left mouse release callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.active_tool == TOOLS.PAN_TOOL:
self._pan(event)
if self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:
rect_coords = self.coords(self.variables.zoom_rect_id)
self.zoom_to_selection(rect_coords, self.variables.animate_zoom)
self.hide_shape(self.variables.zoom_rect_id)
if self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:
rect_coords = self.coords(self.variables.zoom_rect_id)
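            # grow the selection beyond the canvas bounds by the drawn rectangle's
            # offsets; zooming "to" this oversized rectangle effectively zooms out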
x1 = -rect_coords[0]
x2 = self.variables.canvas_width + rect_coords[2]
y1 = -rect_coords[1]
y2 = self.variables.canvas_height + rect_coords[3]
zoom_rect = (x1, y1, x2, y2)
self.zoom_to_selection(zoom_rect, self.variables.animate_zoom)
self.hide_shape(self.variables.zoom_rect_id)
def callback_handle_mouse_motion(self, event):
"""
Mouse motion callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.actively_drawing_shape:
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.event_drag_multipoint_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.event_drag_multipoint_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.event_drag_multipoint_polygon(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
self.event_drag_line(event)
elif self.variables.current_tool == TOOLS.EDIT_SHAPE_TOOL:
vector_object = self.get_vector_object(self.variables.current_shape_id)
if vector_object.type == SHAPE_TYPES.RECT or vector_object.type == SHAPE_TYPES.ELLIPSE:
select_x1, select_y1, select_x2, select_y2 = self.get_shape_canvas_coords(
self.variables.current_shape_id)
select_xul = min(select_x1, select_x2)
select_xlr = max(select_x1, select_x2)
select_yul = min(select_y1, select_y2)
select_ylr = max(select_y1, select_y2)
distance_to_ul = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_yul))
distance_to_ur = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_yul))
distance_to_lr = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_ylr))
distance_to_ll = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_ylr))
if distance_to_ul < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="top_left_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_ur < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="top_right_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_lr < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="bottom_right_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_ll < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="bottom_left_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif select_xul < event.x < select_xlr and select_yul < event.y < select_ylr:
self.config(cursor="fleur")
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
else:
self.config(cursor="arrow")
self.variables.active_tool = None
elif vector_object.type == SHAPE_TYPES.LINE or vector_object.type == SHAPE_TYPES.ARROW:
canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
x_coords = canvas_coords[0::2]
y_coords = canvas_coords[1::2]
distance_to_vertex = numpy.sqrt(numpy.square(event.x - x_coords[0]) +
numpy.square(event.y - y_coords[0]))
p2 = numpy.asarray((x_coords[1], y_coords[1]))
p1 = numpy.asarray((x_coords[0], y_coords[0]))
p3 = numpy.asarray((event.x, event.y))
distance_to_line = norm(numpy.cross(p2 - p1, p1 - p3)) / norm(p2 - p1)
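                # perpendicular distance from the cursor (p3) to the infinite line
                # through p1 and p2: |(p2 - p1) x (p1 - p3)| / |p2 - p1|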
for xy in zip(x_coords, y_coords):
vertex_distance = numpy.sqrt(numpy.square(event.x - xy[0]) + numpy.square(event.y - xy[1]))
if vertex_distance < distance_to_vertex:
distance_to_vertex = vertex_distance
if distance_to_vertex < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="target")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_line < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="fleur")
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
else:
self.config(cursor="arrow")
self.variables.active_tool = None
            elif vector_object.type == SHAPE_TYPES.POLYGON:  # LINE is already handled above
canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
x_coords = canvas_coords[0::2]
y_coords = canvas_coords[1::2]
                xy_points = list(zip(x_coords, y_coords))
distance_to_vertex = numpy.sqrt(numpy.square(event.x - x_coords[0]) +
numpy.square(event.y - y_coords[0]))
for xy in zip(x_coords, y_coords):
vertex_distance = numpy.sqrt(numpy.square(event.x - xy[0]) + numpy.square(event.y - xy[1]))
if vertex_distance < distance_to_vertex:
distance_to_vertex = vertex_distance
if distance_to_vertex < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="target")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif polygon_utils.point_inside_polygon(event.x, event.y, xy_points):
self.config(cursor="fleur")
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
else:
self.config(cursor="arrow")
self.variables.active_tool = None
elif vector_object.type == SHAPE_TYPES.POINT:
canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
distance_to_point = numpy.sqrt(numpy.square(event.x - canvas_coords[0]) +
numpy.square(event.y - canvas_coords[1]))
if distance_to_point < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="fleur")
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
def callback_handle_left_mouse_motion(self, event):
"""
Left mouse motion callback.
Parameters
----------
event
Returns
-------
None
"""
# TODO: update this for the case where there is no current shape id
vector_object = self.get_vector_object(self.variables.current_shape_id)
if self.variables.active_tool == TOOLS.PAN_TOOL:
x_dist = event.x - self.variables.tmp_anchor_point[0]
y_dist = event.y - self.variables.tmp_anchor_point[1]
self.move(self.variables.image_id, x_dist, y_dist)
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:
x_dist = event.x - self.variables.tmp_anchor_point[0]
y_dist = event.y - self.variables.tmp_anchor_point[1]
t_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
new_coords = numpy.asarray(t_coords) + x_dist
new_coords_y = numpy.asarray(t_coords) + y_dist
new_coords[1::2] = new_coords_y[1::2]
if vector_object.image_drag_limits:
canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)
x_vertices = new_coords[0::2]
y_vertices = new_coords[1::2]
within_x_limits = True
within_y_limits = True
for x_vertex in x_vertices:
if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:
within_x_limits = False
for y_vertex in y_vertices:
if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:
within_y_limits = False
if not within_x_limits:
new_coords[0::2] = t_coords[0::2]
if not within_y_limits:
new_coords[1::2] = t_coords[1::2]
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,
new_coords,
update_pixel_coords=True)
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:
previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
coord_x_index = self.variables.tmp_closest_coord_index*2
coord_y_index = coord_x_index + 1
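            # coordinates are stored flat as (x0, y0, x1, y1, ...), so vertex i
            # occupies indices 2*i (x) and 2*i + 1 (y)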
new_coords = list(previous_coords)
new_coords[coord_x_index] = event.x
new_coords[coord_y_index] = event.y
if vector_object.image_drag_limits:
drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \
self.image_coords_to_canvas_coords(vector_object.image_drag_limits)
if new_coords[coord_x_index] < drag_x_lim_1:
new_coords[coord_x_index] = drag_x_lim_1
if new_coords[coord_x_index] > drag_x_lim_2:
new_coords[coord_x_index] = drag_x_lim_2
if new_coords[coord_y_index] < drag_y_lim_1:
new_coords[coord_y_index] = drag_y_lim_1
if new_coords[coord_y_index] > drag_y_lim_2:
new_coords[coord_y_index] = drag_y_lim_2
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))
elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.SELECT_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))
def highlight_existing_shape(self, shape_id):
"""
Highlights an existing shape, according to provided id.
Parameters
----------
shape_id : int
Returns
-------
None
"""
original_color = self.get_vector_object(shape_id).color
colors = color_utils.get_full_hex_palette(self.variables.highlight_color_palette, self.variables.highlight_n_colors_cycle)
for color in colors:
self.change_shape_color(shape_id, color)
time.sleep(0.001)
self.update()
colors.reverse()
for color in colors:
self.change_shape_color(shape_id, color)
time.sleep(0.001)
self.update()
self.change_shape_color(shape_id, original_color)
# noinspection PyUnusedLocal
def callback_handle_right_mouse_click(self, event):
"""
Callback for right mouse click.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.variables.actively_drawing_shape = False
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.variables.actively_drawing_shape = False
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.variables.actively_drawing_shape = False
def set_image_from_numpy_array(self, numpy_data):
"""
This is the default way to set and display image data. All other methods
to update images should ultimately call this.
Parameters
----------
numpy_data : numpy.ndarray
Returns
-------
None
"""
if self.variables.scale_dynamic_range:
min_data = numpy.min(numpy_data)
dynamic_range = numpy.max(numpy_data) - min_data
numpy_data = numpy.asanyarray(
255*(numpy_data - min_data)/dynamic_range, dtype=numpy.uint8)
pil_image = PIL.Image.fromarray(numpy_data)
self._set_image_from_pil_image(pil_image)
def set_canvas_size(self, width_npix, height_npix):
"""
Set the canvas size.
Parameters
----------
width_npix : int|float
height_npix : int|float
Returns
-------
None
"""
self.variables.canvas_width = width_npix
self.variables.canvas_height = height_npix
if self.variables.canvas_image_object is not None:
self.variables.canvas_image_object.canvas_nx = width_npix
self.variables.canvas_image_object.canvas_ny = height_npix
self.config(width=width_npix, height=height_npix)
def modify_existing_shape_using_canvas_coords(self, shape_id, new_coords, update_pixel_coords=True):
"""
Modify an existing shape.
Parameters
----------
shape_id : int
new_coords : Tuple|List
update_pixel_coords : bool
Returns
-------
None
"""
vector_object = self.get_vector_object(shape_id)
if vector_object.type == SHAPE_TYPES.POINT:
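            # a point is rendered as a small oval, so expand the single (x, y)
            # location into a bounding box using the stored point size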
point_size = vector_object.point_size
x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)
x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)
canvas_drawing_coords = (x1, y1, x2, y2)
else:
canvas_drawing_coords = tuple(new_coords)
self.coords(shape_id, canvas_drawing_coords)
if update_pixel_coords:
self.set_shape_pixel_coords_from_canvas_coords(shape_id, new_coords)
def modify_existing_shape_using_image_coords(self, shape_id, image_coords):
"""
Modify an existing shape.
Parameters
----------
shape_id : int
image_coords : Tuple|List
Returns
-------
None
"""
self.set_shape_pixel_coords(shape_id, image_coords)
canvas_coords = self.image_coords_to_canvas_coords(image_coords)
self.modify_existing_shape_using_canvas_coords(shape_id, canvas_coords, update_pixel_coords=False)
def event_drag_multipoint_line(self, event):
"""
Drag multipoint line callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
coords = self.coords(self.variables.current_shape_id)
new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]
vector_object = self.get_vector_object(self.variables.current_shape_id)
if vector_object.type == SHAPE_TYPES.ARROW or vector_object.type == SHAPE_TYPES.LINE:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
def event_drag_multipoint_polygon(self, event):
"""
Drag a polygon callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.current_shape_id:
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits
if drag_lims:
canvas_lims = self.image_coords_to_canvas_coords(drag_lims)
if event_x_pos < canvas_lims[0]:
event_x_pos = canvas_lims[0]
elif event_x_pos > canvas_lims[2]:
event_x_pos = canvas_lims[2]
if event_y_pos < canvas_lims[1]:
event_y_pos = canvas_lims[1]
elif event_y_pos > canvas_lims[3]:
event_y_pos = canvas_lims[3]
self.show_shape(self.variables.current_shape_id)
coords = self.coords(self.variables.current_shape_id)
new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
def event_drag_line(self, event):
"""
Drag a line callback.
Parameters
----------
event
Returns
-------
None
"""
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:
drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits
canvas_lims = self.image_coords_to_canvas_coords(drag_lims)
if event_x_pos < canvas_lims[0]:
event_x_pos = canvas_lims[0]
elif event_x_pos > canvas_lims[2]:
event_x_pos = canvas_lims[2]
if event_y_pos < canvas_lims[1]:
event_y_pos = canvas_lims[1]
elif event_y_pos > canvas_lims[3]:
event_y_pos = canvas_lims[3]
self.modify_existing_shape_using_canvas_coords(
self.variables.current_shape_id,
(self.variables.current_shape_canvas_anchor_point_xy[0],
self.variables.current_shape_canvas_anchor_point_xy[1],
event_x_pos, event_y_pos))
def event_click_line(self, event):
"""
Click a line callback.
Parameters
----------
event
Returns
-------
None
"""
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:
drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits
canvas_lims = self.image_coords_to_canvas_coords(drag_lims)
if event_x_pos < canvas_lims[0]:
event_x_pos = canvas_lims[0]
elif event_x_pos > canvas_lims[2]:
event_x_pos = canvas_lims[2]
if event_y_pos < canvas_lims[1]:
event_y_pos = canvas_lims[1]
elif event_y_pos > canvas_lims[3]:
event_y_pos = canvas_lims[3]
if self.variables.actively_drawing_shape:
old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
new_coords = tuple(list(old_coords) + [event_x_pos, event_y_pos])
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
else:
new_coords = (event_x_pos, event_y_pos, event_x_pos + 1, event_y_pos + 1)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
self.variables.actively_drawing_shape = True
def delete_shape(self, shape_id):
"""
Deletes a shape by its id.
Parameters
----------
shape_id : int
Returns
-------
None
"""
self.variables.shape_ids.remove(shape_id)
del self.variables.vector_objects[str(shape_id)]
self.delete(shape_id)
if shape_id == self.variables.current_shape_id:
self.variables.current_shape_id = None
def event_click_polygon(self, event):
"""
Click a polygon callback.
Parameters
----------
event
Returns
-------
None
"""
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits
if drag_lims:
canvas_lims = self.image_coords_to_canvas_coords(drag_lims)
if event_x_pos < canvas_lims[0]:
event_x_pos = canvas_lims[0]
elif event_x_pos > canvas_lims[2]:
event_x_pos = canvas_lims[2]
if event_y_pos < canvas_lims[1]:
event_y_pos = canvas_lims[1]
elif event_y_pos > canvas_lims[3]:
event_y_pos = canvas_lims[3]
if self.variables.actively_drawing_shape:
old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
new_coords = list(old_coords) + [event_x_pos, event_y_pos]
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
# re-initialize shape if we're not actively drawing
else:
            new_coords = (event_x_pos, event_y_pos, event_x_pos+1, event_y_pos+1)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
self.variables.actively_drawing_shape = True
def create_new_text(self, *args, **kw):
"""Create text with coordinates x1,y1."""
shape_id = self._create('text', args, kw)
self.variables.shape_ids.append(shape_id)
canvas_coords = args[0]
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_rect(self, canvas_coords, **options):
"""
Create a new rectangle.
Parameters
----------
canvas_coords : Tuple|List
options
Optional Keyword arguments.
Returns
-------
int
"""
if 'outline' not in options:
options['outline'] = self.variables.foreground_color
if 'width' not in options:
options['width'] = self.variables.rect_border_width
shape_id = self.create_rectangle(*canvas_coords, **options)
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.RECT, options)
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_ellipse(self, canvas_coords, **options):
"""
        Create a new ellipse.
Parameters
----------
canvas_coords : Tuple|List
options
Optional Keyword arguments.
Returns
-------
int
"""
if 'outline' not in options:
options['outline'] = self.variables.foreground_color
if 'width' not in options:
options['width'] = self.variables.rect_border_width
shape_id = self.create_oval(*canvas_coords, **options)
        self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.ELLIPSE, options)
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_polygon(self, coords, **options):
"""
Create a new polygon.
Parameters
----------
coords : Tuple|List
options
Optional keyword arguments.
Returns
-------
int
"""
if 'outline' not in options:
options['outline'] = self.variables.foreground_color
if 'width' not in options:
options['width'] = self.variables.poly_border_width
if 'fill' not in options:
options['fill'] = ''
shape_id = self.create_polygon(*coords, **options)
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_arrow(self, coords, **options):
"""
Create a new arrow.
Parameters
----------
coords : Tuple|List
options
Optional keyword arguments.
Returns
-------
int
"""
if 'fill' not in options:
options['fill'] = self.variables.foreground_color
if 'width' not in options:
options['width'] = self.variables.line_width
if 'arrow' not in options:
options['arrow'] = tkinter.LAST
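        # tkinter draws the arrowhead at the last coordinate pair, so the arrow
        # points from the first vertex toward the last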
shape_id = self.create_line(*coords, **options)
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.ARROW, options)
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_line(self, coords, **options):
"""
Create a new line.
Parameters
----------
coords : Tuple|List
options
Optional keyword arguments.
Returns
-------
int
"""
if 'fill' not in options:
options['fill'] = self.variables.foreground_color
if 'width' not in options:
options['width'] = self.variables.line_width
shape_id = self.create_line(*coords, **options)
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.LINE, options)
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_point(self, coords, **options):
"""
Create a new point.
Parameters
----------
coords : Tuple|List
options
Optional keyword arguments.
Returns
-------
int
"""
if 'fill' not in options:
options['fill'] = self.variables.foreground_color
x1, y1 = (coords[0] - self.variables.point_size), (coords[1] - self.variables.point_size)
x2, y2 = (coords[0] + self.variables.point_size), (coords[1] + self.variables.point_size)
shape_id = self.create_oval(x1, y1, x2, y2, **options)
self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POINT, options)
self.variables.vector_objects[str(shape_id)].point_size = self.variables.point_size
self.variables.shape_ids.append(shape_id)
self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)
self.variables.current_shape_id = shape_id
return shape_id
def change_shape_color(self, shape_id, color):
"""
Change the shape color.
Parameters
----------
shape_id : int
color : str
Returns
-------
None
"""
vector_object = self.get_vector_object(shape_id)
shape_type = vector_object.type
        if shape_type in (SHAPE_TYPES.RECT, SHAPE_TYPES.ELLIPSE, SHAPE_TYPES.POLYGON):
self.itemconfig(shape_id, outline=color)
vector_object.tkinter_options['outline'] = color
else:
self.itemconfig(shape_id, fill=color)
vector_object.tkinter_options['fill'] = color
def set_shape_pixel_coords_from_canvas_coords(self, shape_id, coords):
"""
Sets the shape pixel coordinates from the canvas coordinates.
Parameters
----------
        shape_id : int
        coords : Tuple|List
Returns
-------
None
"""
if self.variables.canvas_image_object:
image_coords = self.canvas_coords_to_image_coords(coords)
self.set_shape_pixel_coords(shape_id, image_coords)
def set_shape_pixel_coords(self, shape_id, image_coords):
"""
Set the pixel coordinates for the given shape.
Parameters
----------
shape_id : int
image_coords : Tuple|List
Returns
-------
None
"""
vector_object = self.get_vector_object(shape_id)
vector_object.image_coords = image_coords
def canvas_coords_to_image_coords(self, canvas_coords):
"""
Converts the canvas coordinates to image coordinates.
Parameters
----------
canvas_coords : tuple
Returns
-------
tuple
"""
return self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)
def get_shape_canvas_coords(self, shape_id):
"""
Fetches the canvas coordinates for the shape.
Parameters
----------
shape_id : int
Returns
-------
Tuple
"""
return self.image_coords_to_canvas_coords(self.get_vector_object(shape_id).image_coords)
def get_shape_image_coords(self, shape_id):
"""
Fetches the image coordinates for the shape.
Parameters
----------
shape_id : int
Returns
-------
Tuple
"""
return self.get_vector_object(shape_id).image_coords
def shape_image_coords_to_canvas_coords(self, shape_id):
"""
        Converts the shape's image coordinates to canvas coordinates.
Parameters
----------
shape_id : int
Returns
-------
Tuple
"""
image_coords = self.get_shape_image_coords(shape_id)
return self.variables.canvas_image_object.full_image_yx_to_canvas_coords(image_coords)
def image_coords_to_canvas_coords(self, image_coords):
"""
        Converts image coordinates to canvas coordinates.
Parameters
----------
image_coords : tuple
Returns
-------
Tuple
"""
return self.variables.canvas_image_object.full_image_yx_to_canvas_coords(image_coords)
def get_image_data_in_canvas_rect_by_id(self, rect_id, decimation=None):
"""
        Fetches the (possibly decimated) image data contained in the rectangle.
Parameters
----------
rect_id : int
decimation : None|int
Returns
-------
numpy.ndarray
"""
image_coords = self.get_shape_image_coords(rect_id)
tmp_image_coords = list(image_coords)
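        # normalize the rectangle so that elements 0/1 are the upper left corner
        # and elements 2/3 the lower right, regardless of drag direction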
if image_coords[0] > image_coords[2]:
tmp_image_coords[0] = image_coords[2]
tmp_image_coords[2] = image_coords[0]
if image_coords[1] > image_coords[3]:
tmp_image_coords[1] = image_coords[3]
tmp_image_coords[3] = image_coords[1]
if decimation is None:
decimation = self.variables.canvas_image_object.\
get_decimation_factor_from_full_image_rect(tmp_image_coords)
tmp_image_coords = (int(tmp_image_coords[0]), int(tmp_image_coords[1]), int(tmp_image_coords[2]), int(tmp_image_coords[3]))
image_data_in_rect = self.variables.canvas_image_object.\
get_decimated_image_data_in_full_image_rect(tmp_image_coords, decimation)
return image_data_in_rect
def zoom_to_selection(self, canvas_rect, animate=False):
"""
Zoom to the selection using canvas coordinates.
Parameters
----------
canvas_rect : Tuple|List
animate : bool
Returns
-------
None
"""
self.variables.the_canvas_is_currently_zooming = True
# fill up empty canvas space due to inconsistent ratios between the canvas rect and the canvas dimensions
image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_rect)
self.zoom_to_full_image_selection(image_coords, animate=animate)
def zoom_to_full_image_selection(self, image_rect, animate=False):
"""
Zoom to the selection using image coordinates.
Parameters
----------
        image_rect : Tuple|List
animate : bool
Returns
-------
None
"""
        image_rect = list(image_rect)  # the rectangle is adjusted in place below, so ensure it is mutable
        zoomed_image_height = image_rect[2] - image_rect[0]
zoomed_image_width = image_rect[3] - image_rect[1]
canvas_height_width_ratio = self.variables.canvas_height / self.variables.canvas_width
zoomed_image_height_width_ratio = zoomed_image_height / zoomed_image_width
new_image_width = zoomed_image_height / canvas_height_width_ratio
new_image_height = zoomed_image_width * canvas_height_width_ratio
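        # grow the requested rectangle along whichever axis is too small, so its
        # aspect ratio matches the canvas while the zoom center stays fixed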
if zoomed_image_height_width_ratio > canvas_height_width_ratio:
image_zoom_point_center = (image_rect[3] + image_rect[1]) / 2
image_rect[1] = image_zoom_point_center - new_image_width / 2
image_rect[3] = image_zoom_point_center + new_image_width / 2
else:
image_zoom_point_center = (image_rect[2] + image_rect[0]) / 2
image_rect[0] = image_zoom_point_center - new_image_height / 2
image_rect[2] = image_zoom_point_center + new_image_height / 2
# keep the rect within the image bounds
image_y_ul = max(image_rect[0], 0)
image_x_ul = max(image_rect[1], 0)
image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)
image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)
# re-adjust if we ran off one of the edges
if image_x_ul == 0:
image_rect[3] = new_image_width
if image_x_br == self.variables.canvas_image_object.image_reader.full_image_nx:
image_rect[1] = self.variables.canvas_image_object.image_reader.full_image_nx - new_image_width
if image_y_ul == 0:
image_rect[2] = new_image_height
if image_y_br == self.variables.canvas_image_object.image_reader.full_image_ny:
image_rect[0] = self.variables.canvas_image_object.image_reader.full_image_ny - new_image_height
# keep the rect within the image bounds
image_y_ul = max(image_rect[0], 0)
image_x_ul = max(image_rect[1], 0)
image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)
image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)
new_canvas_rect = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(
(image_y_ul, image_x_ul, image_y_br, image_x_br))
new_canvas_rect = (
int(new_canvas_rect[0]), int(new_canvas_rect[1]), int(new_canvas_rect[2]), int(new_canvas_rect[3]))
background_image = self.variables.canvas_image_object.display_image
self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(new_canvas_rect)
if self.variables.rescale_image_to_fit_canvas:
new_image = PIL.Image.fromarray(self.variables.canvas_image_object.display_image)
else:
new_image = PIL.Image.fromarray(self.variables.canvas_image_object.canvas_decimated_image)
if animate is True:
# create frame sequence
n_animations = self.variables.n_zoom_animations
background_image = background_image / 2
background_image = numpy.asarray(background_image, dtype=numpy.uint8)
canvas_x1, canvas_y1, canvas_x2, canvas_y2 = new_canvas_rect
display_x_ul = min(canvas_x1, canvas_x2)
display_x_br = max(canvas_x1, canvas_x2)
display_y_ul = min(canvas_y1, canvas_y2)
display_y_br = max(canvas_y1, canvas_y2)
x_diff = new_image.width - (display_x_br - display_x_ul)
y_diff = new_image.height - (display_y_br - display_y_ul)
pil_background_image = PIL.Image.fromarray(background_image)
frame_sequence = []
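            # each frame linearly interpolates the zoomed image between the
            # selection rectangle and the full canvas, pasted over the darkened
            # previous view, giving the appearance of a smooth zoom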
for i in range(n_animations):
new_x_ul = int(display_x_ul * (1 - i / (n_animations - 1)))
new_y_ul = int(display_y_ul * (1 - i / (n_animations - 1)))
new_size_x = int((display_x_br - display_x_ul) + x_diff * (i / (n_animations - 1)))
new_size_y = int((display_y_br - display_y_ul) + y_diff * (i / (n_animations - 1)))
resized_zoom_image = new_image.resize((new_size_x, new_size_y))
animation_image = pil_background_image.copy()
animation_image.paste(resized_zoom_image, (new_x_ul, new_y_ul))
frame_sequence.append(animation_image)
fps = n_animations / self.variables.animation_time_in_seconds
self.animate_with_pil_frame_sequence(frame_sequence, frames_per_second=fps)
if self.variables.rescale_image_to_fit_canvas:
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
else:
self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)
self.update()
self.redraw_all_shapes()
self.variables.the_canvas_is_currently_zooming = False
def update_current_image(self):
"""
Updates the current image.
Returns
-------
None
"""
rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)
if self.variables.canvas_image_object is not None:
self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
self.update()
def redraw_all_shapes(self):
"""
Redraw all the shapes.
Returns
-------
None
"""
for shape_id in self.variables.shape_ids:
pixel_coords = self.get_vector_object(shape_id).image_coords
if pixel_coords:
new_canvas_coords = self.shape_image_coords_to_canvas_coords(shape_id)
self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)
def set_current_tool_to_select_closest_shape(self):
"""
        Sets the current tool to select the closest shape.
Returns
-------
None
"""
self.variables.active_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL
self.variables.current_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL
def set_current_tool_to_zoom_out(self):
"""
Sets the current tool to zoom out.
Returns
-------
None
"""
self.variables.current_shape_id = self.variables.zoom_rect_id
self.variables.active_tool = TOOLS.ZOOM_OUT_TOOL
self.variables.current_tool = TOOLS.ZOOM_OUT_TOOL
def set_current_tool_to_zoom_in(self):
"""
Sets the current tool to zoom in.
Returns
-------
None
"""
self.variables.current_shape_id = self.variables.zoom_rect_id
self.variables.active_tool = TOOLS.ZOOM_IN_TOOL
self.variables.current_tool = TOOLS.ZOOM_IN_TOOL
def set_current_tool_to_draw_rect(self, rect_id=None):
"""
Sets the current tool to draw rectangle.
Parameters
----------
rect_id : int|None
Returns
-------
None
"""
self.variables.current_shape_id = rect_id
self.show_shape(rect_id)
self.variables.active_tool = TOOLS.DRAW_RECT_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_RECT_BY_DRAGGING
def set_current_tool_to_draw_ellipse(self, ellipse_id=None):
"""
        Sets the current tool to draw ellipse.
        Parameters
        ----------
        ellipse_id : int|None
Returns
-------
None
"""
self.variables.current_shape_id = ellipse_id
self.show_shape(ellipse_id)
self.variables.active_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING
def set_current_tool_to_draw_rect_by_clicking(self, rect_id=None):
"""
Sets the current tool to draw rectangle by clicking.
Parameters
----------
rect_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = rect_id
self.show_shape(rect_id)
self.variables.active_tool = TOOLS.DRAW_RECT_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_RECT_BY_CLICKING
def set_current_tool_to_selection_tool(self):
"""
Sets the current tool to the selection tool.
Returns
-------
None
"""
self.variables.current_shape_id = self.variables.select_rect_id
self.variables.active_tool = TOOLS.SELECT_TOOL
self.variables.current_tool = TOOLS.SELECT_TOOL
def set_current_tool_to_draw_line_by_dragging(self, line_id=None):
"""
Sets the current tool to draw line by dragging.
Parameters
----------
line_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = line_id
self.show_shape(line_id)
self.variables.active_tool = TOOLS.DRAW_LINE_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_LINE_BY_DRAGGING
def set_current_tool_to_draw_line_by_clicking(self, line_id=None):
"""
Sets the current tool to draw line by clicking.
Parameters
----------
line_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = line_id
self.show_shape(line_id)
self.variables.active_tool = TOOLS.DRAW_LINE_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_LINE_BY_CLICKING
def set_current_tool_to_draw_arrow_by_dragging(self, arrow_id=None):
"""
Sets the current tool to draw arrow by dragging.
Parameters
----------
arrow_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = arrow_id
self.show_shape(arrow_id)
self.variables.active_tool = TOOLS.DRAW_ARROW_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_ARROW_BY_DRAGGING
def set_current_tool_to_draw_arrow_by_clicking(self, arrow_id=None):
"""
Sets the current tool to draw arrow by clicking.
Parameters
----------
arrow_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = arrow_id
self.show_shape(arrow_id)
self.variables.active_tool = TOOLS.DRAW_ARROW_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_ARROW_BY_CLICKING
def set_current_tool_to_draw_polygon_by_clicking(self, polygon_id=None):
"""
Sets the current tool to draw polygon by clicking.
Parameters
----------
polygon_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = polygon_id
self.show_shape(polygon_id)
self.variables.active_tool = TOOLS.DRAW_POLYGON_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_POLYGON_BY_CLICKING
def set_current_tool_to_draw_point(self, point_id=None):
"""
Sets the current tool to draw point.
Parameters
----------
point_id : None|int
Returns
-------
None
"""
self.variables.current_shape_id = point_id
self.show_shape(point_id)
self.variables.active_tool = TOOLS.DRAW_POINT_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_POINT_BY_CLICKING
def set_current_tool_to_translate_shape(self):
"""
Sets the current tool to translate shape.
Returns
-------
None
"""
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
self.variables.current_tool = TOOLS.TRANSLATE_SHAPE_TOOL
def set_current_tool_to_none(self):
"""
Sets the current tool to None.
Returns
-------
None
"""
self.variables.active_tool = None
self.variables.current_tool = None
def set_current_tool_to_edit_shape(self):
"""
Sets the current tool to edit shape.
Returns
-------
None
"""
self.variables.active_tool = TOOLS.EDIT_SHAPE_TOOL
self.variables.current_tool = TOOLS.EDIT_SHAPE_TOOL
def set_current_tool_to_edit_shape_coords(self):
"""
Sets the current tool to edit shape coordinates.
Returns
-------
None
"""
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
self.variables.current_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
def set_current_tool_to_pan(self):
"""
Sets the current tool to pan.
Returns
-------
None
"""
self.variables.active_tool = TOOLS.PAN_TOOL
self.variables.current_tool = TOOLS.PAN_TOOL
def _set_image_from_pil_image(self, pil_image):
"""
Set image from a PIL image.
Parameters
----------
pil_image : PIL.Image
Returns
-------
None
"""
nx_pix, ny_pix = pil_image.size
self.config(scrollregion=(0, 0, nx_pix, ny_pix))
self.variables._tk_im = ImageTk.PhotoImage(pil_image)
self.variables.image_id = self.create_image(0, 0, anchor="nw", image=self.variables._tk_im)
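        # keep the image at the bottom of the canvas stacking order so that any
        # vector shapes remain visible on top of it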
self.tag_lower(self.variables.image_id)
def _pan(self, event):
"""
A pan event.
Parameters
----------
event
Returns
-------
None
"""
new_canvas_x_ul = self.variables.pan_anchor_point_xy[0] - event.x
new_canvas_y_ul = self.variables.pan_anchor_point_xy[1] - event.y
new_canvas_x_br = new_canvas_x_ul + self.variables.canvas_width
new_canvas_y_br = new_canvas_y_ul + self.variables.canvas_height
canvas_coords = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)
image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)
image_y_ul = image_coords[0]
image_x_ul = image_coords[1]
image_y_br = image_coords[2]
image_x_br = image_coords[3]
# TODO: fix this, it just snaps back to the original position if the x or y coords are less than zero
if image_y_ul < 0:
new_canvas_y_ul = 0
new_canvas_y_br = self.variables.canvas_height
if image_x_ul < 0:
new_canvas_x_ul = 0
new_canvas_x_br = self.variables.canvas_width
if image_y_br > self.variables.canvas_image_object.image_reader.full_image_ny:
image_y_br = self.variables.canvas_image_object.image_reader.full_image_ny
new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(
(image_y_br, image_x_br))
new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(
new_canvas_y_br - self.variables.canvas_height)
if image_x_br > self.variables.canvas_image_object.image_reader.full_image_nx:
image_x_br = self.variables.canvas_image_object.image_reader.full_image_nx
new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(
(image_y_br, image_x_br))
new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(
new_canvas_y_br - self.variables.canvas_height)
canvas_rect = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)
self.zoom_to_selection(canvas_rect, self.variables.animate_pan)
self.hide_shape(self.variables.zoom_rect_id)
def config_do_not_scale_image_to_fit(self):
"""
Set configuration to not scale image to fit.
Returns
-------
None
"""
# establish scrollbars
self.sbarv = tkinter.Scrollbar(self, orient=tkinter.VERTICAL)
self.sbarh = tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL)
self.sbarv.config(command=self.yview)
self.sbarh.config(command=self.xview)
self.config(yscrollcommand=self.sbarv.set)
self.config(xscrollcommand=self.sbarh.set)
        self.sbarv.grid(row=0, column=1, sticky=tkinter.N+tkinter.S)
self.sbarh.grid(row=1, column=0, sticky=tkinter.E+tkinter.W)
# TODO: this should have png -> image or image_file.
# It's not the full canvas? This is confusing.
def save_full_canvas_as_png(self, output_fname):
"""
        Save the canvas as an image file.
Parameters
----------
output_fname : str
The path of the output file.
Returns
-------
None
"""
# put a sleep in here in case there is a dialog covering the screen
# before this method is called.
time.sleep(0.1)
        # note that save_currently_displayed_canvas_to_numpy_array returns a PIL
        # image, so it can be saved directly without further conversion
im = self.save_currently_displayed_canvas_to_numpy_array()
im.save(output_fname)
# TODO: figure out proper offsets, the current solution is close but not perfect
def save_currently_displayed_canvas_to_numpy_array(self):
"""
Export the currently displayed canvas as a numpy array.
Returns
-------
numpy.ndarray
"""
x_ul = self.winfo_rootx() + 1
y_ul = self.winfo_rooty() + 1
x_lr = x_ul + self.variables.canvas_width
y_lr = y_ul + self.variables.canvas_height
im = ImageGrab.grab()
im = im.crop((x_ul, y_ul, x_lr, y_lr))
return im
# noinspection PyUnusedLocal
def activate_color_selector(self, event):
"""
The activate color selector callback function.
Parameters
----------
event
Returns
-------
None
"""
        color = colorchooser.askcolor()[1]
        if color is None:
            # the user cancelled the color chooser dialog
            return
        self.variables.foreground_color = color
        self.change_shape_color(self.variables.current_shape_id, color)
def find_closest_shape_coord(self, shape_id, canvas_x, canvas_y):
"""
        Finds the index of the vertex of the given shape closest to the provided canvas coordinates.
Parameters
----------
shape_id : int
canvas_x : int
canvas_y : int
Returns
-------
int
"""
        vector_object = self.get_vector_object(shape_id)
coords = self.get_shape_canvas_coords(shape_id)
if vector_object.type == SHAPE_TYPES.RECT:
select_x1, select_y1, select_x2, select_y2 = coords
select_xul = min(select_x1, select_x2)
select_xlr = max(select_x1, select_x2)
select_yul = min(select_y1, select_y2)
select_ylr = max(select_y1, select_y2)
ul = (select_xul, select_yul)
ur = (select_xlr, select_yul)
lr = (select_xlr, select_ylr)
ll = (select_xul, select_ylr)
rect_coords = [(select_x1, select_y1), (select_x2, select_y2)]
all_coords = [ul, ur, lr, ll]
squared_distances = []
for corner_coord in all_coords:
coord_x, coord_y = corner_coord
d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2
squared_distances.append(d)
closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]
closest_coord = all_coords[closest_coord_index]
if closest_coord not in rect_coords:
if closest_coord == ul:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))
if closest_coord == ur:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ur[0], ur[1], ll[0], ll[1]))
if closest_coord == lr:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))
if closest_coord == ll:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ll[0], ll[1], ur[0], ur[1]))
coords = self.get_shape_canvas_coords(shape_id)
squared_distances = []
coord_indices = numpy.arange(0, len(coords), step=2)
for i in coord_indices:
coord_x, coord_y = coords[i], coords[i+1]
d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2
squared_distances.append(d)
closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]
return closest_coord_index
def find_closest_shape(self, canvas_x, canvas_y):
"""
Finds the closest shape to the provided canvas coordinates, and returns its id.
Parameters
----------
canvas_x : float
canvas_y : float
Returns
-------
int
"""
# TODO: improve this. Right now it finds closest shape just based on distance to corners.
# Improvements should include:
# finding a closest point if the x/y coordinate is inside a polygon.
# finding closest distance to each line of a polygon.
non_tool_shape_ids = self.get_non_tool_shape_ids()
closest_distances = []
for shape_id in non_tool_shape_ids:
coords = self.get_shape_canvas_coords(shape_id)
squared_distances = []
coord_indices = numpy.arange(0, len(coords), step=2)
for i in coord_indices:
coord_x, coord_y = coords[i], coords[i + 1]
d = (coord_x - canvas_x) ** 2 + (coord_y - canvas_y) ** 2
squared_distances.append(d)
closest_distances.append(numpy.min(squared_distances))
closest_shape_id = non_tool_shape_ids[numpy.where(closest_distances == numpy.min(closest_distances))[0][0]]
return closest_shape_id
def get_non_tool_shape_ids(self):
"""
        Gets the shape ids for everything except shapes assigned to tools, such as the zoom and selection shapes.
Returns
-------
List
"""
all_shape_ids = self.variables.shape_ids
tool_shape_ids = self.get_tool_shape_ids()
return list(numpy.setdiff1d(all_shape_ids, tool_shape_ids))
def get_tool_shape_ids(self):
"""
Gets the shape ids for the zoom rectangle and select rectangle.
Returns
-------
List
"""
tool_shape_ids = [self.variables.zoom_rect_id,
self.variables.select_rect_id]
return tool_shape_ids
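# A hedged, self-contained sketch (not part of the original class) of the
# nearest-vertex search used by find_closest_shape_coord above: the
# numpy.where(... == numpy.min(...)) idiom there is equivalent to numpy.argmin.
def _closest_vertex_index_sketch(coords, canvas_x, canvas_y):
    """Return the index of the (x, y) pair in flat `coords` closest to the point."""
    import numpy
    pts = numpy.asarray(coords, dtype=float).reshape(-1, 2)
    d2 = (pts[:, 0] - canvas_x) ** 2 + (pts[:, 1] - canvas_y) ** 2
    return int(numpy.argmin(d2))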
|
<filename>add_item_window.py
import pygame
from text_box import text_box
from item import item
from buttom import buttom
def add_item(text_input_boxes):
new_thing = []
for i in text_input_boxes:
new_thing.append(i.return_text())
context, level, state, color, date = new_thing
new_thing = item(context, level, state, color, date)
new_thing.write_file("list.txt")
return new_thing
def add_item_window(size, things, list_box):
create_window_size = (500, 600)
create_window = pygame.display.set_mode(create_window_size)
pygame.display.set_caption("Add item")
is_create = True
text_input_labels = []
text_input_boxes = []
counter = 0
white, black = (255, 255, 255), (0, 0, 0)
textbox_active_color = (242, 179, 189) #Luka pink
myfont = pygame.font.SysFont('Comic Sans MS', 30)
item_elements = ['Context', 'Level', 'State', 'Color', 'Date']
for i in item_elements:
temp_box = text_box(pygame.Rect([110, 10 + 80*counter, 380, 70]), white, textbox_active_color)
text_input_boxes.append(temp_box)
text_surface = myfont.render(i, False, (255, 255, 255))
text_input_labels.append(text_surface)
counter += 1
Ok_buttom = buttom(pygame.Rect([50, 420, 130, 70]), white, black)
Cancel_buttom = buttom(pygame.Rect([280, 420, 130, 70]), white, black)
temp_box = text_box(pygame.Rect([20, 20+100*(len(list_box)), 510, 80]), white, textbox_active_color)
is_cancel = False
while is_create:
for event in pygame.event.get():
if event.type == pygame.QUIT:
is_create = False
if event.type == pygame.MOUSEBUTTONDOWN:
for i in text_input_boxes:
if i.return_position().collidepoint(event.pos):
i.change_is_active(True)
else:
i.change_is_active(False)
if event.type == pygame.KEYDOWN:
for i in text_input_boxes:
if i.return_is_active():
if event.key == pygame.K_BACKSPACE:
i.delete_text()
else:
i.add_text(event.unicode)
mouse = pygame.mouse.get_pos()
pressed = pygame.key.get_pressed()
for i in text_input_boxes:
if i.return_is_active():
i.draw_active_box(create_window)
else:
i.draw_unactive_box(create_window)
i.write_in_box(create_window, myfont)
x = 0
for i in text_input_labels:
create_window.blit(i, (20, 35 + 80 * x ))
x += 1
        if Ok_buttom.buttom_is_press(create_window, mouse):
            thing_item = add_item(text_input_boxes)
str_color = thing_item.return_color()[1:-1]
RGB_list = str_color.split(', ')
for j in range(0, len(RGB_list)):
RGB_list[j] = int(RGB_list[j])
temp_box = text_box(pygame.Rect([20, 20 + 100*len(things), 510, 80]), tuple(RGB_list), textbox_active_color)
temp_box.change_text(thing_item.string_form())
things.append(thing_item)
list_box.append(temp_box)
is_create = False
print("Done")
        if Cancel_buttom.buttom_is_press(create_window, mouse):
is_cancel = True
is_create = False
Ok_buttom.text_in_buttom(create_window, myfont, "Ok")
Cancel_buttom.text_in_buttom(create_window, myfont, "Cancel")
pygame.display.flip()
window = pygame.display.set_mode(size)
pygame.display.set_caption("Listing System")
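    # A hedged note (illustrative only): the color string stored by item is
    # re-parsed above with slicing and split(', '); a sturdier variant would be
    #     import ast
    #     rgb = tuple(ast.literal_eval(thing_item.return_color()))
    # assuming return_color() yields a tuple literal like "(255, 0, 0)".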
|
from typing import Optional
import re
class Milepost:
def __init__(self):
self.sustrans_ref = None
self.wiki_sustrans_ref = None
self.wiki_region = None
self.wiki_milepost_type = None
self.wiki_location = None
self.wiki_osm_link = None
self.osm_id = None
self.osm_sustrans_ref = None
self.osm_milepost_type = None
self.osm_longitude = None
self.osm_latitude = None
def set_sustrans_ref(self, sustrans_ref: str):
self.sustrans_ref = sustrans_ref
def set_wiki_sustrans_ref(self, sustrans_ref: str):
self.wiki_sustrans_ref = sustrans_ref
def set_wiki_region(self, region: str):
self.wiki_region = region
def set_wiki_milepost_type(self, milepost_type: str):
self.wiki_milepost_type = milepost_type.lower()
def set_wiki_location(self, location: str):
self.wiki_location = location
def set_wiki_osm_link(self, osm_link: str):
self.wiki_osm_link = osm_link
def set_osm_id(self, osm_id: int):
self.osm_id = osm_id
def set_osm_sustrans_ref(self, sustrans_ref: str):
self.osm_sustrans_ref = sustrans_ref
def set_osm_milepost_type(self, milepost_type: str):
self.osm_milepost_type = milepost_type.lower()
def set_osm_longitude(self, longitude: float):
self.osm_longitude = longitude
def set_osm_latitude(self, latitude: float):
self.osm_latitude = latitude
# General Info
def is_in_wiki(self) -> bool:
return self.wiki_sustrans_ref is not None
def is_in_osm(self) -> bool:
return self.osm_id is not None
# Mapping/Listing validation
def is_mapped_completely_in_osm(self) -> bool:
return self.is_in_osm() and \
self.has_valid_osm_sustrans_ref() and \
self.has_valid_osm_milepost_type()
def is_mapped_and_listed_completely_in_osm_and_wiki(self) -> bool:
return self.is_mapped_completely_in_osm() and \
self.is_in_wiki() and \
self.has_matching_sustrans_ref() and \
self.has_matching_milepost_type() and \
self.has_matching_node_id()
# Sustrans Ref validation
def has_valid_sustrans_ref_as_key(self) -> bool:
return self._valid_sustrans_ref(self.sustrans_ref)
def has_valid_wiki_sustrans_ref(self) -> bool:
return self._valid_sustrans_ref(self.wiki_sustrans_ref)
def has_valid_osm_sustrans_ref(self) -> bool:
return self._valid_sustrans_ref(self.osm_sustrans_ref)
def has_matching_sustrans_ref(self) -> bool:
return self.wiki_sustrans_ref == self.osm_sustrans_ref
# Milepost type validation
def has_valid_wiki_milepost_type(self) -> bool:
return self._valid_milepost_type(self.wiki_milepost_type)
def has_valid_osm_milepost_type(self) -> bool:
return self._valid_milepost_type(self.osm_milepost_type)
def has_matching_milepost_type(self) -> bool:
return self.wiki_milepost_type == self.osm_milepost_type
# Node id validation
def has_valid_node_id_in_wiki(self) -> bool:
return self.wiki_osm_link is not None and \
self.wiki_osm_link.isnumeric()
def has_matching_node_id(self) -> bool:
return self.wiki_osm_link is not None and self.osm_id is not None and \
int(self.wiki_osm_link) == int(self.osm_id)
@staticmethod
def _valid_sustrans_ref(sustrans_ref: Optional[str]) -> bool:
if sustrans_ref is None:
return False
return bool(re.match('^MP[0-9]+$', sustrans_ref))
@staticmethod
def _valid_milepost_type(milepost_type: Optional[str]) -> bool:
if milepost_type is None:
return False
return bool(re.match('^(dudgeon|mccoll|mills|rowe)$', milepost_type))
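# Hedged usage sketch (not part of the original module): populate a milepost
# from both data sources and run the cross-checks defined above.
if __name__ == '__main__':
    mp = Milepost()
    mp.set_sustrans_ref('MP123')            # 'MP123' is an invented example ref
    mp.set_wiki_sustrans_ref('MP123')
    mp.set_wiki_milepost_type('Mills')      # stored lower-cased as 'mills'
    mp.set_wiki_osm_link('123456')
    mp.set_osm_id(123456)
    mp.set_osm_sustrans_ref('MP123')
    mp.set_osm_milepost_type('mills')
    print(mp.is_mapped_and_listed_completely_in_osm_and_wiki())  # True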
|
#!/usr/bin/python
#
# Copyright (c) 2018 <NAME>, <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: azure_rm_aks_info
short_description: Get Azure Kubernetes Service facts
description:
- Get facts for a specific Azure Kubernetes Service or all Azure Kubernetes Services.
options:
name:
description:
            - Limit results to a specific Azure Kubernetes Service.
type: str
resource_group:
description:
- The resource group to search for the desired Azure Kubernetes Service.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
show_kubeconfig:
description:
- Show kubeconfig of the AKS cluster.
            - Note that this operation adds network overhead; it is not recommended when listing all AKS clusters.
choices:
- user
- admin
type: str
extends_documentation_fragment:
- azure.azcollection.azure
author:
- <NAME> (@yuwzho)
deprecated:
removed_in: '2.0.0'
why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead.
alternative: Use M(azure.azcollection.azure_rm_aks_info) instead.
'''
EXAMPLES = '''
- name: Get facts for one Azure Kubernetes Service
community.azure.azure_rm_aks_info:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all Azure Kubernetes Services
community.azure.azure_rm_aks_info:
- name: Get facts by tags
community.azure.azure_rm_aks_info:
tags:
- testing
'''
RETURN = '''
azure_aks:
description: List of Azure Kubernetes Service dicts.
returned: always
type: list
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
except Exception:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'managedClusters'
class AzureRMManagedClusterInfo(AzureRMModuleBase):
"""Utility class to get Azure Kubernetes Service facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
show_kubeconfig=dict(type='str', choices=['user', 'admin']),
)
self.results = dict(
changed=False,
aks=[],
available_versions=[]
)
self.name = None
self.resource_group = None
self.tags = None
self.show_kubeconfig = None
super(AzureRMManagedClusterInfo, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_aks_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_aks_facts' module has been renamed to 'azure_rm_aks_info'",
version='3.0.0', collection_name='community.azure') # was 2.13
for key in self.module_args:
setattr(self, key, kwargs[key])
self.results['aks'] = (
self.get_item() if self.name
else self.list_items()
)
return self.results
def get_item(self):
"""Get a single Azure Kubernetes Service"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
if self.show_kubeconfig:
result[0]['kube_config'] = self.get_aks_kubeconfig(self.resource_group, self.name)
return result
def list_items(self):
"""Get all Azure Kubernetes Services"""
self.log('List all Azure Kubernetes Services')
try:
response = self.managedcluster_client.managed_clusters.list(self.resource_group)
except AzureHttpError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
item_dict = self.serialize_obj(item, AZURE_OBJECT_CLASS)
if self.show_kubeconfig:
item_dict['kube_config'] = self.get_aks_kubeconfig(self.resource_group, item.name)
results.append(item_dict)
return results
def get_aks_kubeconfig(self, resource_group, name):
'''
Gets kubeconfig for the specified AKS instance.
:return: AKS instance kubeconfig
'''
if not self.show_kubeconfig:
return ''
role_name = 'cluster{0}'.format(str.capitalize(self.show_kubeconfig))
access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group, name, role_name)
return access_profile.kube_config.decode('utf-8')
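# A hedged note on get_aks_kubeconfig above: with show_kubeconfig set to
# 'user' or 'admin', role_name evaluates to 'clusterUser' or 'clusterAdmin',
# the two access-profile roles the managed clusters API exposes.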
def main():
"""Main module execution code path"""
AzureRMManagedClusterInfo()
if __name__ == '__main__':
main()
|
<filename>jschon/translation.py<gh_stars>0
from __future__ import annotations
from decimal import Decimal
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from jschon.exceptions import RelativeJSONPointerError
from jschon.json import JSON, JSONCompatible
from jschon.jsonpatch import JSONPatch, JSONPatchOperation, PatchOp
from jschon.jsonpointer import JSONPointer, RelativeJSONPointer
from jschon.jsonschema import JSONSchema, Scope
from jschon.output import JSONSchemaOutputFormatter
__all__ = [
'JSONTranslationSchema',
'TranslationScope',
'TranslationOutputFormatter',
'TranslationFilter',
'translation_filter',
]
class NoValue:
pass
class JSONTranslationSchema(JSONSchema):
def __init__(
self,
*args: Any,
scheme: str = None,
**kwargs: Any,
) -> None:
self.t9n_scheme: Optional[str] = scheme
self.t9n_source: Optional[RelativeJSONPointer] = None
self.t9n_const: Optional[JSONCompatible] = NoValue
self.t9n_concat: Optional[Tuple[RelativeJSONPointer, ...]] = None
self.t9n_sep: str = ''
self.t9n_filter: Optional[Union[str, Dict[str, JSONCompatible]]] = None
self.t9n_cast: Optional[str] = None
self.t9n_leaf: bool = True
super().__init__(*args, **kwargs)
def evaluate(self, instance: JSON, scope: TranslationScope = None) -> Scope:
if self.t9n_source is not None:
try:
source = self.t9n_source.evaluate(instance)
except RelativeJSONPointerError:
return scope
else:
source = instance
super().evaluate(source, scope)
if scope.valid and self.t9n_leaf:
if self.t9n_const is not NoValue:
value = self.t9n_const
elif self.t9n_concat is not None:
value = []
for item in self.t9n_concat:
try:
value += [self._make_value(item.evaluate(source))]
except RelativeJSONPointerError:
pass
if value:
value = self.t9n_sep.join(str(v) for v in value)
else:
value = NoValue
else:
value = self._make_value(source)
if value is not NoValue:
scope.add_translation_patch(self.t9n_scheme, scope.t9n_target, value)
return scope
def _make_value(self, instance: JSON) -> JSONCompatible:
result = instance.value
if isinstance(self.t9n_filter, str):
if filter_fn := _translation_filters.get(self.t9n_filter):
result = filter_fn(result)
elif isinstance(self.t9n_filter, dict):
result = self.t9n_filter.get(result, result)
if self.t9n_cast == 'boolean':
result = bool(result)
elif self.t9n_cast == 'integer':
result = int(result)
elif self.t9n_cast == 'number':
result = Decimal(f'{result}')
elif self.t9n_cast == 'string':
result = str(result)
return result
TranslationFilter = Callable[[JSONCompatible], JSONCompatible]
_translation_filters: Dict[str, TranslationFilter] = {}
def translation_filter(name: str = None):
def decorator(f):
filter_name = name if isinstance(name, str) else f.__name__
_translation_filters[filter_name] = f
return f
return decorator(name) if callable(name) else decorator
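# Hedged usage sketch: both registration forms below are supported by the
# decorator above (bare, and with an explicit filter name). Shown as comments
# so that no filters are registered as a side effect of importing this module.
#     @translation_filter
#     def uppercase(value):
#         return str(value).upper()
#     @translation_filter('strip-spaces')
#     def strip_spaces(value):
#         return str(value).replace(' ', '')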
class TranslationScope(Scope):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.t9n_target: Optional[JSONPointer] = None
self.t9n_patchops: Optional[Dict[str, List[JSONPatchOperation]]] = None
def add_translation_patch(
self,
scheme: str,
target: JSONPointer,
value: JSONCompatible,
) -> None:
if self.t9n_patchops is None:
self.t9n_patchops = {}
self.t9n_patchops.setdefault(scheme, [])
self.t9n_patchops[scheme] += [JSONPatchOperation(
op=PatchOp.ADD,
path=target,
value=value,
)]
def init_array(self, scheme: str, array_path: JSONPointer) -> None:
self.globals.setdefault(scheme, {})
self.globals[scheme].setdefault('arrays', {})
if array_path not in self.globals[scheme]['arrays']:
self.globals[scheme]['arrays'][array_path] = 0
self.add_translation_patch(scheme, array_path, [])
def next_array_index(self, scheme: str, array_path: JSONPointer) -> int:
next_index = self.globals[scheme]['arrays'][array_path]
self.globals[scheme]['arrays'][array_path] += 1
return next_index
def init_object(self, scheme: str, object_path: JSONPointer) -> None:
self.globals.setdefault(scheme, {})
self.globals[scheme].setdefault('objects', set())
if object_path not in self.globals[scheme]['objects']:
self.globals[scheme]['objects'] |= {object_path}
self.add_translation_patch(scheme, object_path, {})
class TranslationOutputFormatter(JSONSchemaOutputFormatter):
def create_output(self, scope: Scope, format: str, **kwargs: Any) -> JSONCompatible:
def visit(node: Scope):
if node.valid:
if hasattr(node, 't9n_patchops'):
try:
yield from node.t9n_patchops[scheme]
except (KeyError, TypeError):
pass
for child in node.iter_children():
yield from visit(child)
if format in ('patch', 'translation'):
try:
scheme = kwargs.pop('scheme')
except KeyError:
raise TypeError("Missing keyword argument 'scheme'")
patch = JSONPatch(*(patchop for patchop in visit(scope)))
if format == 'patch':
return patch.aslist()
else:
return patch.evaluate(None)
return super().create_output(scope, format, **kwargs)
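# A hedged note on create_output above: format='patch' returns the collected
# operations as a plain JSON Patch list, while format='translation' applies
# the patch to an empty document and returns the translated result.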
|
<gh_stars>1-10
import jieba
file1 = open(r'data.csv', 'r', encoding='utf-8')
lines = file1.readlines()
true_information_need_help = []
true_information_offer_help = []
true_information_other = []
for idx, line in enumerate(lines):
classes = line.split(',')[7].strip()
if classes.startswith("求救"):
#print(idx)
#print(classes)
true_information_need_help.append(idx)
elif classes.startswith("帮助"):
#print(idx)
#print(classes)
true_information_offer_help.append(idx)
elif classes.startswith("其他"):
#print(idx)
#print(classes)
true_information_other.append(idx)
print(len(true_information_need_help))
print(len(true_information_offer_help))
print(len(true_information_other))
file1.close()
file1 = open(r'data.csv', 'r', encoding='utf-8')
lines = file1.readlines()
true_information_need_help_dict = {}
true_information_offer_help_dict = {}
true_information_other_dict = {}
total_need_help = 0
total_offer_help = 0
total_other = 0
for idx, line in enumerate(lines):
# if idx == 0:
# continue
tmp_line = line.split(',')[5].strip()
# print(idx-1)
# print(tmp_line)
# print(" ")
if idx in true_information_need_help:
seg_list = jieba.cut(tmp_line, cut_all=False, HMM=True)
split_line = " ".join(seg_list).split()
for words in split_line:
for word in words:
if u'\u4e00' <= word <= u'\u9fff':
total_need_help += 1
                    if true_information_need_help_dict.get(words) is None:
true_information_need_help_dict[words] = 1
else:
true_information_need_help_dict[words] += 1
# print(words)
break
if idx in true_information_offer_help:
seg_list = jieba.cut(tmp_line, cut_all=False, HMM=True)
split_line = " ".join(seg_list).split()
for words in split_line:
for word in words:
if u'\u4e00' <= word <= u'\u9fff':
total_offer_help += 1
                    if true_information_offer_help_dict.get(words) is None:
true_information_offer_help_dict[words] = 1
else:
true_information_offer_help_dict[words] += 1
# print(words)
break
if idx in true_information_other:
seg_list = jieba.cut(tmp_line, cut_all=False, HMM=True)
split_line = " ".join(seg_list).split()
for words in split_line:
for word in words:
if u'\u4e00' <= word <= u'\u9fff':
total_other += 1
                    if true_information_other_dict.get(words) is None:
true_information_other_dict[words] = 1
else:
true_information_other_dict[words] += 1
# print(words)
break
# if idx == 10:
# print(true_words_dictionary)
# print(fake_words_dictionary)
# break
print(total_need_help)
print(total_offer_help)
print(total_other)
true_information_need_help_dict = sorted(true_information_need_help_dict.items(), key=lambda x: x[1], reverse=True)
true_information_offer_help_dict = sorted(true_information_offer_help_dict.items(), key=lambda x: x[1], reverse=True)
true_information_other_dict = sorted(true_information_other_dict.items(), key=lambda x: x[1], reverse=True)
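# A hedged simplification note (illustrative only): each counting pass above
# can be expressed with collections.Counter, e.g.
#     from collections import Counter
#     need_help = Counter(
#         w for idx in true_information_need_help
#         for w in jieba.cut(lines[idx].split(',')[5].strip())
#         if any(u'\u4e00' <= ch <= u'\u9fff' for ch in w))
#     need_help.most_common()  # replaces the manual sorted(...) call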
file2 = open(r'Need_help_dict.txt', 'w', encoding='utf-8')
file3 = open(r'Offer_help_dict.txt', 'w', encoding='utf-8')
file4 = open(r'Other_dict.txt', 'w', encoding='utf-8')
for need_help_tuple in true_information_need_help_dict:
name = need_help_tuple[0]
number = need_help_tuple[1]
ratio = number/total_need_help*1000
# print(ratio)
file2.write(name)
file2.write(" ")
file2.write(str(ratio))
file2.write("\n")
for offer_help_tuple in true_information_offer_help_dict:
name = offer_help_tuple[0]
number = offer_help_tuple[1]
ratio = number/total_offer_help*1000
# print(ratio)
file3.write(name)
file3.write(" ")
file3.write(str(ratio))
file3.write("\n")
for other_tuple in true_information_other_dict:
name = other_tuple[0]
number = other_tuple[1]
ratio = number/total_other*1000
#print(ratio)
file4.write(name)
file4.write(" ")
file4.write(str(ratio))
file4.write("\n")
file1.close()
file2.close()
file3.close()
file4.close() |
# -*- encoding: utf-8 -*-
'''
@Description : a basic component that can run directly in a runtime environment
@Date :2021/04/19 10:25:11
@Author :lzm
@version :0.0.1
'''
from onceml.types.artifact import Artifact
from onceml.types.channel import Channels, OutputChannel
from onceml.types.state import State
from typing import Any, Dict, List, Optional, Tuple
from .base_executor import BaseExecutor
from onceml.utils.json_utils import Jsonable
import onceml.types.exception as exception
from enum import Enum
from deprecated.sphinx import deprecated
class BaseComponentDeployType(Enum):
DO = 'Do'
CYCLE = 'Cycle'
class PodContainer():
def __init__(self) -> None:
pass
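# Hedged usage sketch (illustrative only; the names and the OutputChannel
# constructor signature below are assumptions, not part of this module): a
# concrete component ties an executor, upstream components and channel/param
# kwargs together, e.g.
#     class MyExecutor(BaseExecutor):
#         def Do(self, *args, **kwargs): ...
#     comp = BaseComponent(executor=MyExecutor,
#                          inputs=None,
#                          result=OutputChannel(str),  # type OutputChannel -> a channel
#                          epochs=10)                  # anything else -> a param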
class BaseComponent(Jsonable):
"""BaseComponent是最基本的组件
BaseComponent是最基本的组件,更复杂的组件应当继承于他。为了保证组件之间的数据流向,component应当具有channel、artifact;应当具有input属性与output属性;同时,组件要运行,就应当具有一个实际执行的逻辑过程
总结一下:Channel、Artifact是在运行过程中产生的数据结构;param是在运行前设置好的参数;组件的inputs是依赖的组件,outputs是返回组件的channel、artifact
"""
def __init__(self,
executor: BaseExecutor.__class__,
inputs: Optional[List] = None,
shared: bool = False,
**args):
"""
        description
        ---------
        A basic component is the smallest unit in a pipeline; components have data dependencies on one another yet run independently.
        Args
        -------
        inputs (List[BaseComponent]): the components this one depends on, if any
        instance_name (str): a name for the component, used as its id attribute (assigned by the system if not specified)
        shared (bool): whether the component's data will be shared (todo)
        args: arbitrary user-defined parameters. The component checks the type of each one: an OutputChannel becomes an attribute of the component's Channels, and is only used to validate the keys of the dict returned after the component's cycle or do function finishes (keys not declared with OutputChannel are dropped); anything else is treated as a param and made available to the component at run time
        Returns
        -------
        None
        Raises
        -------
        TypeError: the arguments do not match the required types
"""
if not issubclass(executor, BaseExecutor):
            raise TypeError('the executor passed in is not a BaseExecutor class')
        if inputs is not None and type(inputs) != list:
            raise TypeError("the component's inputs must be a list")
self._dependentComponent: List[BaseComponent] = inputs or []
for c in self._dependentComponent:
if not isinstance(c, BaseComponent):
                raise TypeError('inputs must be instances of BaseComponent or a subclass')
        # static parameters passed in before the component runs
        self._params = {}
        # results produced while the component is running
        self._channel = {}
        for key, value in args.items():
            if type(value) == OutputChannel:  # the component's Channels
                self._channel[key] = value
            else:  # the component's params
                self._params[key] = value
        # initialize the Artifact
        self._artifact = Artifact()
        # component state
        self._state = State()
        # whether the component's data will be shared
        self._datashared = shared
        # once the dependent components are found, their channels and artifacts should be added in; the pipeline handles this concretely
        self._dependentChannels = {}
        self._dependentArtifacts = {}
        # hold on to the executor class
        self._executor_cls = executor
        self._deploytype = None
        # check whether the executor class overrides exactly one of the two functions
        # if (bool(self._executor_cls.Do==BaseExecutor.Do)==bool(self._executor_cls.Cycle==BaseExecutor.Cycle)):
        #     raise SyntaxError('exactly one of Do and Cycle must be overridden')
        # which layer of the topological DAG the component sits in
        self._topoLayerIndex = -1
        # upstream and downstream nodes of the current node
        self._upstreamComponents = set()
        self._downstreamComponents = set()
        # cache mechanism: decides whether this component differs from its previous version; defaults to changed, meaning the previous data needs to be deleted
        self._changed = True
        self._namespace = None
@property
def topoLayerIndex(self):
return self._topoLayerIndex
@topoLayerIndex.setter
def topoLayerIndex(self, index: int):
if index < 0:
raise Exception("组件的topo Index必须不小于0")
self._topoLayerIndex = index
@property
def inputs(self):
return self._dependentComponent
@property
def resourceNamepace(self):
return self._namespace
@resourceNamepace.setter
def resourceNamepace(self, namespace: str):
self._namespace = namespace
@property
def outputs(self):
return self._channel
@property
def dependentComponent(self):
return self._dependentComponent
@property
def upstreamComponents(self):
return self._upstreamComponents
def add_upstream_Components(self, component):
self._upstreamComponents.add(component)
@property
def changed(self):
return self._changed
def setChanged(self, changed: bool):
self._changed = changed
@property
def downstreamComponents(self):
return self._downstreamComponents
def add_downstream_Components(self, component):
self._downstreamComponents.add(component)
@property
def artifact(self):
"""组件产生的数据文件的存放地方
不同于Channels,artifact用来存放数据文件,这些文件通常借助硬盘来交换
"""
return self._artifact
@property
def id(self):
"""组件的唯一id
可以由组件的构造函数的instance_name指定,或者由系统分配,组件的id在pipeline里唯一
"""
return self._id
@property
def type(self) -> str:
        '''Which class this instance belongs to: package name + class name
'''
return self.__class__.get_class_type()
@property
def deploytype(self) -> str:
        '''Whether this instance executes once or in a loop
        Do: executes once and finishes
        Cycle: executes repeatedly
'''
# if(self._executor_cls.Do!=BaseExecutor.Do):
# return 'Do'
# else:
# return 'Cycle'
return self._deploytype
@deploytype.setter
def deploytype(self, d_type):
if d_type not in BaseComponentDeployType._value2member_map_:
            raise exception.DeployTypeError('DeployType can only be Do or Cycle')
self._deploytype = d_type
@property
def datashared(self) -> bool:
        '''Whether the component's data will be shared
'''
return self._datashared
@id.setter
def id(self, _id: str):
_id = _id or ''
if not isinstance(_id, str):
            raise TypeError('the component id must be of type str')
        if _id == '':  # if the user did not specify one, use the class name as the id
#print('component id: ',self.__class__.__name__)
self._id = str(self.__class__.__name__).lower()
else:
self._id = _id.lower()
@classmethod
def get_class_type(cls) -> str:
return '.'.join([cls.__module__, cls.__name__])
def to_json_dict(self):
json_dict = {}
for k, v in self.__dict__.items():
if k not in [
'_downstreamComponents', '_upstreamComponents',
'_dependentComponent'
]:
json_dict[k] = v
elif k in ['_downstreamComponents', '_upstreamComponents']:
json_dict[k] = [component.id for component in v]
return json_dict
@property
def state(self) -> State:
return self._state
@state.setter
def state(self, json2state: Dict[str, Any]) -> None:
self._state = State(data=json2state)
def static_check(self,task_name:str,model_name:str):
"""
        description
        ---------
        The static stage a component goes through before being parsed into workflow resources.
        For example, the model generator and model serving components need to build the model-dependency DAG, and during that build
        the DAG must be guaranteed to contain no cycles.
        Args
        -------
        task_name: str, the task name of the pipeline
        model_name: str, the model name of the pipeline
        Returns
        -------
        Raises
        -------
"""
raise Exception("must be extended")
"""
    todo: the cache-reuse check for components currently happens at the static compile stage, and the resulting reusable flag is passed into the component.
    After the component is serialized, the orchestrator uses this flag at execution time to decide whether to wipe the previous data. This creates a problem:
    for a Cycle-type component, if the process dies and restarts, the data ends up deleted; consider turning this into a dynamic step in the future.
"""
def dynamic_check(self):
"""
description
---------
        The dynamic check performed when the component is actually executed by the driver.
Args
-------
Returns
-------
Raises
-------
"""
raise Exception("must be extended")
def extra_svc_port_internal(self)->List[Tuple[str,str,int]]:
"""组件的运行需要暴露的端口
有些时候,框架由于拓展性,组件可能需要自己运行一个server一类的程序,这个时候需要暴露端口出去,因此可以返回一个list:
[("ts","TCP",8080),...],这里ts表示使用了torch serving框架
"""
return []
def extra_svc_port_user(self)->List[Tuple[str,str,int]]:
"""用户需要暴露的端口
"""
return []
def extra_pod_containers_internal(self)->List[PodContainer]:
"""框架需要的其他容器
description
---------
如果组件需要运行其他的服务,可以将其他的进程运行在其他的容器里。正常而言是不需要再运行
其他服务,但举个例子,modelserving组件需要torchserving这么一个进程
Args
-------
Returns
-------
Raises
-------
"""
return []
def extra_pod_containers_user(self)->List[PodContainer]:
"""用户需要的容器
"""
return [] |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PhaseStatus(object):
"""
Job phase status details.
"""
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_TGT"
NAME_ODMS_VALIDATE_TGT = "ODMS_VALIDATE_TGT"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_SRC"
NAME_ODMS_VALIDATE_SRC = "ODMS_VALIDATE_SRC"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_PREMIGRATION_ADVISOR"
NAME_ODMS_VALIDATE_PREMIGRATION_ADVISOR = "ODMS_VALIDATE_PREMIGRATION_ADVISOR"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_GG_HUB"
NAME_ODMS_VALIDATE_GG_HUB = "ODMS_VALIDATE_GG_HUB"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_DATAPUMP_SETTINGS"
NAME_ODMS_VALIDATE_DATAPUMP_SETTINGS = "ODMS_VALIDATE_DATAPUMP_SETTINGS"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC"
NAME_ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC = "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT"
NAME_ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT = "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_DATAPUMP_SRC"
NAME_ODMS_VALIDATE_DATAPUMP_SRC = "ODMS_VALIDATE_DATAPUMP_SRC"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC"
NAME_ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC = "ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_VALIDATE"
NAME_ODMS_VALIDATE = "ODMS_VALIDATE"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_PREPARE"
NAME_ODMS_PREPARE = "ODMS_PREPARE"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_INITIAL_LOAD_EXPORT"
NAME_ODMS_INITIAL_LOAD_EXPORT = "ODMS_INITIAL_LOAD_EXPORT"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_DATA_UPLOAD"
NAME_ODMS_DATA_UPLOAD = "ODMS_DATA_UPLOAD"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_INITIAL_LOAD_IMPORT"
NAME_ODMS_INITIAL_LOAD_IMPORT = "ODMS_INITIAL_LOAD_IMPORT"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_POST_INITIAL_LOAD"
NAME_ODMS_POST_INITIAL_LOAD = "ODMS_POST_INITIAL_LOAD"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_PREPARE_REPLICATION_TARGET"
NAME_ODMS_PREPARE_REPLICATION_TARGET = "ODMS_PREPARE_REPLICATION_TARGET"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_MONITOR_REPLICATION_LAG"
NAME_ODMS_MONITOR_REPLICATION_LAG = "ODMS_MONITOR_REPLICATION_LAG"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_SWITCHOVER"
NAME_ODMS_SWITCHOVER = "ODMS_SWITCHOVER"
#: A constant which can be used with the name property of a PhaseStatus.
#: This constant has a value of "ODMS_CLEANUP"
NAME_ODMS_CLEANUP = "ODMS_CLEANUP"
#: A constant which can be used with the status property of a PhaseStatus.
#: This constant has a value of "PENDING"
STATUS_PENDING = "PENDING"
#: A constant which can be used with the status property of a PhaseStatus.
#: This constant has a value of "STARTED"
STATUS_STARTED = "STARTED"
#: A constant which can be used with the status property of a PhaseStatus.
#: This constant has a value of "COMPLETED"
STATUS_COMPLETED = "COMPLETED"
#: A constant which can be used with the status property of a PhaseStatus.
#: This constant has a value of "FAILED"
STATUS_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new PhaseStatus object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this PhaseStatus.
Allowed values for this property are: "ODMS_VALIDATE_TGT", "ODMS_VALIDATE_SRC", "ODMS_VALIDATE_PREMIGRATION_ADVISOR", "ODMS_VALIDATE_GG_HUB", "ODMS_VALIDATE_DATAPUMP_SETTINGS", "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC", "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT", "ODMS_VALIDATE_DATAPUMP_SRC", "ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC", "ODMS_VALIDATE", "ODMS_PREPARE", "ODMS_INITIAL_LOAD_EXPORT", "ODMS_DATA_UPLOAD", "ODMS_INITIAL_LOAD_IMPORT", "ODMS_POST_INITIAL_LOAD", "ODMS_PREPARE_REPLICATION_TARGET", "ODMS_MONITOR_REPLICATION_LAG", "ODMS_SWITCHOVER", "ODMS_CLEANUP", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type name: str
:param status:
The value to assign to the status property of this PhaseStatus.
Allowed values for this property are: "PENDING", "STARTED", "COMPLETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param duration_in_ms:
The value to assign to the duration_in_ms property of this PhaseStatus.
:type duration_in_ms: int
:param is_advisor_report_available:
The value to assign to the is_advisor_report_available property of this PhaseStatus.
:type is_advisor_report_available: bool
:param extract:
The value to assign to the extract property of this PhaseStatus.
:type extract: list[oci.database_migration.models.PhaseExtractEntry]
:param log_location:
The value to assign to the log_location property of this PhaseStatus.
:type log_location: oci.database_migration.models.LogLocationBucketDetails
:param progress:
The value to assign to the progress property of this PhaseStatus.
:type progress: int
"""
self.swagger_types = {
'name': 'str',
'status': 'str',
'duration_in_ms': 'int',
'is_advisor_report_available': 'bool',
'extract': 'list[PhaseExtractEntry]',
'log_location': 'LogLocationBucketDetails',
'progress': 'int'
}
self.attribute_map = {
'name': 'name',
'status': 'status',
'duration_in_ms': 'durationInMs',
'is_advisor_report_available': 'isAdvisorReportAvailable',
'extract': 'extract',
'log_location': 'logLocation',
'progress': 'progress'
}
self._name = None
self._status = None
self._duration_in_ms = None
self._is_advisor_report_available = None
self._extract = None
self._log_location = None
self._progress = None
@property
def name(self):
"""
**[Required]** Gets the name of this PhaseStatus.
Phase name
Allowed values for this property are: "ODMS_VALIDATE_TGT", "ODMS_VALIDATE_SRC", "ODMS_VALIDATE_PREMIGRATION_ADVISOR", "ODMS_VALIDATE_GG_HUB", "ODMS_VALIDATE_DATAPUMP_SETTINGS", "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC", "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT", "ODMS_VALIDATE_DATAPUMP_SRC", "ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC", "ODMS_VALIDATE", "ODMS_PREPARE", "ODMS_INITIAL_LOAD_EXPORT", "ODMS_DATA_UPLOAD", "ODMS_INITIAL_LOAD_IMPORT", "ODMS_POST_INITIAL_LOAD", "ODMS_PREPARE_REPLICATION_TARGET", "ODMS_MONITOR_REPLICATION_LAG", "ODMS_SWITCHOVER", "ODMS_CLEANUP", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The name of this PhaseStatus.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this PhaseStatus.
Phase name
:param name: The name of this PhaseStatus.
:type: str
"""
allowed_values = ["ODMS_VALIDATE_TGT", "ODMS_VALIDATE_SRC", "ODMS_VALIDATE_PREMIGRATION_ADVISOR", "ODMS_VALIDATE_GG_HUB", "ODMS_VALIDATE_DATAPUMP_SETTINGS", "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC", "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT", "ODMS_VALIDATE_DATAPUMP_SRC", "ODMS_VALIDATE_DATAPUMP_ESTIMATE_SRC", "ODMS_VALIDATE", "ODMS_PREPARE", "ODMS_INITIAL_LOAD_EXPORT", "ODMS_DATA_UPLOAD", "ODMS_INITIAL_LOAD_IMPORT", "ODMS_POST_INITIAL_LOAD", "ODMS_PREPARE_REPLICATION_TARGET", "ODMS_MONITOR_REPLICATION_LAG", "ODMS_SWITCHOVER", "ODMS_CLEANUP"]
if not value_allowed_none_or_none_sentinel(name, allowed_values):
name = 'UNKNOWN_ENUM_VALUE'
self._name = name
@property
def status(self):
"""
**[Required]** Gets the status of this PhaseStatus.
Phase status
Allowed values for this property are: "PENDING", "STARTED", "COMPLETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this PhaseStatus.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this PhaseStatus.
Phase status
:param status: The status of this PhaseStatus.
:type: str
"""
allowed_values = ["PENDING", "STARTED", "COMPLETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def duration_in_ms(self):
"""
**[Required]** Gets the duration_in_ms of this PhaseStatus.
Duration of the phase in milliseconds
:return: The duration_in_ms of this PhaseStatus.
:rtype: int
"""
return self._duration_in_ms
@duration_in_ms.setter
def duration_in_ms(self, duration_in_ms):
"""
Sets the duration_in_ms of this PhaseStatus.
Duration of the phase in milliseconds
:param duration_in_ms: The duration_in_ms of this PhaseStatus.
:type: int
"""
self._duration_in_ms = duration_in_ms
@property
def is_advisor_report_available(self):
"""
Gets the is_advisor_report_available of this PhaseStatus.
True if a Pre-Migration Advisor report is available for this phase. False or null if no report is available.
:return: The is_advisor_report_available of this PhaseStatus.
:rtype: bool
"""
return self._is_advisor_report_available
@is_advisor_report_available.setter
def is_advisor_report_available(self, is_advisor_report_available):
"""
Sets the is_advisor_report_available of this PhaseStatus.
True if a Pre-Migration Advisor report is available for this phase. False or null if no report is available.
:param is_advisor_report_available: The is_advisor_report_available of this PhaseStatus.
:type: bool
"""
self._is_advisor_report_available = is_advisor_report_available
@property
def extract(self):
"""
Gets the extract of this PhaseStatus.
Summary of phase status results.
:return: The extract of this PhaseStatus.
:rtype: list[oci.database_migration.models.PhaseExtractEntry]
"""
return self._extract
@extract.setter
def extract(self, extract):
"""
Sets the extract of this PhaseStatus.
Summary of phase status results.
:param extract: The extract of this PhaseStatus.
:type: list[oci.database_migration.models.PhaseExtractEntry]
"""
self._extract = extract
@property
def log_location(self):
"""
Gets the log_location of this PhaseStatus.
:return: The log_location of this PhaseStatus.
:rtype: oci.database_migration.models.LogLocationBucketDetails
"""
return self._log_location
@log_location.setter
def log_location(self, log_location):
"""
Sets the log_location of this PhaseStatus.
:param log_location: The log_location of this PhaseStatus.
:type: oci.database_migration.models.LogLocationBucketDetails
"""
self._log_location = log_location
@property
def progress(self):
"""
Gets the progress of this PhaseStatus.
Percent progress of job phase.
:return: The progress of this PhaseStatus.
:rtype: int
"""
return self._progress
@progress.setter
def progress(self, progress):
"""
Sets the progress of this PhaseStatus.
Percent progress of job phase.
:param progress: The progress of this PhaseStatus.
:type: int
"""
self._progress = progress
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
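# Hedged usage sketch (not part of the generated model): the
# @init_model_state_from_kwargs decorator is assumed to map keyword arguments
# onto the properties declared above.
if __name__ == '__main__':
    phase = PhaseStatus(name='ODMS_VALIDATE', status='PENDING', progress=0)
    print(phase.name, phase.status, phase.progress)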
|
<reponame>Yuibooo/pytorch-soft-actor-critic
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BEARQL(object):
def __init__(self, num_inputs, action_space, args):
self.gamma = args.gamma
self.tau = args.tau
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device)
hard_update(self.critic_target, self.critic)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
self.policy_target = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(device)
hard_update(self.policy_target, self.policy)
# dual_lambda
self.dual_lambda = args.init_dual_lambda
self.dual_step_size = args.dual_step_size
self.cost_epsilon = args.cost_epsilon
# coefficient_weight assigned to ensemble variance term
self.coefficient_weight = args.coefficient_weight
self.dual_grad_times = args.dual_grad_times
# used in evaluation
def select_action(self, state):
# sample multiple policies and perform a greedy maximization of Q over these policies
with torch.no_grad():
state = torch.FloatTensor(state.reshape(1, -1)).repeat(10, 1).to(device)
# state = torch.FloatTensor(state.reshape(1, -1)).to(device)
action, _, mean = self.policy.sample(state)
# q1, q2 = self.critic(state, action)
q1, q2, q3 = self.critic(state, action)
ind = q1.max(0)[1]
return action[ind].cpu().data.numpy().flatten()
# return action.cpu().data.numpy().flatten()
# MMD functions
def compute_kernel(self, x, y, sigma):
batch_size = x.shape[0]
x_size = x.shape[1]
y_size = y.shape[1]
dim = x.shape[2]
tiled_x = x.view(batch_size, x_size, 1, dim).repeat([1, 1, y_size, 1])
tiled_y = y.view(batch_size, 1, y_size, dim).repeat([1, x_size, 1, 1])
return torch.exp(-(tiled_x - tiled_y).pow(2).sum(dim=3) / (2 * sigma))
def compute_mmd(self, x, y, sigma=20.):
x_kernel = self.compute_kernel(x, x, sigma)
y_kernel = self.compute_kernel(y, y, sigma)
xy_kernel = self.compute_kernel(x, y, sigma)
square_mmd = x_kernel.mean((1, 2)) + y_kernel.mean((1, 2)) - 2 * xy_kernel.mean((1, 2))
return square_mmd
def train(self, prior, memory, batch_size, m=4, n=4):
# Sample replay buffer / batch
state_np, action_np, reward_np, next_state_np, mask_np = memory.sample(batch_size=batch_size)
state_batch = torch.FloatTensor(state_np).to(device)
next_state_batch = torch.FloatTensor(next_state_np).to(device)
action_batch = torch.FloatTensor(action_np).to(device)
reward_batch = torch.FloatTensor(reward_np).to(device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_np).to(device).unsqueeze(1)
# Critic Training
with torch.no_grad():
# Duplicate state 10 times
next_state_rep = torch.FloatTensor(np.repeat(next_state_np, 10, axis=0)).to(device)
# Soft Clipped Double Q-learning
next_state_action, _, _ = self.policy_target.sample(next_state_rep)
target_Q1, target_Q2, target_Q3 = self.critic_target(next_state_rep, next_state_action)
target_cat = torch.cat([target_Q1, target_Q2, target_Q3], 1)
target_Q = 0.75 * target_cat.min(1)[0] + 0.25 * target_cat.max(1)[0]
target_Q = target_Q.view(batch_size, -1).max(1)[0].view(-1, 1)
next_q_value = reward_batch + mask_batch * self.gamma * target_Q
qf1, qf2, qf3 = self.critic(state_batch, action_batch) # ensemble of k Q-functions
q_loss = F.mse_loss(qf1, next_q_value) + F.mse_loss(qf2, next_q_value) + F.mse_loss(qf3, next_q_value)
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Actor Training
with torch.no_grad():
state_rep_m = torch.FloatTensor(np.repeat(state_np, m, axis=0)).to(device)
state_rep_n = torch.FloatTensor(np.repeat(state_np, n, axis=0)).to(device)
for i in range(self.dual_grad_times):
prior_a_rep, _, _ = prior.sample(state_rep_n)
prior_a_rep = prior_a_rep.view(batch_size, n, -1)
pi_rep, _, _ = self.policy.sample(state_rep_m)
pi_rep = pi_rep.view(batch_size, m, -1)
mmd_dist = self.compute_mmd(prior_a_rep, pi_rep)
pi, _, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi, qf3_pi = self.critic(state_batch, pi)
qf_cat = torch.cat([qf1_pi, qf2_pi, qf3_pi], 1)
# min_qf_pi = torch.min(qf1_pi, qf2_pi) # used in TD3
# use conservative estimate of Q as used in BEAR
qf_mean = qf_cat.mean(1)
qf_var = qf_cat.var(1)
min_qf_pi = qf_mean - self.coefficient_weight * qf_var.sqrt() # used in BEAR
policy_loss = -(min_qf_pi - self.dual_lambda*mmd_dist).mean()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
# Dual Lambda Training
self.dual_gradients = mmd_dist.mean().item() - self.cost_epsilon
self.dual_lambda += self.dual_step_size * self.dual_gradients
self.dual_lambda = np.clip(self.dual_lambda, np.power(np.e, -5), np.power(np.e, 10))
# Update Target Networks
soft_update(self.critic_target, self.critic, self.tau)
soft_update(self.policy_target, self.policy, self.tau)
return q_loss.item(), policy_loss.item(), self.dual_lambda, mmd_dist.mean().item()
# Save model parameters
def save_model(self, env_name, suffix="", actor_path=None, critic_path=None):
if not os.path.exists('models/'):
os.makedirs('models/')
if actor_path is None:
actor_path = "models/BEAR_actor_{}_{}".format(env_name, suffix)
if critic_path is None:
critic_path = "models/BEAR_critic_{}_{}".format(env_name, suffix)
print('Saving models to {} and {}'.format(actor_path, critic_path))
torch.save(self.policy.state_dict(), actor_path)
torch.save(self.critic.state_dict(), critic_path)
# Load model parameters
def load_model(self, actor_path, critic_path):
print('Loading models from {} and {}'.format(actor_path, critic_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
if critic_path is not None:
self.critic.load_state_dict(torch.load(critic_path))
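# Hedged sanity-check sketch (not part of the original training code): the
# Gaussian-kernel MMD above should be near zero for two samples drawn from
# the same distribution and grow as the distributions separate.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.randn(2, 8, 3)           # (batch, n_samples, action_dim)
    y = torch.randn(2, 8, 3)
    z = torch.randn(2, 8, 3) + 3.0     # shifted distribution
    agent = BEARQL.__new__(BEARQL)     # bypass __init__ just to reach the MMD helpers
    print(agent.compute_mmd(x, y))     # small values
    print(agent.compute_mmd(x, z))     # noticeably larger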
|
#
# @file TestSBase.py
# @brief SBase unit tests
#
# @author <NAME> (Python conversion)
# @author <NAME>
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSBase.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def wrapString(s):
return s
pass
class TestSBase(unittest.TestCase):
global S
S = None
def setUp(self):
self.S = libsbml.Model(2,4)
if (self.S == None):
pass
pass
def tearDown(self):
self.S = None
pass
def test_SBase_CVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "foo")
self.assert_( self.S.getNumCVTerms() == 0 )
#self.assert_( self.S.getCVTerms() == None )
self.assert_( len(self.S.getCVTerms()) == 0 )
self.S.setMetaId( "_id")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
#self.assert_( self.S.getCVTerms() != None )
self.assert_( len(self.S.getCVTerms()) == 1 )
self.assert_( self.S.getCVTerm(0) != cv )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_addCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
#self.assert_( self.S.getCVTerms() != None )
self.assert_( len(self.S.getCVTerms()) == 1 )
res = self.S.getCVTerm(0).getResources()
self.assert_(( "foo" == res.getValue(0) ))
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getNumCVTerms() == 2 )
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv5 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv5.setBiologicalQualifierType(libsbml.BQB_HAS_PART)
cv5.addResource( "bar1")
self.S.addCVTerm(cv5)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes(self):
triple = libsbml.XMLTriple("p", "", "")
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
token4 = libsbml.XMLToken("<PASSWORD>")
node4 = libsbml.XMLNode(token4)
token5 = libsbml.XMLToken("<PASSWORD> additional <PASSWORD>")
node5 = libsbml.XMLNode(token5)
token = libsbml.XMLToken(triple,att,ns)
node = libsbml.XMLNode(token)
node.addChild(node4)
self.S.setNotes(node)
self.assert_( self.S.isSetNotes() == True )
token1 = libsbml.XMLToken(triple,att,ns)
node1 = libsbml.XMLNode(token1)
node1.addChild(node5)
self.S.appendNotes(node1)
self.assert_( self.S.isSetNotes() == True )
node2 = self.S.getNotes()
self.assert_( node2.getNumChildren() == 2 )
self.assert_(( "p" == node2.getChild(0).getName() ))
self.assert_( node2.getChild(0).getNumChildren() == 1 )
self.assert_(( "p" == node2.getChild(1).getName() ))
self.assert_( node2.getChild(1).getNumChildren() == 1 )
chars1 = node2.getChild(0).getChild(0).getCharacters()
chars2 = node2.getChild(1).getChild(0).getCharacters()
self.assert_(( "This is my text" == chars1 ))
self.assert_(( "This is additional text" == chars2 ))
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node1 ]; _dummyList[:] = []; del _dummyList
pass
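    # A hedged reading of the assertions above: appendNotes adds the incoming
    # <p> element as a sibling of the existing one, so the resulting notes
    # content holds two <p> children side by side rather than nesting them.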
def test_SBase_appendNotes1(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("<PASSWORD>")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("<PASSWORD>")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(html_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes2(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("<PASSWORD>")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token1)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(html_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes3(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
self.S.setNotes(html_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes4(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token1)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(body_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes5(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(p_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes6(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(body_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes7(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_token = libsbml.XMLToken(p_triple,att)
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(p_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes8(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
self.S.setNotes(body_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
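# The eight appendNotes tests above jointly pin down the XHTML merge rules:
# whichever container (<html>, <body> or a bare <p>) the existing notes and
# the appended fragment use, appendNotes() normalizes both sides so that the
# two <p> paragraphs end up side by side in a single container, and
# getNotes() always returns one <notes> element with exactly one child.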
def test_SBase_appendNotesString(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
newnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
newnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
newnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
newnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.assert_( self.S.isSetNotes() == True )
self.S.appendNotes(newnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
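# As with the node-based tests, the string form of appendNotes() accepts both
# a bare XHTML fragment and one already wrapped in <notes>...</notes>
# (newnotes vs. newnotes3/newnotes4 above) and produces identical tagged
# output, so callers do not need to pre-wrap their markup.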
def test_SBase_appendNotesString1(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString2(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>\n")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString3(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
addnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_appendNotesString4(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString5(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString6(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString7(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString8(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = ("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
taggednewnotes2 = ("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
addnotes4 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_getQualifiersFromResources(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getResourceBiologicalQualifier( "foo") == libsbml.BQB_ENCODES )
cv1 = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
cv1.setModelQualifierType(libsbml.BQM_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getResourceModelQualifier( "bar") == libsbml.BQM_IS )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setAnnotation(self):
token = libsbml.XMLToken("This is a test note")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
if (self.S.getAnnotation() == node):
pass
self.S.setAnnotation(self.S.getAnnotation())
self.assert_(( "This is a test note" == self.S.getAnnotation().getChild(0).getCharacters() ))
self.S.setAnnotation(None)
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotation() != None):
pass
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setAnnotationString(self):
annotation = "This is a test note";
taggedannotation = "<annotation>This is a test note</annotation>";
self.S.setAnnotation(annotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
self.S.setAnnotation(self.S.getAnnotationString())
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
chars = self.S.getAnnotationString()
self.assert_(( taggedannotation == chars ))
self.S.setAnnotation( "")
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotationString() != None):
pass
self.S.setAnnotation(taggedannotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
def test_SBase_setMetaId(self):
metaid = "x12345";
self.S.setMetaId(metaid)
self.assert_(( metaid == self.S.getMetaId() ))
self.assertEqual( True, self.S.isSetMetaId() )
if (self.S.getMetaId() == metaid):
pass
self.S.setMetaId(self.S.getMetaId())
self.assert_(( metaid == self.S.getMetaId() ))
self.S.setMetaId("")
self.assertEqual( False, self.S.isSetMetaId() )
if (self.S.getMetaId() != None):
pass
pass
def test_SBase_setNotes(self):
c = libsbml.Model(1,2)
token = libsbml.XMLToken("<PASSWORD>")
node = libsbml.XMLNode(token)
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
if (c.getNotes() == node):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
c.setNotes(c.getNotes())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = t1.getChild(0).getCharacters()
self.assert_(( "This is a test note" == chars ))
c.setNotes(None)
self.assert_( c.isSetNotes() == False )
if (c.getNotes() != None):
pass
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setNotesString(self):
c = libsbml.Model(1,2)
notes = "This is a test note";
taggednotes = "<notes>This is a test note</notes>";
c.setNotes(notes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
c.setNotes(c.getNotesString())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = c.getNotesString()
self.assert_(( taggednotes == chars ))
c.setNotes("")
self.assert_( c.isSetNotes() == False )
if (c.getNotesString() != None):
pass
c.setNotes(taggednotes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
def test_SBase_setNotesString_l3(self):
c = libsbml.Model(3,1)
notes = "This is a test note";
c.setNotes(notes)
self.assert_( c.isSetNotes() == False )
pass
def test_SBase_setNotesString_l3_addMarkup(self):
c = libsbml.Model(3,1)
notes = "This is a test note";
taggednotes = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note</p>\n" +
"</notes>")
c.setNotes(notes, True)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_( t2.getNumChildren() == 1 )
t3 = t2.getChild(0)
self.assert_(( "This is a test note" == t3.getCharacters() ))
c.setNotes(c.getNotesString(), True)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = c.getNotesString()
self.assert_(( taggednotes == chars ))
c.setNotes("", True)
self.assert_( c.isSetNotes() == False )
if (c.getNotesString() != None):
pass
c.setNotes(taggednotes, True)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_( t2.getNumChildren() == 1 )
t3 = t2.getChild(0)
self.assert_(( "This is a test note" == t3.getCharacters() ))
pass
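# Taken together, the two Level 3 tests above show that an SBML L3 model
# rejects a plain string passed to setNotes() (isSetNotes() stays False)
# unless addMarkup=True is given, in which case the string is wrapped in an
# XHTML <p> inside <notes> before being stored.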
def test_SBase_unsetAnnotationWithCVTerms(self):
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_cvterm = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <bqbiol:is>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:resource=\"http://www.geneontology.org/#GO:0005895\"/>\n" +
" </rdf:Bag>\n" +
" </bqbiol:is>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "http://www.geneontology.org/#GO:0005895")
self.S.addCVTerm(cv)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_cvterm == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_unsetAnnotationWithModelHistory(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_modelhistory = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Keating</vCard:Family>\n" +
" <vCard:Given>Sarah</vCard:Given>\n" +
" </vCard:N>\n" +
" <vCard:EMAIL><EMAIL></vCard:EMAIL>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-29T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-30T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
c.setFamilyName("Keating")
c.setGivenName("Sarah")
c.setEmail("<EMAIL>")
h.addCreator(c)
dc = libsbml.Date(2005,12,29,12,15,45,1,2,0)
h.setCreatedDate(dc)
dm = libsbml.Date(2005,12,30,12,15,45,1,2,0)
h.setModifiedDate(dm)
self.S.setModelHistory(h)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_modelhistory == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ h ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_unsetCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
self.S.unsetCVTerms()
self.assert_( self.S.getNumCVTerms() == 0 )
#self.assert_( self.S.getCVTerms() == None )
self.assert_( len(self.S.getCVTerms()) == 0 )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSBase))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
from __future__ import absolute_import, print_function, unicode_literals
# Do this before importing anything else, we need to add bundled requirements
# from the distributed version in case it exists before importing anything
# else.
# TODO: Do we want to manage the path at an even more fundamental place like
# kolibri.__init__ !? Load order will still matter...
import os # noqa
import sys # noqa
import kolibri # noqa
from kolibri import dist as kolibri_dist # noqa
# Setup path in case we are running with dependencies bundled into Kolibri
sys.path = [os.path.realpath(os.path.dirname(kolibri_dist.__file__))] + sys.path
import django # noqa
import importlib # noqa
import logging # noqa
import signal # noqa
from docopt import docopt # noqa
from logging import config as logging_config # noqa
from django.core.management import call_command # noqa
USAGE = """
Kolibri
Supported by Foundation for Learning Equality
www.learningequality.org
Usage:
kolibri start [--foreground --watch] [options] [-- DJANGO_OPTIONS ...]
kolibri stop [options] [-- DJANGO_OPTIONS ...]
kolibri restart [options] [-- DJANGO_OPTIONS ...]
kolibri status [options]
kolibri shell [options] [-- DJANGO_OPTIONS ...]
kolibri manage [options] COMMAND [-- DJANGO_OPTIONS ...]
kolibri diagnose [options]
kolibri plugin [options] PLUGIN (enable | disable)
kolibri plugin --list
kolibri -h | --help
kolibri --version
Options:
-h --help Show this screen.
--version Show version.
COMMAND The name of any available django manage command. For
help, type `kolibri manage help`
--debug Output debug messages (for development)
--port=<arg> Use a non-default port on which to start the HTTP server
or to query an existing server (stop/status)
DJANGO_OPTIONS All options are passed on to the django manage command.
Notice that all django options must appear *last* and
should not be mixed with other options. Only long-name
options ('--long-name') are supported.
Examples:
kolibri start Start Kolibri
kolibri stop Stop Kolibri
kolibri status How is Kolibri doing?
kolibri url Tell me the address of Kolibri
kolibri shell Display a Django shell
kolibri manage help Show the Django management usage dialogue
kolibri manage runserver Runs Django's development server
kolibri diagnose Show system information for debugging
Environment:
DJANGO_SETTINGS_MODULE
- The Django settings module to load. Useful if you are deploying Kolibri
in a specific setup such as your own web server.
- Default: "kolibri.deployment.default.settings.base"
KOLIBRI_HOME
- Where Kolibri will store its data and configuration files. If you are using
an external drive, set this to a directory on that drive.
"""
__doc__ = """
Kolibri Command Line Interface (CLI)
====================================
Auto-generated usage instructions from ``kolibri -h``::
{usage:s}
""".format(usage="\n".join(map(lambda x: " " + x, USAGE.split("\n"))))
# Set default env
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ.setdefault("KOLIBRI_HOME", os.path.join(os.path.expanduser("~"), ".kolibri"))
os.environ.setdefault("KOLIBRI_LISTEN_PORT", "8008")
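# Example (hypothetical deployment snippet): setdefault() above only fills in
# missing values, so a wrapper process can relocate Kolibri's data simply by
# exporting the variables before this module is imported:
#
#   import os
#   os.environ["KOLIBRI_HOME"] = "/mnt/external/kolibri"  # e.g. an external drive
#   os.environ["DJANGO_SETTINGS_MODULE"] = "myproject.settings.kolibri"
#   import kolibri.utils.cli  # the defaults above are now left untouched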
logger = logging.getLogger(__name__)
KOLIBRI_HOME = os.environ['KOLIBRI_HOME']
VERSION_FILE = os.path.join(KOLIBRI_HOME, '.data_version')
class PluginDoesNotExist(Exception):
"""
This exception is local to the CLI environment in case actions are performed
on a plugin that cannot be loaded.
"""
pass
def _first_run():
"""
Called on the first run only. Does nothing if the .data_version file in
KOLIBRI_HOME is already present.
"""
if os.path.exists(VERSION_FILE):
logger.error("_first_run() called, but Kolibri is already initialized.")
return
logger.info("Kolibri running for the first time.")
logger.info(
"We don't yet use pre-migrated database seeds, so you're going to have "
"to wait a bit while we create a blank database...\n\n"
)
django.setup()
from kolibri.core.settings import SKIP_AUTO_DATABASE_MIGRATION, DEFAULT_PLUGINS
if not SKIP_AUTO_DATABASE_MIGRATION:
call_command("migrate", interactive=False)
for plugin_module in DEFAULT_PLUGINS:
try:
plugin(plugin_module, enable=True)
except PluginDoesNotExist:
continue
logger.info("Automatically enabling applications.")
with open(VERSION_FILE, "w") as f:
f.write(kolibri.__version__)
def initialize(debug=False):
"""
Always called before running commands
:param: debug: Tells initialization to setup logging etc.
"""
setup_logging(debug=debug)
if not os.path.isfile(VERSION_FILE):
_first_run()
def setup_logging(debug=False):
"""Configures logging in cases where a Django environment is not supposed
to be configured"""
try:
from django.conf import settings
# Only use Django's LOGGING when settings are already configured; accessing
# it otherwise would force-configure Django, which this helper must avoid.
LOGGING = settings.LOGGING if settings.configured else None
except ImportError:
LOGGING = None
if LOGGING is None:
from kolibri.deployment.default.settings.base import LOGGING
if debug:
from django.conf import settings
settings.DEBUG = True
LOGGING['handlers']['console']['level'] = 'DEBUG'
LOGGING['loggers']['kolibri']['level'] = 'DEBUG'
logging_config.dictConfig(LOGGING)
logger.debug("Debug mode is on!")
def manage(cmd, args=None):
"""
Invokes a django command
:param: cmd: The command to invoke, for instance "runserver"
:param: args: arguments for the command
"""
# Set sys.argv to correctly reflect the way we invoke kolibri as a Python
# module
sys.argv = ["-m", "kolibri"] + sys.argv[1:]
from django.core.management import execute_from_command_line
argv = ['kolibri manage', cmd] + list(args or [])
execute_from_command_line(argv=argv)
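# Usage sketch for manage() (hypothetical command): manage("migrate",
# args=["--noinput"]) builds argv as ['kolibri manage', 'migrate',
# '--noinput'] and hands it to Django's execute_from_command_line, the same
# effect as running `kolibri manage migrate -- --noinput` from the shell.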
def plugin(plugin_name, **args):
"""
Receives a plugin identifier and tries to load its main class. Calls class
functions.
"""
from kolibri.utils import conf
plugin_classes = []
from kolibri.plugins.base import KolibriPluginBase # NOQA
# Try to load kolibri_plugin from given plugin module identifier
try:
plugin_module = importlib.import_module(plugin_name + ".kolibri_plugin")
for obj in plugin_module.__dict__.values():
if isinstance(obj, type) and obj is not KolibriPluginBase and issubclass(obj, KolibriPluginBase):
plugin_classes.append(obj)
except ImportError as e:
if str(e).startswith("No module named"):
raise PluginDoesNotExist("Plugin '{}' does not seem to exist. Is it on the PYTHONPATH?".format(plugin_name))
else:
raise
if args.get('enable', False):
for klass in plugin_classes:
klass.enable()
if args.get('disable', False):
for klass in plugin_classes:
klass.disable()
conf.save()
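# Usage sketch (hypothetical plugin name): plugin("kolibri.plugins.example",
# enable=True) imports kolibri.plugins.example.kolibri_plugin, calls enable()
# on every KolibriPluginBase subclass defined there, then saves the config.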
def main(args=None):
"""
Kolibri's main function. Parses arguments and calls utility functions.
Utility functions should be callable for unit testing purposes, but remember
to use main() for integration tests in order to test the argument API.
"""
# ensure that Django is set up before we do anything else
django.setup()
if not args:
args = sys.argv[1:]
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Split out the parts of the argument list that we pass on to Django
# and don't feed to docopt.
if '--' in args:
pivot = args.index('--')
args, django_args = args[:pivot], args[pivot + 1:]
else:
django_args = []
docopt_kwargs = dict(
version=str(kolibri.__version__),
options_first=False,
)
if args:
docopt_kwargs['argv'] = args
arguments = docopt(USAGE, **docopt_kwargs)
debug = arguments['--debug']
initialize(debug=debug)
# Alias
if arguments['shell']:
arguments['manage'] = True
arguments['COMMAND'] = 'shell'
if arguments['manage']:
command = arguments['COMMAND']
manage(command, args=django_args)
return
if arguments['plugin']:
plugin_name = arguments['PLUGIN']
plugin(plugin_name, **arguments)
return
if arguments['start']:
# import from server.py here to avoid circular imports caused by importing kolibri.deployment.default.wsgi
from . import server
server.start()
return
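# Example of the "--" split in main() above (hypothetical invocation):
#   main(["manage", "migrate", "--", "--noinput"])
# docopt parses only ["manage", "migrate"], while ["--noinput"] is passed
# through untouched to Django as django_args.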
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.ticker as ticker
import scipy.spatial.distance as spd
import scipy.cluster.hierarchy as sph
from scipy import stats
import matplotlib
#matplotlib.use('Agg')
import pylab
import pandas as pd
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
try:
import cPickle as pickle
except ImportError:
import pickle
sys.setrecursionlimit(10000)
# samples on rows
class SqrtNorm(matplotlib.colors.Normalize):
"""
Normalize a given value to the 0-1 range on a square root scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = np.ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = np.ma.getmask(result)
result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
matplotlib.cbook._putmask(resdat, mask, 1)
np.sqrt(resdat, resdat)
resdat -= np.sqrt(vmin)
resdat /= (np.sqrt(vmax) - np.sqrt(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
# Invert the forward mapping val = (sqrt(x) - sqrt(vmin)) / (sqrt(vmax) - sqrt(vmin)),
# i.e. x = (val * (sqrt(vmax) - sqrt(vmin)) + sqrt(vmin)) ** 2.
if matplotlib.cbook.iterable(value):
val = np.ma.asarray(value)
return (val * (np.sqrt(vmax) - np.sqrt(vmin)) + np.sqrt(vmin)) ** 2
return (value * (np.sqrt(vmax) - np.sqrt(vmin)) + np.sqrt(vmin)) ** 2
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
A = np.ma.masked_less_equal(A, 0, copy=False)
self.vmin = np.ma.min(A)
self.vmax = np.ma.max(A)
def autoscale_None(self, A):
'''Autoscale only None-valued vmin or vmax.'''
if self.vmin is not None and self.vmax is not None:
return
A = np.ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = np.ma.min(A)
if self.vmax is None:
self.vmax = np.ma.max(A)
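def _sqrtnorm_example():
# Illustrative helper, not part of the original script: SqrtNorm maps a
# value v to (sqrt(v) - sqrt(vmin)) / (sqrt(vmax) - sqrt(vmin)), so with
# vmin=1 and vmax=9 the inputs 1, 4, 9 normalize to 0.0, 0.5 and 1.0.
norm = SqrtNorm(vmin=1, vmax=9)
return norm(np.array([1.0, 4.0, 9.0]))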
class DataMatrix:
datatype = 'data_matrix'
@staticmethod
def input_parameters( parser ):
dm_param = parser.add_argument_group('Input data matrix parameters')
arg = dm_param.add_argument
arg( '--sep', type=str, default='\t' )
arg( '--out_table', type=str, default=None,
help = 'Write processed data matrix to file' )
arg( '--fname_row', type=int, default=0,
help = "row number containing the names of the features "
"[default 0, specify -1 if no names are present in the matrix")
arg( '--sname_row', type=int, default=0,
help = "column number containing the names of the samples "
"[default 0, specify -1 if no names are present in the matrix")
arg( '--metadata_rows', type=str, default=None,
help = "Row numbers to use as metadata"
"[default None, meaning no metadata")
arg( '--skip_rows', type=str, default=None,
help = "Row numbers to skip (0-indexed, comma separated) from the input file"
"[default None, meaning no rows skipped")
arg( '--sperc', type=int, default=90,
help = "Percentile of sample value distribution for sample selection" )
arg( '--fperc', type=int, default=90,
help = "Percentile of feature value distribution for sample selection" )
arg( '--stop', type=int, default=None,
help = "Number of top samples to select (ordering based on percentile specified by --sperc)" )
arg( '--ftop', type=int, default=None,
help = "Number of top features to select (ordering based on percentile specified by --fperc)" )
arg( '--def_na', type=float, default=None,
help = "Set the default value for missing values [default None which means no replacement]")
def __init__( self, input_file, args ):
self.args = args
self.metadata_rows = []
self.metadata_table = None
toskip = [int(l) for l in self.args.skip_rows.split(",")] if self.args.skip_rows else []
if self.args.metadata_rows:
self.metadata_rows = list([int(a) for a in self.args.metadata_rows.split(",")])
mdr = self.metadata_rows[::]
for t in toskip:
for i,m in enumerate(mdr):
if t <= m:
self.metadata_rows[i] -= 1
if self.metadata_rows:
header = [self.args.fname_row]+self.metadata_rows if self.args.fname_row > -1 else self.metadata_rows
else:
header = self.args.fname_row if self.args.fname_row > -1 else None
self.table = pd.read_table(
input_file, sep = self.args.sep, # skipinitialspace = True,
skiprows = sorted(toskip) if isinstance(toskip, list) else toskip,
header = sorted(header) if isinstance(header, list) else header,
index_col = self.args.sname_row if self.args.sname_row > -1 else None
)
def select( perc, top ):
self.table['perc'] = self.table.apply(lambda x: stats.scoreatpercentile(x,perc),axis=1)
m = sorted(self.table['perc'])[-top]
self.table = self.table[self.table['perc'] >= m ]
del self.table['perc']
        if self.args.def_na is not None:
self.table = self.table.fillna( self.args.def_na )
if self.args.ftop:
select( self.args.fperc, self.args.ftop )
if self.args.stop:
self.table = self.table.T
select( self.args.sperc, self.args.stop )
self.table = self.table.T
# add missing values
def get_numpy_matrix( self ):
return np.matrix(self.table)
#def get_metadata_matrix( self ):
# return self.table.columns
def get_snames( self ):
#return list(self.table.index)
return self.table.columns
def get_fnames( self ):
#print self.table.columns.names
#print self.table.columns
return list(self.table.index)
def get_averages(self, by_row = True) :
return self.table.mean(axis = 1 if by_row else 0)
def save_matrix( self, output_file ):
self.table.to_csv( output_file, sep = '\t' )
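# Illustrative sketch (not invoked): DataMatrix is driven by the argparse
# namespace built from input_parameters(). The input file name below is
# hypothetical.
def _example_datamatrix_usage():
    import argparse
    parser = argparse.ArgumentParser()
    DataMatrix.input_parameters(parser)
    example_args = parser.parse_args(['--fperc', '90', '--ftop', '50'])
    dm_example = DataMatrix('abundances.tsv', example_args)  # hypothetical file
    print(dm_example.get_numpy_matrix().shape)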
class DistMatrix:
datatype = 'distance_matrix'
@staticmethod
def input_parameters( parser ):
dm_param = parser.add_argument_group('Distance parameters')
arg = dm_param.add_argument
dist_funcs = [ "euclidean","minkowski","cityblock","seuclidean",
"sqeuclidean","cosine","correlation","hamming",
"jaccard","chebyshev","canberra","braycurtis",
"mahalanobis","yule","matching","dice",
"kulsinski","rogerstanimoto","russellrao","sokalmichener",
"sokalsneath","wminkowski","ward" ]
arg( '--f_dist_f', type=str, default="correlation",
help = "Distance function for features [default correlation]")
arg( '--s_dist_f', type=str, default="euclidean",
help = "Distance function for sample [default euclidean]")
arg( '--load_dist_matrix_f', type=str, default=None,
help = "Load the distance matrix to be used for features [default None].")
arg( '--load_dist_matrix_s', type=str, default=None,
help = "Load the distance matrix to be used for samples [default None].")
arg( '--save_dist_matrix_f', type=str, default=None,
help = "Save the distance matrix for features to file [default None].")
arg( '--save_dist_matrix_s', type=str, default=None,
help = "Save the distance matrix for samples to file [default None].")
    def __init__( self, data, args = None ):
        self.args = args
        self.sdf = args.s_dist_f
        self.fdf = args.f_dist_f
        self.s_cdist_matrix, self.f_cdist_matrix = None, None
        self.numpy_full_matrix = (data if
            type(data) == np.matrixlib.defmatrix.matrix else None)
    def compute_f_dists( self ):
        if self.args.load_dist_matrix_f:
            with open( self.args.load_dist_matrix_f, "rb" ) as inp:
                self.f_cdist_matrix = pickle.load( inp )
        else:
            dt = self.numpy_full_matrix
            if self.fdf == "spearman":
                dt_ranked = np.matrix([stats.rankdata(d) for d in dt])
                self.f_cdist_matrix = spd.pdist( dt_ranked, "correlation" )
                return
            if self.fdf == "pearson":
                self.fdf = 'correlation'
            self.f_cdist_matrix = spd.pdist( dt, self.fdf )
            if self.args.save_dist_matrix_f:
                with open( self.args.save_dist_matrix_f, "wb" ) as outf:
                    pickle.dump( self.f_cdist_matrix, outf )
    def compute_s_dists( self ):
        if self.args.load_dist_matrix_s:
            with open( self.args.load_dist_matrix_s, "rb" ) as inp:
                self.s_cdist_matrix = pickle.load( inp )
        else:
            dt = self.numpy_full_matrix.transpose()
            if self.sdf == "spearman":
                dt_ranked = np.matrix([stats.rankdata(d) for d in dt])
                self.s_cdist_matrix = spd.pdist( dt_ranked, "correlation" )
                return
            if self.sdf == "pearson":
                self.sdf = 'correlation'
            self.s_cdist_matrix = spd.pdist( dt, self.sdf )
            if self.args.save_dist_matrix_s:
                with open( self.args.save_dist_matrix_s, "wb" ) as outf:
                    pickle.dump( self.s_cdist_matrix, outf )
def get_s_dm( self ):
return self.s_cdist_matrix
def get_f_dm( self ):
return self.f_cdist_matrix
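# Illustrative note (not invoked): the *_cdist_matrix members hold condensed
# distance vectors as returned by scipy.spatial.distance.pdist (the flattened
# upper triangle of the square distance matrix); scipy's hierarchical linkage
# accepts this form directly, which is why HClustering below can consume
# get_s_dm()/get_f_dm() without conversion. The matrix and args below are
# placeholders; example_args is assumed to come from a parser that registered
# DistMatrix.input_parameters().
def _example_distmatrix_usage(example_args):
    data = np.matrix(np.random.rand(20, 8))  # 20 features x 8 samples, made up
    distm_example = DistMatrix(data, args=example_args)
    distm_example.compute_f_dists()
    return distm_example.get_f_dm()  # condensed vector of length 20*19/2 == 190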
class HClustering:
datatype = 'hclustering'
@staticmethod
def input_parameters( parser ):
cl_param = parser.add_argument_group('Clustering parameters')
arg = cl_param.add_argument
linkage_method = [ "single","complete","average",
"weighted","centroid","median",
"ward" ]
arg( '--no_fclustering', action='store_true',
help = "avoid clustering features" )
arg( '--no_sclustering', action='store_true',
help = "avoid clustering samples" )
arg( '--flinkage', type=str, default="average",
help = "Linkage method for feature clustering [default average]")
arg( '--slinkage', type=str, default="average",
help = "Linkage method for sample clustering [default average]")
def get_reordered_matrix( self, matrix, sclustering = True, fclustering = True ):
if not sclustering and not fclustering:
return matrix
        idx1 = self.sdendrogram['leaves'] if sclustering else None
        idx2 = self.fdendrogram['leaves'][::-1] if fclustering else None
        if sclustering and fclustering:
            return matrix[idx2,:][:,idx1]
        if fclustering:
            return matrix[idx2,:]
        if sclustering:
            return matrix[:,idx1]
def get_reordered_sample_labels( self, slabels ):
return [slabels[i] for i in self.sdendrogram['leaves']]
def get_reordered_feature_labels( self, flabels ):
return [flabels[i] for i in self.fdendrogram['leaves']]
def __init__( self, s_dm, f_dm, args = None ):
self.s_dm = s_dm
self.f_dm = f_dm
self.args = args
        self.shclusters = None
        self.fhclusters = None
self.sdendrogram = None
self.fdendrogram = None
    def shcluster( self, dendrogram = True ):
        self.shclusters = sph.linkage( self.s_dm, self.args.slinkage )
        if dendrogram:
            self.sdendrogram = sph.dendrogram( self.shclusters, no_plot=True )
    def fhcluster( self, dendrogram = True ):
        self.fhclusters = sph.linkage( self.f_dm, self.args.flinkage )
        if dendrogram:
            self.fdendrogram = sph.dendrogram( self.fhclusters, no_plot=True )
def get_shclusters( self ):
return self.shclusters
def get_fhclusters( self ):
return self.fhclusters
def get_sdendrogram( self ):
return self.sdendrogram
def get_fdendrogram( self ):
return self.fdendrogram
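# Illustrative sketch (not invoked): the dendrogram dicts produced with
# no_plot=True expose a 'leaves' permutation, which is what
# get_reordered_matrix() and the label helpers use to align the heatmap with
# the dendrograms. example_args is assumed to come from a parser that
# registered HClustering.input_parameters().
def _example_hclustering_usage(s_dm, f_dm, example_args):
    cl_example = HClustering(s_dm, f_dm, args=example_args)
    cl_example.shcluster()
    cl_example.fhcluster()
    return cl_example.sdendrogram['leaves'], cl_example.fdendrogram['leaves']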
class Heatmap:
datatype = 'heatmap'
bbcyr = {'red': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.0, 1.0)),
'blue': ( (0.0, 0.0, 0.0),
(0.25, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 1.0))}
bbcry = {'red': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ( (0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ( (0.0, 0.0, 0.0),
(0.25, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 1.0))}
bcry = {'red': ( (0.0, 0.0, 0.0),
(0.33, 0.0, 0.0),
(0.66, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ( (0.0, 0.0, 0.0),
(0.33, 1.0, 1.0),
(0.66, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ( (0.0, 1.0, 1.0),
(0.33, 1.0, 1.0),
(0.66, 0.0, 0.0),
(1.0, 0.0, 1.0))}
my_colormaps = [ ('bbcyr',bbcyr),
('bbcry',bbcry),
('bcry',bcry)]
dcols = ['#ca0000','#0087ff','#00ba1d','#cf00ff','#00dbe2','#ffaf00','#0017f4','#006012','#e175ff','#877878','#050505','#b5cf00','#ff8a8a','#aa6400','#50008a','#00ff58']
@staticmethod
def input_parameters( parser ):
hm_param = parser.add_argument_group('Heatmap options')
arg = hm_param.add_argument
arg( '--dpi', type=int, default=150,
help = "Image resolution in dpi [default 150]")
arg( '-l', '--log_scale', action='store_true',
help = "Log scale" )
arg( '-s', '--sqrt_scale', action='store_true',
help = "Square root scale" )
arg( '--no_slabels', action='store_true',
help = "Do not show sample labels" )
arg( '--minv', type=float, default=None,
help = "Minimum value to display in the color map [default None meaning automatic]" )
arg( '--maxv', type=float, default=None,
help = "Maximum value to display in the color map [default None meaning automatic]" )
arg( '--no_flabels', action='store_true',
help = "Do not show feature labels" )
        arg( '--max_slabel_len', type=int, default=25,
             help = "Max number of chars to report for sample labels [default 25]" )
        arg( '--max_flabel_len', type=int, default=25,
             help = "Max number of chars to report for feature labels [default 25]" )
arg( '--flabel_size', type=int, default=10,
help = "Feature label font size [default 10]" )
arg( '--slabel_size', type=int, default=10,
help = "Sample label font size [default 10]" )
arg( '--fdend_width', type=float, default=1.0,
help = "Width of the feature dendrogram [default 1 meaning 100%% of default heatmap width]")
arg( '--sdend_height', type=float, default=1.0,
help = "Height of the sample dendrogram [default 1 meaning 100%% of default heatmap height]")
arg( '--metadata_height', type=float, default=.05,
help = "Height of the metadata panel [default 0.05 meaning 5%% of default heatmap height]")
        arg( '--metadata_separation', type=float, default=.01,
             help = "Distance between the metadata and data panels [default 0.01 meaning 1%% of default heatmap height]")
        arg( '--image_size', type=float, default=8,
             help = "Size of the larger dimension (width or height) of the image in inches [default 8]")
arg( '--cell_aspect_ratio', type=float, default=1.0,
help = "Aspect ratio between width and height for the cells of the heatmap [default 1.0]")
col_maps = ['Accent', 'Blues', 'BrBG', 'BuGn', 'BuPu', 'Dark2', 'GnBu',
'Greens', 'Greys', 'OrRd', 'Oranges', 'PRGn', 'Paired',
'Pastel1', 'Pastel2', 'PiYG', 'PuBu', 'PuBuGn', 'PuOr',
'PuRd', 'Purples', 'RdBu', 'RdGy', 'RdPu', 'RdYlBu', 'RdYlGn',
'Reds', 'Set1', 'Set2', 'Set3', 'Spectral', 'YlGn', 'YlGnBu',
'YlOrBr', 'YlOrRd', 'afmhot', 'autumn', 'binary', 'bone',
'brg', 'bwr', 'cool', 'copper', 'flag', 'gist_earth',
'gist_gray', 'gist_heat', 'gist_ncar', 'gist_rainbow',
'gist_stern', 'gist_yarg', 'gnuplot', 'gnuplot2', 'gray',
'hot', 'hsv', 'jet', 'ocean', 'pink', 'prism', 'rainbow',
'seismic', 'spectral', 'spring', 'summer', 'terrain', 'winter'] + [n for n,c in Heatmap.my_colormaps]
for n,c in Heatmap.my_colormaps:
my_cmap = matplotlib.colors.LinearSegmentedColormap(n,c,256)
pylab.register_cmap(name=n,cmap=my_cmap)
arg( '-c','--colormap', type=str, choices = col_maps, default = 'bbcry' )
arg( '--bottom_c', type=str, default = None,
help = "Color to use for cells below the minimum value of the scale [default None meaning bottom color of the scale]")
        arg( '--top_c', type=str, default = None,
             help = "Color to use for cells above the maximum value of the scale [default None meaning top color of the scale]")
arg( '--nan_c', type=str, default = None,
help = "Color to use for nan cells [default None]")
"""
arg( '--', type=str, default="average",
help = "Linkage method for feature clustering [default average]")
arg( '--slinkage', type=str, default="average",
help = "Linkage method for sample clustering [default average]")
"""
def __init__( self, numpy_matrix, sdendrogram, fdendrogram, snames, fnames, fnames_meta, args = None ):
self.numpy_matrix = numpy_matrix
self.sdendrogram = sdendrogram
self.fdendrogram = fdendrogram
self.snames = snames
self.fnames = fnames
self.fnames_meta = fnames_meta
self.ns,self.nf = self.numpy_matrix.shape
self.args = args
def make_legend( self, dmap, titles, out_fn ):
figlegend = plt.figure(figsize=(1+3*len(titles),2), frameon = False)
gs = gridspec.GridSpec( 1, len(dmap), wspace = 2.0 )
for i,(d,title) in enumerate(zip(dmap,titles)):
legax = plt.subplot(gs[i],frameon = False)
for k,v in sorted(d.items(),key=lambda x:x[1]):
rect = Rectangle( [0.0, 0.0], 0.0, 0.0,
facecolor = self.dcols[v%len(self.dcols)],
label = k,
edgecolor='b', lw = 0.0)
legax.add_patch(rect)
#remove_splines( legax )
legax.set_xticks([])
legax.set_yticks([])
legax.legend( loc = 2, frameon = False, title = title)
"""
ncol = legend_ncol, bbox_to_anchor=(1.01, 3.),
borderpad = 0.0, labelspacing = 0.0,
handlelength = 0.5, handletextpad = 0.3,
borderaxespad = 0.0, columnspacing = 0.3,
prop = {'size':fontsize}, frameon = False)
"""
if out_fn:
figlegend.savefig(out_fn, bbox_inches='tight')
def draw( self ):
rat = float(self.ns)/self.nf
rat *= self.args.cell_aspect_ratio
x,y = (self.args.image_size,rat*self.args.image_size) if rat < 1 else (self.args.image_size/rat,self.args.image_size)
fig = plt.figure( figsize=(x,y), facecolor = 'w' )
cm = pylab.get_cmap(self.args.colormap)
bottom_col = [ cm._segmentdata['red'][0][1],
cm._segmentdata['green'][0][1],
cm._segmentdata['blue'][0][1] ]
if self.args.bottom_c:
bottom_col = self.args.bottom_c
cm.set_under( bottom_col )
top_col = [ cm._segmentdata['red'][-1][1],
cm._segmentdata['green'][-1][1],
cm._segmentdata['blue'][-1][1] ]
if self.args.top_c:
top_col = self.args.top_c
cm.set_over( top_col )
if self.args.nan_c:
cm.set_bad( self.args.nan_c )
def make_ticklabels_invisible(ax):
for tl in ax.get_xticklabels() + ax.get_yticklabels():
tl.set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
def remove_splines( ax ):
for v in ['right','left','top','bottom']:
ax.spines[v].set_color('none')
        def shrink_labels( labels, n ):
            shrink = lambda x: x[:n//2]+" [...] "+x[-n//2:]
            return [(shrink(str(l)) if len(str(l)) > n else l) for l in labels]
#gs = gridspec.GridSpec( 4, 2,
# width_ratios=[1.0-fr_ns,fr_ns],
# height_ratios=[.03,0.03,1.0-fr_nf,fr_nf],
# wspace = 0.0, hspace = 0.0 )
fr_ns = float(self.ns)/max([self.ns,self.nf])
fr_nf = float(self.nf)/max([self.ns,self.nf])
buf_space = 0.05
minv = min( [buf_space*8, 8*rat*buf_space] )
if minv < 0.05:
buf_space /= minv/0.05
        metadata_height = self.args.metadata_height if type(self.snames[0]) is tuple and len(self.snames[0]) > 1 else 0.000001
gs = gridspec.GridSpec( 6, 4,
width_ratios=[ buf_space, buf_space*2, .08*self.args.fdend_width,0.9],
height_ratios=[ buf_space, buf_space*2, .08*self.args.sdend_height, metadata_height, self.args.metadata_separation, 0.9],
wspace = 0.0, hspace = 0.0 )
ax_hm = plt.subplot(gs[23], axisbg = bottom_col )
ax_metadata = plt.subplot(gs[15], axisbg = bottom_col )
ax_hm_y2 = ax_hm.twinx()
norm_f = matplotlib.colors.Normalize
if self.args.log_scale:
norm_f = matplotlib.colors.LogNorm
elif self.args.sqrt_scale:
norm_f = SqrtNorm
minv, maxv = 0.0, None
maps, values, ndv = [], [], 0
        if type(self.snames[0]) is tuple and len(self.snames[0]) > 1:
            metadata = zip(*[list(s[1:]) for s in self.snames])
for m in metadata:
mmap = dict([(v[1],ndv+v[0]) for v in enumerate(list(set(m)))])
values.append([mmap[v] for v in m])
ndv += len(mmap)
maps.append(mmap)
dcols = []
mdmat = np.matrix(values)
while len(dcols) < ndv:
dcols += self.dcols
cmap = matplotlib.colors.ListedColormap(dcols[:ndv])
bounds = [float(f)-0.5 for f in range(ndv+1)]
imm = ax_metadata.imshow( mdmat, #origin='lower',
interpolation = 'nearest',
aspect='auto',
extent = [0, self.nf, 0, self.ns],
cmap=cmap,
vmin=bounds[0],
vmax=bounds[-1],
)
remove_splines( ax_metadata )
ax_metadata_y2 = ax_metadata.twinx()
ax_metadata_y2.set_ylim(0,len(self.fnames_meta))
ax_metadata.set_yticks([])
ax_metadata_y2.set_ylim(0,len(self.fnames_meta))
ax_metadata_y2.tick_params(length=0)
ax_metadata_y2.set_yticks(np.arange(len(self.fnames_meta))+0.5)
ax_metadata_y2.set_yticklabels(self.fnames_meta[::-1], va='center',size=self.args.flabel_size)
else:
ax_metadata.set_yticks([])
ax_metadata.set_xticks([])
im = ax_hm.imshow( self.numpy_matrix, #origin='lower',
interpolation = 'nearest', aspect='auto',
extent = [0, self.nf, 0, self.ns],
cmap=cm,
vmin=self.args.minv,
vmax=self.args.maxv,
norm = norm_f( vmin=minv if minv > 0.0 else None, vmax=maxv)
)
#ax_hm.set_ylim([0,800])
        ax_hm.set_xticks(np.arange(len(list(self.snames)))+0.5)
        if not self.args.no_slabels:
            snames_short = shrink_labels( list([s[0] for s in self.snames]) if type(self.snames[0]) is tuple else self.snames, self.args.max_slabel_len )
ax_hm.set_xticklabels(snames_short,rotation=90,va='top',ha='center',size=self.args.slabel_size)
else:
ax_hm.set_xticklabels([])
ax_hm_y2.set_ylim([0,self.ns])
        ax_hm_y2.set_yticks(np.arange(len(self.fnames))+0.5)
        if not self.args.no_flabels:
            fnames_short = shrink_labels( self.fnames, self.args.max_flabel_len )
ax_hm_y2.set_yticklabels(fnames_short,va='center',size=self.args.flabel_size)
else:
ax_hm_y2.set_yticklabels( [] )
ax_hm.set_yticks([])
remove_splines( ax_hm )
ax_hm.tick_params(length=0)
ax_hm_y2.tick_params(length=0)
#ax_hm.set_xlim([0,self.ns])
ax_cm = plt.subplot(gs[3], axisbg = 'r', frameon = False)
#fig.colorbar(im, ax_cm, orientation = 'horizontal', spacing = 'proportional', format = ticker.LogFormatterMathtext() )
fig.colorbar(im, ax_cm, orientation = 'horizontal', spacing='proportional' if self.args.sqrt_scale else 'uniform' ) # , format = ticker.LogFormatterMathtext() )
if not self.args.no_sclustering:
ax_den_top = plt.subplot(gs[11], axisbg = 'r', frameon = False)
sph._plot_dendrogram( self.sdendrogram['icoord'], self.sdendrogram['dcoord'], self.sdendrogram['ivl'],
self.ns + 1, self.nf + 1, 1, 'top', no_labels=True,
color_list=self.sdendrogram['color_list'] )
ymax = max([max(a) for a in self.sdendrogram['dcoord']])
ax_den_top.set_ylim([0,ymax])
make_ticklabels_invisible( ax_den_top )
if not self.args.no_fclustering:
ax_den_right = plt.subplot(gs[22], axisbg = 'b', frameon = False)
sph._plot_dendrogram( self.fdendrogram['icoord'], self.fdendrogram['dcoord'], self.fdendrogram['ivl'],
self.ns + 1, self.nf + 1, 1, 'right', no_labels=True,
color_list=self.fdendrogram['color_list'] )
xmax = max([max(a) for a in self.fdendrogram['dcoord']])
ax_den_right.set_xlim([xmax,0])
make_ticklabels_invisible( ax_den_right )
if not self.args.out:
plt.show( )
else:
fig.savefig( self.args.out, bbox_inches='tight', dpi = self.args.dpi )
if maps:
            self.make_legend( maps, self.fnames_meta, self.args.legend_file )
class ReadCmd:
def __init__( self ):
import argparse as ap
import textwrap
p = ap.ArgumentParser( description= "TBA" )
arg = p.add_argument
arg( '-i', '--inp', '--in', metavar='INPUT_FILE', type=str, nargs='?', default=sys.stdin,
help= "The input matrix" )
        arg( '-o', '--out', metavar='OUTPUT_FILE', type=str, nargs='?', default=None,
             help= "The output image file [image on screen if not specified]" )
arg( '--legend_file', metavar='LEGEND_FILE', type=str, nargs='?', default=None,
help= "The output file for the legend of the provided metadata" )
input_types = [DataMatrix.datatype,DistMatrix.datatype]
arg( '-t', '--input_type', metavar='INPUT_TYPE', type=str, choices = input_types,
default='data_matrix',
help= "The input type can be a data matrix or distance matrix [default data_matrix]" )
DataMatrix.input_parameters( p )
DistMatrix.input_parameters( p )
HClustering.input_parameters( p )
Heatmap.input_parameters( p )
self.args = p.parse_args()
def check_consistency( self ):
pass
def get_args( self ):
return self.args
if __name__ == '__main__':
read = ReadCmd( )
read.check_consistency()
args = read.get_args()
if args.input_type == DataMatrix.datatype:
dm = DataMatrix( args.inp, args )
if args.out_table:
dm.save_matrix( args.out_table )
distm = DistMatrix( dm.get_numpy_matrix(), args = args )
if not args.no_sclustering:
distm.compute_s_dists()
if not args.no_fclustering:
distm.compute_f_dists()
    elif args.input_type == DistMatrix.datatype:
# distm = read...
pass
else:
pass
cl = HClustering( distm.get_s_dm(), distm.get_f_dm(), args = args )
if not args.no_sclustering:
cl.shcluster()
if not args.no_fclustering:
cl.fhcluster()
hmp = dm.get_numpy_matrix()
fnames = dm.get_fnames()
snames = dm.get_snames()
fnames_meta = snames.names[1:]
#if not args.no_sclustering or not args.no_fclustering ):
hmp = cl.get_reordered_matrix( hmp, sclustering = not args.no_sclustering, fclustering = not args.no_fclustering )
if not args.no_sclustering:
snames = cl.get_reordered_sample_labels( snames )
if not args.no_fclustering:
fnames = cl.get_reordered_feature_labels( fnames )
else:
fnames = fnames[::-1]
hm = Heatmap( hmp, cl.sdendrogram, cl.fdendrogram, snames, fnames, fnames_meta, args = args )
hm.draw()
<reponame>Xxhhj1/doc-generate-1
from nltk import WordNetLemmatizer
from sekg.constant.code import CodeEntityCategory
from sekg.pipeline.component.base import Component
from sekg.text.extractor.domain_entity.identifier_util import IdentifierInfoExtractor
from sekg.text.spacy_pipeline.pipeline import PipeLineFactory
from sekg.util.code import CodeElementNameUtil
from sekg.util.vocabulary_conversion.vocabulary_conversion import VocabularyConversion
import re
from project.extractor_module.constant.constant import FunctionalityConstant, FeatureConstant, DomainConstant, \
RelationNameConstant
from project.extractor_module.data_model.statement_record import StatementRecord
from project.utils.json_tool import JsonTool
class BaseStructureExtractor(Component):
def __init__(self, ):
super().__init__()
self.code_name_tool = CodeElementNameUtil()
self.type_of_class = {
CodeEntityCategory.CATEGORY_CLASS,
CodeEntityCategory.CATEGORY_INTERFACE,
CodeEntityCategory.CATEGORY_EXCEPTION_CLASS,
CodeEntityCategory.CATEGORY_ERROR_CLASS,
CodeEntityCategory.CATEGORY_ENUM_CLASS,
CodeEntityCategory.CATEGORY_ANNOTATION_CLASS
}
self.type_of_method = {
CodeEntityCategory.CATEGORY_METHOD,
CodeEntityCategory.CATEGORY_CONSTRUCT_METHOD,
CodeEntityCategory.CATEGORY_BASE_OVERRIDE_METHOD,
}
self.lemmatizer = WordNetLemmatizer()
self.vocabulary_conversion_tool = VocabularyConversion()
self.identifier_extractor = IdentifierInfoExtractor()
self.nlp = PipeLineFactory.full_pipeline()
self.camel_cache = {}
self.func_relation_set = {
RelationNameConstant.has_Functionality_Relation,
RelationNameConstant.Functionality_Compare_Relation,
RelationNameConstant.has_Behavior_Relation,
}
self.concept_classification = {
RelationNameConstant.Ontology_IS_A_Relation,
}
self.membership = {
RelationNameConstant.Ontology_Derive_Relation,
}
self.characteristic = {
RelationNameConstant.has_Feature_Relation,
RelationNameConstant.has_Constraint_Relation,
}
        # where the extraction results are stored (api node id -> statements)
self.api_id_2_statement = dict()
self.graph_out_path = None
self.json_tool = JsonTool()
self.json_save_path = None
def set_json_save_path(self, json_save_path):
self.json_save_path = json_save_path
def save_json(self, path: str):
for api_id in self.api_id_2_statement:
statement_list = self.api_id_2_statement[api_id]
for statement in statement_list:
self.json_tool.add_statement(api_id, statement)
self.json_tool.save_json(path)
def uncamelize_classname(self, classname):
"""
:param classname:
:return:
"""
if not classname:
return None
if re.match('^[0-9A-Z]+$', classname):
return classname
sub = re.sub(r'([A-Z0-9]+)([A-Z]+[a-z0-9])', r'\1 \2', classname).strip()
sub = re.sub(r'_', " ", sub)
sub = re.sub(r'([A-Z]+[0-9])([A-Z][a-z0-9]+)', r'\1 \2', sub)
sub = re.sub(r'([A-Z]*[0-9]?[A-Z]+)', r' \1', sub)
sub = re.sub(r'\s+', " ", sub).strip()
return sub
def extract_camle_name(self, name):
        split_name = name.split('.')
        last_name = split_name[-1]
        sent = self.uncamelize_classname(last_name) or ''
doc = self.nlp(sent)
ret = list()
ret.append(sent)
for word in doc:
if word.tag_ == 'NNP' or word.tag_ == 'NN':
ret.append(str(word.text).strip())
return set(ret)
def get_part_of_method_name(self, qualified_name):
return qualified_name.split("(")[0]
def uncamelize(self, camel_case):
"""
uncamelize the ontology named in camel format. eg. ArrayList->Array List
:param camel_case:
:return:
"""
if camel_case in self.camel_cache:
return self.camel_cache[camel_case]
sub = self.code_name_tool.uncamelize_by_stemming(camel_case)
self.camel_cache[camel_case] = sub
return sub
def create_statement_entity(self, statement: StatementRecord):
label_info = {"entity"}
type_class = self.get_record_entity_type_by_relation(statement.r_name)
label_info.add(str(type_class.LABEL))
label_info.add(str("statement"))
node_properties = {
type_class.PRIMARY_PROPERTY_NAME: statement.e_name,
}
for extra_info_key in statement.extra_info:
node_properties[extra_info_key] = statement.extra_info[extra_info_key]
node_properties["which_extractor"] = statement.which_extractor
node_properties["e_type"] = statement.e_type
node_properties["s_name"] = statement.s_name
node_properties["r_name"] = statement.r_name
graph_id = self.graph_data.add_node(label_info, node_properties,
primary_property_name=type_class.PRIMARY_PROPERTY_NAME)
return graph_id
def get_record_entity_type_by_relation(self, relation: str):
"""
@param relation:
@return:
"""
        if relation in self.func_relation_set:
            return FunctionalityConstant
        if relation in self.characteristic:
            return FeatureConstant
        # concept-classification and membership relations, as well as any
        # unknown relation, map to domain terms
        return DomainConstant
def add_relations(self, start_id, relation_str, end_id):
"""
添加图中的关系
@param start_id:
@param end_id:
@param relation_str:
@return:
"""
try:
self.graph_data.add_relation(start_id, relation_str, end_id)
except Exception as e:
print(e)
def after_run(self, **config):
super().after_run(**config)
print("after running component %r" % (self.type()))
counter = 0
for i, api_id in enumerate(self.api_id_2_statement):
statement_list = self.api_id_2_statement[api_id]
if i % 1000 == 0:
print(i)
counter += len(statement_list)
for statement in statement_list:
statement_node_id = self.create_statement_entity(statement)
self.add_relations(api_id, statement.r_name, statement_node_id)
self.graph_data.save(self.graph_out_path)
print("counter" + str(counter))
def set_save_path(self, p):
self.graph_out_path = p
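# Illustrative note: concrete extractors are expected to subclass
# BaseStructureExtractor, fill self.api_id_2_statement with lists of
# StatementRecord objects keyed by API node id, and configure the output
# locations before the pipeline runs, e.g. (hypothetical paths):
#
#     extractor.set_save_path("out/graph.v1.graph")
#     extractor.set_json_save_path("out/statements.json")
#
# after_run() then materialises every statement as a graph node plus a
# relation from the owning API node.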
<reponame>burhandodhy/CNTK<gh_stars>1000+
# =============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import cntk
import numpy as np
import codecs
import os
from cntk.io import UserMinibatchSource, StreamInformation, MinibatchData
from math import ceil, sqrt
from converter import load_vocab_location_from_file, load_vocab_from_file
TEXT_ENCODING = 'utf-8'
UNK = '<unk>'
# A file reader that yields consecutive (feature, label) word pairs from a text corpus
class FileReader(object):
'''A File Reader'''
def __init__(self, path):
self.input_file = codecs.open(path, 'r', encoding=TEXT_ENCODING)
self.pointer = self.generator()
self.mask = None
def reset(self):
self.input_file.seek(0)
self.pointer = self.generator()
self.mask = None
def generator(self):
'''Get next (feature, label)'''
for line in self.input_file:
words = line.split()
words_count_in_line = len(words)
for i in range(words_count_in_line):
if i == words_count_in_line - 1:
continue
yield (words[i], words[i + 1])
def next(self):
try:
if self.mask is not None:
sample = self.mask
self.mask = None
else:
sample = next(self.pointer)
except StopIteration:
return None
return sample
def hasnext(self):
if self.mask is not None:
return True
else:
self.mask = self.next()
if self.mask is not None:
return True
else:
return False
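# Illustrative sketch (not invoked): FileReader yields consecutive word pairs
# per line, so a line "a b c" produces ("a", "b") and ("b", "c"). The corpus
# path below is hypothetical.
def _example_filereader_usage():
    reader = FileReader('corpus.txt')  # hypothetical path
    while reader.hasnext():
        feature, label = reader.next()
        print(feature, label)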
# Provides an override of UserMinibatchSource that parses the text into a stream-to-data mapping
class DataSource(UserMinibatchSource):
def __init__(self, path, word_config, location_config, seqlength, batchsize):
self.word_index = load_vocab_from_file(word_config)
self.word_position = load_vocab_location_from_file(location_config)
self.vocab_dim = len(self.word_index)
self.vocab_base = int(ceil(sqrt(self.vocab_dim)))
self.reader = FileReader(path)
self.seqlength = seqlength
self.batchsize = batchsize
self.input1 = StreamInformation("input1", 0, 'sparse', np.float32, (self.vocab_base,))
self.input2 = StreamInformation("input2", 1, 'sparse', np.float32, (self.vocab_base,))
self.label1 = StreamInformation("label1", 2, 'sparse', np.float32, (self.vocab_base,))
self.label2 = StreamInformation("label2", 3, 'sparse', np.float32, (self.vocab_base,))
self.word1 = StreamInformation("word1", 4, 'dense', np.float32, (1,))
self.word2 = StreamInformation("word2", 5, 'dense', np.float32, (1,))
super(DataSource, self).__init__()
def stream_infos(self):
return [self.input1, self.input2, self.label1, self.label2, self.word1, self.word2]
def parse_word(self, word):
# Parse token to id
return self.word_index[word] if word in self.word_index else self.word_index[UNK]
def make_minibatch(self, samples):
# Make the next minibatch
source = [sample[0] for sample in samples]
target = [sample[1] for sample in samples]
def transform(x, w=False):
return np.reshape(x, (-1, self.seqlength, 1) if w else (-1, self.seqlength))
source = transform(source)
target = transform(target)
input1, label1, input2, label2, word1, word2 = [], [], [], [], [], []
for i in range(len(source)):
for w in range(len(source[i])):
input1.append(self.word_position[source[i][w]][0])
input2.append(self.word_position[source[i][w]][1])
label1.append(self.word_position[source[i][w]][1])
label2.append(self.word_position[target[i][w]][0])
word1.append(source[i][w])
word2.append(target[i][w])
return \
cntk.Value.one_hot(batch=transform(input1), num_classes=self.vocab_base), \
cntk.Value.one_hot(batch=transform(input2), num_classes=self.vocab_base), \
cntk.Value.one_hot(batch=transform(label1), num_classes=self.vocab_base), \
cntk.Value.one_hot(batch=transform(label2), num_classes=self.vocab_base), \
cntk.Value(batch=np.asarray(transform(word1, True), dtype=np.float32)), \
cntk.Value(batch=np.asarray(transform(word2, True), dtype=np.float32))
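    # Note on the factorisation used above (assuming word_position encodes the
    # usual row/column decomposition): with base b = ceil(sqrt(V)), every word
    # id w < V is stored as a pair (row, col), conceptually w = row * b + col,
    # so the model predicts two softmaxes of size b instead of one of size V.
    # With made-up numbers, V = 10000 gives b = 100 and w = 4217 maps to
    # row 42, col 17.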
def next_minibatch(self, num_samples, number_of_workers=1, worker_rank=0, device=None):
samples = []
sweep_end = False
for i in range(num_samples):
feature_to_label = self.reader.next()
if feature_to_label is None:
samples = samples[: (len(samples) // self.seqlength) * self.seqlength]
self.reader.reset()
sweep_end = True
break
feature, label = feature_to_label
curr_word = self.parse_word(feature)
next_word = self.parse_word(label)
samples.append((curr_word, next_word))
        batchsize = len(samples) // self.seqlength  # integer division; len(samples) is expected to be a multiple of seqlength
# Divide batch into every gpu
batchrange = list(map(int, [
(batchsize // number_of_workers) * worker_rank,
min((batchsize // number_of_workers) * (worker_rank + 1), batchsize)
]))
samples = samples[batchrange[0] * self.seqlength: batchrange[1] * self.seqlength]
minibatch = self.make_minibatch(samples)
sample_count = len(samples)
num_seq = len(minibatch[0])
minibatch = {
self.input1: MinibatchData(minibatch[0], num_seq, sample_count, sweep_end),
self.input2: MinibatchData(minibatch[1], num_seq, sample_count, sweep_end),
self.label1: MinibatchData(minibatch[2], num_seq, sample_count, sweep_end),
self.label2: MinibatchData(minibatch[3], num_seq, sample_count, sweep_end),
self.word1: MinibatchData(minibatch[4], num_seq, sample_count, sweep_end),
self.word2: MinibatchData(minibatch[5], num_seq, sample_count, sweep_end)
}
return minibatch
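# Illustrative sketch (not executed): wiring DataSource into a training loop.
# All file names and sizes below are hypothetical.
def _example_datasource_usage():
    source = DataSource('train.txt', 'vocab.txt', 'locations.txt',
                        seqlength=20, batchsize=32)
    mb = source.next_minibatch(20 * 32)
    print(len(mb), 'streams in the minibatch')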
<filename>redbrick/export/public.py
"""Public API to exporting."""
import asyncio
from typing import List, Dict, Optional, Tuple, Any
from functools import partial
import os
import json
import copy
from datetime import datetime
from shapely.geometry import Polygon # type: ignore
import skimage
import skimage.morphology # type: ignore
import numpy as np # type: ignore
from matplotlib import cm # type: ignore
import tqdm # type: ignore
from PIL import Image # type: ignore
from redbrick.common.context import RBContext
from redbrick.common.enums import LabelType
from redbrick.utils.files import uniquify_path, download_files
from redbrick.utils.logging import print_error, print_info, print_warning
from redbrick.utils.pagination import PaginationIterator
from redbrick.utils.rb_label_utils import clean_rb_label, flat_rb_format
from redbrick.coco.coco_main import coco_converter
def _parse_entry_latest(item: Dict) -> Dict:
task_id = item.get("taskId", "") or ""
task_data = item.get("latestTaskData", {}) or {}
datapoint = task_data.get("dataPoint", {}) or {}
items_presigned = datapoint.get("itemsPresigned", []) or []
items = datapoint.get("items", []) or []
name = datapoint.get("name", "") or ""
created_by = task_data.get("createdByEmail", "") or ""
labels = [
clean_rb_label(label)
for label in json.loads(task_data.get("labelsData", "[]") or "[]")
]
return flat_rb_format(
labels,
items,
items_presigned,
name,
created_by,
task_id,
item.get("currentStageName", "") or "",
task_data.get("labelsPath"),
)
def parse_output_entry(item: Dict) -> Dict:
"""Parse entry for output data."""
items_presigned = item.get("itemsPresigned", []) or []
items = item.get("items", []) or []
name = item.get("name", "") or ""
label_data = item.get("labelData", {}) or {}
created_by = label_data.get("createdByEmail", "") or ""
labels = [
clean_rb_label(label)
for label in json.loads(label_data.get("labelsData", "[]") or "[]")
]
task = item.get("task", {}) or {}
return flat_rb_format(
labels,
items,
items_presigned,
name,
created_by,
task.get("taskId", "") or "",
"END",
label_data.get("labelsPath"),
)
class Export:
"""
Primary interface to handling export from a project.
This class has methods to export to various formats depending on
your project type.
"""
def __init__(
self, context: RBContext, org_id: str, project_id: str, project_type: LabelType
) -> None:
"""Construct Export object."""
self.context = context
self.org_id = org_id
self.project_id = project_id
self.project_type = project_type
def _get_raw_data_ground_truth(self, concurrency: int) -> Tuple[List[Dict], Dict]:
temp = self.context.export.get_datapoints_output
my_iter = PaginationIterator(
partial(temp, self.org_id, self.project_id, concurrency)
)
general_info = self.context.export.get_output_info(self.org_id, self.project_id)
print_info("Downloading tasks")
with tqdm.tqdm(
my_iter, unit=" datapoints", total=general_info["datapointCount"]
) as progress:
datapoints = [parse_output_entry(val) for val in progress]
disable = progress.disable
progress.disable = False
progress.update(general_info["datapointCount"] - progress.n)
progress.disable = disable
return datapoints, general_info["taxonomy"]
def _get_raw_data_latest(
self, concurrency: int, cache_time: Optional[datetime] = None
) -> Tuple[List[Dict], Dict]:
temp = self.context.export.get_datapoints_latest
my_iter = PaginationIterator(
partial(temp, self.org_id, self.project_id, cache_time, concurrency)
)
general_info = self.context.export.get_output_info(self.org_id, self.project_id)
datapoint_count = self.context.export.datapoints_in_project(
self.org_id, self.project_id
)
print_info("Downloading tasks")
with tqdm.tqdm(my_iter, unit=" datapoints", total=datapoint_count) as progress:
datapoints = [_parse_entry_latest(val) for val in progress]
disable = progress.disable
progress.disable = False
progress.update(datapoint_count - progress.n)
progress.disable = disable
return datapoints, general_info["taxonomy"]
def _get_raw_data_single(self, task_id: str) -> Tuple[List[Dict], Dict]:
general_info = self.context.export.get_output_info(self.org_id, self.project_id)
datapoint = self.context.export.get_datapoint_latest(
self.org_id, self.project_id, task_id
)
return [_parse_entry_latest(datapoint)], general_info["taxonomy"]
@staticmethod
def _get_color(class_id: int) -> Any:
"""Get a color from class id."""
if class_id > 20:
color = (
np.array(cm.tab20b(int(class_id))) * 255 # pylint: disable=no-member
)
return color.astype(np.uint8)
color = np.array(cm.tab20c(int(class_id))) * 255 # pylint: disable=no-member
return color.astype(np.uint8)
@staticmethod
def tax_class_id_mapping(
taxonomy: Dict, class_id: Dict, color_map: Optional[Dict] = None
) -> None:
"""Create a class mapping from taxonomy categories to class_id."""
for category in taxonomy:
class_id[category["name"]] = category["classId"] + 1
# Create a color map
if color_map is not None:
color_map[category["name"]] = Export._get_color(category["classId"])[
0:3
].tolist() # not doing +1 here.
Export.tax_class_id_mapping(category["children"], class_id, color_map)
@staticmethod
def fill_mask_holes(mask: np.ndarray, max_hole_size: int) -> np.ndarray:
"""Fill holes."""
mask_copy = copy.deepcopy(mask)
# find indexes where mask has labels
mask_greater_zero = np.where(mask > 0)
        # convert the copied mask to binary
mask_copy[mask_greater_zero] = 1
# fill holes in copy binary mask
mask_copy = skimage.morphology.remove_small_holes(
mask_copy.astype(bool),
area_threshold=max_hole_size,
)
mask_copy = mask_copy.astype(int)
# set original pixel values
mask_copy[mask_greater_zero] = mask[mask_greater_zero]
# find indexes of holes, and fill with neighbor
mask_hole_loc = np.where((mask == 0) & (mask_copy > 0))
for i in range(len(mask_hole_loc[0])):
mask_copy = Export.fill_hole_with_neighbor(
mask_copy, mask_hole_loc[0][i], mask_hole_loc[1][i]
)
return mask_copy
@staticmethod
def fill_hole_with_neighbor(mask: np.ndarray, i: Any, j: Any) -> np.ndarray:
"""Fill a pixel in the mask with it's neighbors value."""
row, col = mask.shape
top = 0 if j - 1 < 0 else mask[i][j - 1]
top_right = 0 if (j - 1 < 0) or (i + 1 == row) else mask[i + 1][j - 1]
right = 0 if i + 1 == row else mask[i + 1][j]
bottom_right = 0 if (j + 1 == col) or (i + 1 == row) else mask[i + 1][j + 1]
bottom = 0 if j + 1 == col else mask[i][j + 1]
bottom_left = 0 if (i - 1 < 0) or (j + 1 == col) else mask[i - 1][j + 1]
left = 0 if i - 1 < 0 else mask[i - 1][j]
top_left = 0 if (i - 1 < 0) or (j - 1 == 0) else mask[i - 1][j - 1]
mask[i][j] = max(
top,
top_right,
right,
bottom_right,
bottom,
bottom_left,
left,
top_left,
)
return mask
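    # Worked example for fill_hole_with_neighbor (made-up values): for a hole
    # pixel whose eight in-bounds neighbours read {1, 0, 2, 0, 0, 0, 3, 0},
    # the pixel is filled with max(...) = 3. Out-of-bounds neighbours
    # contribute 0, so border pixels need no special casing.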
@staticmethod
def convert_rbai_mask( # pylint: disable=too-many-locals
labels: List,
class_id_map: Dict,
fill_holes: bool = False,
max_hole_size: int = 30,
) -> np.ndarray:
"""Convert rbai datapoint to a numpy mask."""
try:
import rasterio.features # pylint: disable=import-error, import-outside-toplevel
except Exception as error:
print_error(
"For windows users, please follow the rasterio "
+ "documentation to properly install the module "
+ "https://rasterio.readthedocs.io/en/latest/installation.html "
+ "Rasterio is required by RedBrick SDK to work with masks."
)
raise error
imagesize = labels[0]["pixel"]["imagesize"]
# deal with condition where imagesize is returned as float
imagesize = np.round(imagesize).astype(int) # type: ignore
mask = np.zeros([imagesize[1], imagesize[0]])
for label in labels:
class_id = class_id_map[label["category"][0][-1]]
regions = copy.deepcopy(label["pixel"]["regions"])
holes = copy.deepcopy(label["pixel"]["holes"])
imagesize = label["pixel"]["imagesize"]
# deal with condition where imagesize is returned as float
imagesize = np.round(imagesize).astype(int) # type: ignore
# iterate through regions, and create region mask
region_mask = np.zeros([imagesize[1], imagesize[0]])
if regions and len(regions) > 0:
for region in regions:
                    if (
                        len(np.array(region).shape) == 1
                        or np.array(region).shape[0] < 3
                    ):
                        # Skip empty regions and regions with < 3 vertices;
                        # continue (not break) so later regions are still drawn
                        continue
# convert polygon to mask
region_polygon = Polygon(region)
single_region_mask = (
rasterio.features.rasterize(
[region_polygon],
out_shape=(imagesize[1], imagesize[0]),
).astype(float)
* class_id
)
# add single region to root region mask
region_mask += single_region_mask
# iterate through holes, and create hole mask
hole_mask = np.zeros([imagesize[1], imagesize[0]])
if holes and len(holes) > 0:
for hole in holes:
                    if len(np.array(hole).shape) == 1 or np.array(hole).shape[0] < 3:
                        # Skip empty holes and holes with < 3 vertices;
                        # continue (not break) so later holes are still cut out
                        continue
# convert polygon hole to mask
hole_polygon = Polygon(hole)
single_hole_mask = (
rasterio.features.rasterize(
[hole_polygon],
out_shape=(imagesize[1], imagesize[0]),
).astype(float)
* class_id
)
# add single hole mask to total hole mask
hole_mask += single_hole_mask
# subtract the hole mask from region mask
region_mask -= hole_mask
# cleanup:
# - remove overlapping region values
neg_idxs = np.where(region_mask < 0)
region_mask[neg_idxs] = 100
# - remove negative values from overlapping holes
overlap_indexes = np.where(region_mask > class_id)
region_mask[overlap_indexes] = 100
# merge current object to main mask
class_idx_not_zero = np.where(region_mask != 0)
mask[class_idx_not_zero] = region_mask[class_idx_not_zero]
# fill all single pixel holes
if fill_holes:
mask = Export.fill_mask_holes(mask, max_hole_size)
# convert 2d mask into 3d mask with colors
color_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
class_ids = np.unique(mask) # type: ignore
for i in class_ids:
if i == 0:
# don't add color to background
continue
indexes = np.where(mask == i)
color_mask[indexes] = Export._get_color(i - 1)[0:3]
return color_mask
@staticmethod
def _export_png_mask_data(
datapoints: List[Dict],
taxonomy: Dict,
mask_dir: str,
class_map: str,
datapoint_map: str,
fill_holes: bool = False,
max_hole_size: int = 30,
) -> None:
"""Export png masks and map json."""
# pylint: disable=too-many-locals
# Create a color map from the taxonomy
class_id_map: Dict = {}
color_map: Dict = {}
Export.tax_class_id_mapping(
taxonomy["categories"][0]["children"], class_id_map, color_map
)
# Convert rbai to png masks and save output
dp_map = {}
print_info("Converting to masks")
for datapoint in tqdm.tqdm(datapoints):
labels = [label for label in datapoint["labels"] if "pixel" in label]
if not labels:
print_warning(
f"No segmentation labels in task {datapoint['taskId']}, skipping"
)
continue
filename = f"{datapoint['taskId']}.png"
dp_map[filename] = datapoint["items"][0]
color_mask = Export.convert_rbai_mask(
labels, class_id_map, fill_holes, max_hole_size
)
# save png as 3 channel np.uint8 image
pil_color_mask = Image.fromarray(color_mask.astype(np.uint8))
pil_color_mask.save(os.path.join(mask_dir, filename))
with open(class_map, "w", encoding="utf-8") as file_:
json.dump(color_map, file_, indent=2)
with open(datapoint_map, "w", encoding="utf-8") as file_:
json.dump(dp_map, file_, indent=2)
def redbrick_png( # pylint: disable=too-many-locals
self,
only_ground_truth: bool = True,
concurrency: int = 10,
task_id: Optional[str] = None,
fill_holes: bool = False,
max_hole_size: int = 30,
) -> None:
"""
Export segmentation labels as masks.
Masks are exported to a local directory named after project_id.
Please visit https://docs.redbrickai.com/python-sdk/reference#png-mask-formats
to see an overview of the format of the exported masks.
>>> project = redbrick.get_project(api_key, url, org_id, project_id)
>>> project.export.redbrick_png()
Parameters
--------------
only_ground_truth: bool = True
If set to True, will only return data that has
been completed in your workflow. If False, will
export latest state
concurrency: int = 10
task_id: Optional[str] = None
If the unique task_id is mentioned, only a single
datapoint will be exported.
fill_holes : bool = False
If set to True, will fill any holes in your segmentation
masks.
        max_hole_size: int = 30
            If fill_holes = True, this parameter defines the maximum
            size hole, in pixels, to fill.
Warnings
----------
redbrick_png only works for the following types - IMAGE_SEGMENTATION, IMAGE_MULTI
"""
if self.project_type not in (
LabelType.IMAGE_SEGMENTATION,
LabelType.IMAGE_MULTI,
):
print_error(
f"Project type needs to be {LabelType.IMAGE_SEGMENTATION} or "
+ f"{LabelType.IMAGE_MULTI} for redbrick_png"
)
return
if task_id:
datapoints, taxonomy = self._get_raw_data_single(task_id)
elif only_ground_truth:
datapoints, taxonomy = self._get_raw_data_ground_truth(concurrency)
else:
datapoints, taxonomy = self._get_raw_data_latest(concurrency)
# Create output directory
output_dir = uniquify_path(self.project_id)
mask_dir = os.path.join(output_dir, "masks")
os.makedirs(mask_dir, exist_ok=True)
print_info(f"Saving masks to {output_dir} directory")
Export._export_png_mask_data(
datapoints,
taxonomy,
mask_dir,
os.path.join(output_dir, "class_map.json"),
os.path.join(output_dir, "datapoint_map.json"),
fill_holes,
max_hole_size,
)
def redbrick_nifti(
self,
only_ground_truth: bool = True,
concurrency: int = 10,
task_id: Optional[str] = None,
) -> None:
"""
Export dicom segmentation labels in NIfTI-1 format.
>>> project = redbrick.get_project(api_key, url, org_id, project_id)
>>> project.export.redbrick_nifti()
Parameters
--------------
only_ground_truth: bool = True
If set to True, will only return data that has
been completed in your workflow. If False, will
export latest state
concurrency: int = 10
task_id: Optional[str] = None
If the unique task_id is mentioned, only a single
datapoint will be exported.
Warnings
----------
redbrick_nifti only works for the following types - DICOM_SEGMENTATION
"""
if self.project_type != LabelType.DICOM_SEGMENTATION:
print_error(
f"Project type needs to be {LabelType.DICOM_SEGMENTATION} "
+ "for redbrick_nifi"
)
return
if task_id:
datapoints, _ = self._get_raw_data_single(task_id)
elif only_ground_truth:
datapoints, _ = self._get_raw_data_ground_truth(concurrency)
else:
datapoints, _ = self._get_raw_data_latest(concurrency)
# Create output directory
destination = uniquify_path(self.project_id)
nifti_dir = os.path.join(destination, "nifti")
os.makedirs(nifti_dir, exist_ok=True)
print_info(f"Saving NIfTI files to {destination} directory")
files = []
for datapoint in datapoints:
files.append(
(
datapoint["labelsPath"],
os.path.join(nifti_dir, f"{datapoint['taskId']}.nii"),
)
)
paths: List[Optional[str]] = asyncio.run(download_files(files))
tasks = [
{
"filePath": path,
**{
key: value
for key, value in datapoint.items()
if key not in ("currentStageName", "labelsPath")
},
}
for datapoint, path in zip(datapoints, paths)
]
with open(
os.path.join(destination, "tasks.json"), "w", encoding="utf-8"
) as tasks_file:
json.dump(tasks, tasks_file, indent=2)
def redbrick_format(
self,
only_ground_truth: bool = True,
concurrency: int = 10,
task_id: Optional[str] = None,
) -> List[Dict]:
"""
Export data into redbrick format.
>>> project = redbrick.get_project(api_key, url, org_id, project_id)
>>> result = project.export.redbrick_format()
Parameters
-----------------
only_ground_truth: bool = True
If set to True, will only return data that has
been completed in your workflow. If False, will
export latest state
concurrency: int = 10
task_id: Optional[str] = None
If the unique task_id is mentioned, only a single
datapoint will be exported.
Returns:
-----------------
List[Dict]
Datapoint and labels in RedBrick AI format. See
https://docs.redbrickai.com/python-sdk/reference
"""
if task_id:
datapoints, _ = self._get_raw_data_single(task_id)
elif only_ground_truth:
datapoints, _ = self._get_raw_data_ground_truth(concurrency)
else:
datapoints, _ = self._get_raw_data_latest(concurrency)
return [
{
key: value
for key, value in datapoint.items()
if key not in ("currentStageName", "labelsPath")
}
for datapoint in datapoints
]
def coco_format(
self,
only_ground_truth: bool = True,
concurrency: int = 10,
task_id: Optional[str] = None,
) -> Dict:
"""
Export project into coco format.
>>> project = redbrick.get_project(api_key, url, org_id, project_id)
>>> result = project.export.coco_format()
Parameters
-----------
only_ground_truth: bool = True
If set to True, will only return data that has
been completed in your workflow. If False, will
export latest state
concurrency: int = 10
task_id: Optional[str] = None
If the unique task_id is mentioned, only a single
datapoint will be exported.
Returns
-----------
List[Dict]
Datapoint and labels in COCO format. See
https://cocodataset.org/#format-data
Warnings
----------
redbrick_coco only works for the following types - IMAGE_BBOX, IMAGE_POLYGON
"""
if self.project_type not in (LabelType.IMAGE_BBOX, LabelType.IMAGE_POLYGON):
print_error(
f"Project type needs to be {LabelType.IMAGE_BBOX} or "
+ f"{LabelType.IMAGE_POLYGON} for redbrick_coco"
)
return {}
if task_id:
datapoints, taxonomy = self._get_raw_data_single(task_id)
elif only_ground_truth:
datapoints, taxonomy = self._get_raw_data_ground_truth(concurrency)
else:
datapoints, taxonomy = self._get_raw_data_latest(concurrency)
return coco_converter(datapoints, taxonomy)
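# Illustrative sketch (not executed): a typical export flow, following the
# doctest-style examples in the docstrings above. Credentials and ids are
# placeholders.
def _example_export_usage():
    import redbrick  # assumes the redbrick package exposing get_project
    project = redbrick.get_project("api_key", "url", "org_id", "project_id")
    project.export.redbrick_png(only_ground_truth=True,
                                fill_holes=True, max_hole_size=30)
    return project.export.redbrick_format()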
<filename>pyod/test/test_data.py
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
from sklearn.utils.testing import assert_equal
# noinspection PyProtectedMember
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
import numpy as np
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
from pyod.utils.data import generate_data_categorical
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.data import get_outliers_inliers
from pyod.utils.data import check_consistent_shape
from pyod.utils.data import generate_data_clusters
class TestData(unittest.TestCase):
def setUp(self):
self.n_train = 1000
self.n_test = 500
self.contamination = 0.1
self.n_samples = 1000
self.test_size = 0.2
self.value_lists = [0.1, 0.3, 0.2, -2, 1.5, 0, 1, -1, -0.5, 11]
self.random_state = 42
def test_data_generate(self):
X_train, y_train, X_test, y_test = \
generate_data(n_train=self.n_train,
n_test=self.n_test,
contamination=self.contamination)
assert_equal(y_train.shape[0], X_train.shape[0])
assert_equal(y_test.shape[0], X_test.shape[0])
assert_less_equal(self.n_train - X_train.shape[0], 1)
assert_equal(X_train.shape[1], 2)
assert_less_equal(self.n_test - X_test.shape[0], 1)
assert_equal(X_test.shape[1], 2)
out_perc = np.sum(y_train) / self.n_train
assert_allclose(self.contamination, out_perc, atol=0.01)
out_perc = np.sum(y_test) / self.n_test
assert_allclose(self.contamination, out_perc, atol=0.01)
def test_data_generate2(self):
X_train, y_train, X_test, y_test = \
generate_data(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination)
assert_allclose(X_train.shape, (self.n_train, 3))
assert_allclose(X_test.shape, (self.n_test, 3))
def test_data_generate3(self):
X_train, y_train, X_test, y_test = \
generate_data(n_train=self.n_train,
n_test=self.n_test,
n_features=2,
contamination=self.contamination,
random_state=42)
X_train2, y_train2, X_test2, y_test2 = \
generate_data(n_train=self.n_train,
n_test=self.n_test,
n_features=2,
contamination=self.contamination,
random_state=42)
assert_allclose(X_train, X_train2)
assert_allclose(X_test, X_test2)
assert_allclose(y_train, y_train2)
assert_allclose(y_test, y_test2)
def test_data_generate_cluster(self):
X_train, X_test, y_train, y_test = \
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=2,
contamination=self.contamination,
random_state=self.random_state)
assert_equal(y_train.shape[0], X_train.shape[0])
assert_equal(y_test.shape[0], X_test.shape[0])
assert_less_equal(self.n_train - X_train.shape[0], 1)
assert_equal(X_train.shape[1], 2)
assert_less_equal(self.n_test - X_test.shape[0], 1)
assert_equal(X_test.shape[1], 2)
out_perc = (np.sum(y_train) + np.sum(y_test)) / (
self.n_train + self.n_test)
assert_allclose(self.contamination, out_perc, atol=0.01)
def test_data_generate_cluster2(self):
X_train, X_test, y_train, y_test = \
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=4,
contamination=self.contamination,
random_state=self.random_state)
assert_allclose(X_train.shape, (self.n_train, 4))
assert_allclose(X_test.shape, (self.n_test, 4))
def test_data_generate_cluster3(self):
        X_train, X_test, y_train, y_test = \
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination,
random_state=self.random_state)
        X_train2, X_test2, y_train2, y_test2 = \
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination,
random_state=self.random_state)
assert_allclose(X_train, X_train2)
assert_allclose(X_test, X_test2)
assert_allclose(y_train, y_train2)
assert_allclose(y_test, y_test2)
def test_data_generate_cluster5(self):
with assert_raises(ValueError):
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
n_clusters='e',
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features='e',
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination='e',
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination,
dist='e',
random_state=self.random_state)
def test_data_generate_cluster6(self):
X_train, X_test, y_train, y_test = \
generate_data_clusters(n_train=self.n_train,
n_test=self.n_test,
n_features=2,
size='different',
density='different',
contamination=self.contamination,
random_state=self.random_state)
assert_equal(y_train.shape[0], X_train.shape[0])
assert_equal(y_test.shape[0], X_test.shape[0])
assert_less_equal(self.n_train - X_train.shape[0], 1)
assert_equal(X_train.shape[1], 2)
assert_less_equal(self.n_test - X_test.shape[0], 1)
assert_equal(X_test.shape[1], 2)
out_perc = (np.sum(y_train) + np.sum(y_test)) / (
self.n_train + self.n_test)
assert_allclose(self.contamination, out_perc, atol=0.01)
def test_data_generate_categorical(self):
X_train, X_test, y_train, y_test = \
generate_data_categorical(n_train=self.n_train,
n_test=self.n_test,
n_features=2,
contamination=self.contamination,
random_state=self.random_state)
assert_equal(y_train.shape[0], X_train.shape[0])
assert_equal(y_test.shape[0], X_test.shape[0])
assert_less_equal(self.n_train - X_train.shape[0], 1)
assert_equal(X_train.shape[1], 2)
assert_less_equal(self.n_test - X_test.shape[0], 1)
assert_equal(X_test.shape[1], 2)
out_perc = (np.sum(y_train) + np.sum(y_test)) / (
self.n_train + self.n_test)
assert_allclose(self.contamination, out_perc, atol=0.01)
def test_data_generate_categorical2(self):
X_train, X_test, y_train, y_test = \
generate_data_categorical(n_train=self.n_train,
n_test=self.n_test,
n_features=4,
contamination=self.contamination,
random_state=self.random_state)
assert_allclose(X_train.shape, (self.n_train, 4))
assert_allclose(X_test.shape, (self.n_test, 4))
def test_data_generate_categorical3(self):
        X_train, X_test, y_train, y_test = \
generate_data_categorical(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination,
random_state=self.random_state)
        X_train2, X_test2, y_train2, y_test2 = \
generate_data_categorical(n_train=self.n_train,
n_test=self.n_test,
n_features=3,
contamination=self.contamination,
random_state=self.random_state)
        assert np.array_equal(X_train, X_train2)
assert np.array_equal(X_test, X_test2)
assert np.array_equal(y_train, y_train2)
assert np.array_equal(y_test, y_test2)
def test_data_generate_categorical5(self):
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=-1)
with assert_raises(ValueError):
generate_data_categorical(n_train=0, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=-1,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train='not int', n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test='not int',
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
                                      n_informative=1, n_features=0,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features='not int',
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=-1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative='not int', n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination=0.6,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=3,
n_informative=1, n_features=1,
contamination='not float',
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=-1, n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in='not int', n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=self.n_train+self.n_test+1,
n_category_out=3,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out=-1,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5, n_category_out='not int',
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5,
n_category_out=self.n_train+self.n_test+1,
n_informative=1, n_features=1,
contamination=self.contamination,
random_state=self.random_state)
with assert_raises(ValueError):
generate_data_categorical(n_train=self.n_train, n_test=self.n_test,
n_category_in=5,
n_category_out=5,
n_informative=2, n_features=2,
contamination=self.contamination,
shuffle='not bool',
random_state=self.random_state)
def test_evaluate_print(self):
X_train, y_train, X_test, y_test = generate_data(
n_train=self.n_train,
n_test=self.n_test,
contamination=self.contamination)
evaluate_print('dummy', y_train, y_train * 0.1)
def test_get_outliers_inliers(self):
X_train, y_train = generate_data(
n_train=self.n_train, train_only=True,
contamination=self.contamination)
X_outliers, X_inliers = get_outliers_inliers(X_train, y_train)
inlier_index = int(self.n_train * (1 - self.contamination))
assert_allclose(X_train[0:inlier_index, :], X_inliers)
assert_allclose(X_train[inlier_index:, :], X_outliers)
def test_check_consistent_shape(self):
X_train, y_train, X_test, y_test = generate_data(
n_train=self.n_train,
n_test=self.n_test,
contamination=self.contamination)
X_train_n, y_train_n, X_test_n, y_test_n, y_train_pred_n, y_test_pred_n \
= check_consistent_shape(X_train, y_train, X_test, y_test,
y_train, y_test)
assert_allclose(X_train_n, X_train)
assert_allclose(y_train_n, y_train)
assert_allclose(X_test_n, X_test)
assert_allclose(y_test_n, y_test)
assert_allclose(y_train_pred_n, y_train)
assert_allclose(y_test_pred_n, y_test)
# test shape difference
with assert_raises(ValueError):
check_consistent_shape(X_train, y_train, y_train, y_test,
y_train, y_test)
# test shape difference between X_train and X_test
X_test = np.hstack((X_test, np.zeros(
(X_test.shape[0], 1)))) # add extra column/feature
with assert_raises(ValueError):
check_consistent_shape(X_train, y_train, X_test, y_test,
y_train_pred_n, y_test_pred_n)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
import os
import random
import time
import WifiConnUtility
from NativeLog import NativeLog
from TCAction import TCActionBase
from Utility import Encoding
from Utility import MakeFolder
STEPS = {"SCAN1": 0x01, "JAP": 0x02, "SCAN2": 0x04, "RECONNECT": 0x08}
AP_PROP = ("ssid", "ssid_len", "pwd",
"pwd_len", "channel", "enc", "apc")
JAP_TEST_METHOD = ("Normal", "OFF_ON", "OFF", "WRONG_PROP")
RECONNECT_TEST_METHOD = ("OFF_ON", "OFF")
LOG_FOLDER = os.path.join("Performance", "JAP")
SSID_LEN_RANGE = (1, 32) # in bytes
ENC_TYPE = (0, 2, 3, 4) # do not support WEP for 8266 soft AP
PWD_RANGE = {0: [0, 0],
1: [5, 5],
2: [8, 63],
3: [8, 63],
4: [8, 63],
}
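# Illustrative lookup (an assumption drawn from the tables above, not part of
# the original script): PWD_RANGE maps an encryption type to its valid password
# length range, e.g. an open AP (enc 0) takes no password while the WPA/WPA2
# variants (enc 2-4) take 8-63 characters:
#   PWD_RANGE[random.choice(ENC_TYPE)]  ->  e.g. [8, 63] for enc type 2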
class TestCase(TCActionBase.CommonTCActionBase):
def __init__(self, test_case, test_env, timeout=30, log_path=TCActionBase.LOG_PATH):
TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
# default value for optional configurable params
self.performance_folder_path = log_path
self.pwd_len = [8, 64]
self.step_config = [0x03, 0x01, 0x02, 0x0B, 0x0F]
self.join_test_method = ["Normal"]
self.join_delay = [[1.5, 5], [1.5, 5]]
self.reconnect_test_method = ["OFF_ON"]
self.reconnect_delay = [[1.5, 5], [1.5, 6]]
# load param from excel
cmd_set = test_case["cmd set"]
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy" and cmd_set[i][0] != "":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
# read AP list
self.ap_list = []
for i in range(1, len(cmd_set)):
for j in range(len(cmd_set[i][1])):
if cmd_set[i][1][j] != "":
cmd_string = "self.ap_list.append(dict(zip(AP_PROP, " + cmd_set[i][1][j] + ")))"
exec cmd_string
        folder_path = MakeFolder.make_folder(os.path.join(self.performance_folder_path, LOG_FOLDER))
file_name = "JAP_log_%s.log" % (time.strftime("%m%d%H%M%S", time.localtime()))
self._performance_log_file = os.path.join(folder_path, file_name)
# test statistics
self._succeed_count = self._fail_count = self._time_cost_count = 0
self._total_time = self._longest_time = 0
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
# get target type "SSC" or "AT"
self.target_type = ["SSC" if test_env.get_port_by_name("AT1") is None else "AT"]
self.target_type.append("SSC" if test_env.get_port_by_name("AT2") is None else "AT")
self._utility = WifiConnUtility.WifiConnUtility(self)
pass
def _generate_random_ap_prop(self):
ap_prop = dict.fromkeys(AP_PROP)
# generate target ap_value
ap_prop["ssid_len"] = random.randint(SSID_LEN_RANGE[0], SSID_LEN_RANGE[1])
ap_prop["channel"] = random.choice(range(1, 14))
ap_prop["enc"] = random.choice(ENC_TYPE)
ap_prop["pwd_len"] = random.randint(PWD_RANGE[ap_prop["enc"]][0], PWD_RANGE[ap_prop["enc"]][1])
# generate string
if self.target_type[0] == self.target_type[1] == "AT":
ap_prop["ssid"] = Encoding.generate_random_utf8_str(ap_prop["ssid_len"])
ap_prop["pwd"] = Encoding.generate_random_utf8_str(ap_prop["pwd_len"])
# NativeLog.add_trace_info("ssid hex is : %x" % ap_prop["ssid"])
# NativeLog.add_trace_info("pwd hex is : %x" % ap_prop["pwd"])
else:
ap_prop["ssid"] = Encoding.generate_random_printable_str(ap_prop["ssid_len"])
ap_prop["pwd"] = Encoding.generate_random_printable_str(ap_prop["pwd_len"])
return ap_prop
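    # A generated property dict looks roughly like this (illustrative values;
    # "apc" stays None because nothing in this method fills it in):
    #   {"ssid": "kx93A...", "ssid_len": 17, "pwd": "p8q2w3e4", "pwd_len": 8,
    #    "channel": 6, "enc": 2, "apc": None}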
def _logging_performance(self, ssid, join_method="Normal", time_cost=0):
# log performance to performance log file
with open(self._performance_log_file, "ab+") as f:
# log time and ssid
f.write("\r\n[%s]:\r\n[AP name] %s\r\n" %
(time.strftime("%m-%d %H:%M:%S", time.localtime()), ssid))
if join_method == "Normal" or join_method == "OFF_ON":
if time_cost is not False:
self._succeed_count += 1
if join_method == "Normal":
f.write("[Succeed][%f]\r\n" % time_cost)
self._longest_time = (time_cost > self._longest_time and
[time_cost] or [self._longest_time])[0]
self._time_cost_count += 1
self._total_time += time_cost
else:
f.write("[Succeed][%s]\r\n" % join_method)
else:
self._fail_count += 1
f.write("[Fail][%s]\r\n" % join_method)
pass
def _logging_fail_step(self, ssid, step):
with open(self._performance_log_file, "ab+") as f:
f.write("\r\n[%s]:\r\n[AP name] %s\r\n" %
(time.strftime("%m-%d %H:%M:%S", time.localtime()), ssid))
f.write("[Fail][%s]\r\n" % step)
pass
def _generate_performance_report(self):
with open(self._performance_log_file, "ab+") as f:
f.write("[Test report] Succeed: %d\r\n" % self._succeed_count)
f.write("[Test report] Failed: %d\r\n" % self._fail_count)
if self._succeed_count > 0 or self._fail_count > 0:
f.write("[Test report] Pass Rate: %f\r\n" %
(self._succeed_count/(self._fail_count+self._succeed_count)))
if self._time_cost_count > 0:
f.write("[Test report] Average time: %f\r\n" % (self._total_time/self._time_cost_count))
f.write("[Test report] Longest time: %f\r\n" % self._longest_time)
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.result_cntx.start()
# mandatory configurable params
try:
target_ap_num = self.target_ap_num
test_count = self.test_count
except StandardError, e:
NativeLog.add_trace_critical("Error configuration for WifiJAP script, error is %s" % e)
raise StandardError("Error configuration")
# prepare ap list
_ap_list = [["target", None]] * target_ap_num
for _ap_prop in self.ap_list:
_ap_list.append(["AP", _ap_prop])
# set to correct mode first
self._utility.set_mode([1, 2])
for i in xrange(test_count):
_ap = random.choice(_ap_list)
# arrange ap
_ap_type = _ap[0]
_ap_prop = _ap[1]
if _ap_type == "target":
_ap_prop = self._generate_random_ap_prop()
pass
# step 1 : mandatory step, set up AP
if self._utility.setup_ap(_ap_type, _ap_prop) is False:
self._logging_fail_step(_ap_prop["ssid"], "Set AP")
NativeLog.add_prompt_trace("[Step1] setup AP Fail")
continue
step_config = random.choice(self.step_config)
NativeLog.add_prompt_trace("[Step1] setup AP succeed")
# step 2 : optional step, do scan before connect
if step_config & STEPS["SCAN1"] != 0: # check option
if self._utility.do_scan(_ap_prop) is False:
self._logging_fail_step(_ap_prop["ssid"], "Scan before JAP")
NativeLog.add_prompt_trace("[Step2] Scan Done")
# step 3 : mandatory step, join AP
if step_config & STEPS["JAP"] != 0: # check option
_join_test_method = random.choice(self.join_test_method)
time_cost = self._utility.join_ap(_join_test_method, _ap_type, _ap_prop, self.join_delay)
# log performance to performance log file
self._logging_performance(_ap_prop["ssid"], _join_test_method, time_cost)
if time_cost is False:
# do scan once to check if AP exist
self._utility.do_scan(_ap_prop)
continue
NativeLog.add_prompt_trace("[Step3] Join AP done")
# step 4 : optional step, scan after join AP
if step_config & STEPS["SCAN2"] != 0: # check option
if self._utility.do_scan(_ap_prop) is False:
self._logging_fail_step(_ap_prop["ssid"], "Scan after JAP")
NativeLog.add_prompt_trace("[Step4] Scan done")
# step 5 : optional step, reconnect test
if step_config & STEPS["RECONNECT"] != 0: # check option
_reconnect_test_method = random.choice(self.reconnect_test_method)
if self._utility.do_reconnect(_reconnect_test_method,
_ap_type, _ap_prop, self.reconnect_delay) is False:
self._logging_fail_step(_ap_prop["ssid"], "Reconnect")
NativeLog.add_prompt_trace("[Step5] Reconnect done")
# continue to next loop
NativeLog.add_prompt_trace("[WifiJAP] Test count %d done" % i)
# generate report and cleanup
self._generate_performance_report()
self.result_cntx.set_result("Succeed")
def result_check(self, port_name, data):
TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
self.result_cntx.append_data(port_name, data)
def main():
pass
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
name: demo_ncnn.py
date: 2020-12-16 11:21:07
Env.: Python 3.7.3, WIN 10
"""
import argparse
from abc import ABCMeta, abstractmethod
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
from tqdm import tqdm
# Copy from nanodet/util/visualization.py
_COLORS = (
np.array(
[
            0.000, 0.447, 0.741,
            0.850, 0.325, 0.098,
            0.929, 0.694, 0.125,
            0.494, 0.184, 0.556,
            0.466, 0.674, 0.188,
            0.301, 0.745, 0.933,
            0.635, 0.078, 0.184,
            0.300, 0.300, 0.300,
            0.600, 0.600, 0.600,
            1.000, 0.000, 0.000,
            1.000, 0.500, 0.000,
            0.749, 0.749, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 1.000,
            0.667, 0.000, 1.000,
            0.333, 0.333, 0.000,
            0.333, 0.667, 0.000,
            0.333, 1.000, 0.000,
            0.667, 0.333, 0.000,
            0.667, 0.667, 0.000,
            0.667, 1.000, 0.000,
            1.000, 0.333, 0.000,
            1.000, 0.667, 0.000,
            1.000, 1.000, 0.000,
            0.000, 0.333, 0.500,
            0.000, 0.667, 0.500,
            0.000, 1.000, 0.500,
            0.333, 0.000, 0.500,
            0.333, 0.333, 0.500,
            0.333, 0.667, 0.500,
            0.333, 1.000, 0.500,
            0.667, 0.000, 0.500,
            0.667, 0.333, 0.500,
            0.667, 0.667, 0.500,
            0.667, 1.000, 0.500,
            1.000, 0.000, 0.500,
            1.000, 0.333, 0.500,
            1.000, 0.667, 0.500,
            1.000, 1.000, 0.500,
            0.000, 0.333, 1.000,
            0.000, 0.667, 1.000,
            0.000, 1.000, 1.000,
            0.333, 0.000, 1.000,
            0.333, 0.333, 1.000,
            0.333, 0.667, 1.000,
            0.333, 1.000, 1.000,
            0.667, 0.000, 1.000,
            0.667, 0.333, 1.000,
            0.667, 0.667, 1.000,
            0.667, 1.000, 1.000,
            1.000, 0.000, 1.000,
            1.000, 0.333, 1.000,
            1.000, 0.667, 1.000,
            0.333, 0.000, 0.000,
            0.500, 0.000, 0.000,
            0.667, 0.000, 0.000,
            0.833, 0.000, 0.000,
            1.000, 0.000, 0.000,
            0.000, 0.167, 0.000,
            0.000, 0.333, 0.000,
            0.000, 0.500, 0.000,
            0.000, 0.667, 0.000,
            0.000, 0.833, 0.000,
            0.000, 1.000, 0.000,
            0.000, 0.000, 0.167,
            0.000, 0.000, 0.333,
            0.000, 0.000, 0.500,
            0.000, 0.000, 0.667,
            0.000, 0.000, 0.833,
            0.000, 0.000, 1.000,
            0.000, 0.000, 0.000,
            0.143, 0.143, 0.143,
            0.286, 0.286, 0.286,
            0.429, 0.429, 0.429,
            0.571, 0.571, 0.571,
            0.714, 0.714, 0.714,
            0.857, 0.857, 0.857,
            0.000, 0.447, 0.741,
            0.314, 0.717, 0.741,
            0.50, 0.5, 0,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
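# _COLORS reshapes to (80, 3): one RGB triplet (floats in [0, 1]) per COCO
# class, indexed by the predicted label in overlay_bbox_cv below.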
def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
"""
Get resize matrix for resizing raw img to input size
:param raw_shape: (width, height) of raw image
:param dst_shape: (width, height) of input image
:param keep_ratio: whether keep original ratio
:return: 3x3 Matrix
"""
r_w, r_h = raw_shape
d_w, d_h = dst_shape
Rs = np.eye(3)
if keep_ratio:
C = np.eye(3)
C[0, 2] = -r_w / 2
C[1, 2] = -r_h / 2
if r_w / r_h < d_w / d_h:
ratio = d_h / r_h
else:
ratio = d_w / r_w
Rs[0, 0] *= ratio
Rs[1, 1] *= ratio
T = np.eye(3)
T[0, 2] = 0.5 * d_w
T[1, 2] = 0.5 * d_h
return T @ Rs @ C
else:
Rs[0, 0] *= d_w / r_w
Rs[1, 1] *= d_h / r_h
return Rs
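# A minimal usage sketch (shapes and values assumed for illustration, not from
# the original repo): for a 640x480 image resized into 320x320 with
# keep_ratio=True the scale is 0.5, so the bottom-right corner maps to
# (320, 280), leaving vertical padding:
#   M = get_resize_matrix((640, 480), (320, 320), keep_ratio=True)
#   M @ np.array([640, 480, 1])  ->  array([320., 280., 1.])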
def warp_boxes(boxes, M, width, height):
"""Apply transform to boxes
Copy from nanodet/data/transform/warp.py
"""
n = len(boxes)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
n * 4, 2
) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
return xy.astype(np.float32)
else:
return boxes
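# Sanity-check sketch (values assumed): warping with the identity matrix
# returns the box unchanged, clipped to the image bounds:
#   warp_boxes(np.array([[10., 10., 50., 50.]]), np.eye(3), 640, 480)
#   ->  array([[10., 10., 50., 50.]], dtype=float32)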
def overlay_bbox_cv(img, all_box, class_names):
"""Draw result boxes
Copy from nanodet/util/visualization.py
"""
# all_box array of [label, x0, y0, x1, y1, score]
all_box.sort(key=lambda v: v[5])
for box in all_box:
label, x0, y0, x1, y1, score = box
# color = self.cmap(i)[:3]
color = (_COLORS[label] * 255).astype(np.uint8).tolist()
text = "{}:{:.1f}%".format(class_names[label], score * 100)
txt_color = (0, 0, 0) if np.mean(_COLORS[label]) > 0.5 else (255, 255, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
txt_size = cv2.getTextSize(text, font, 0.5, 2)[0]
cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
cv2.rectangle(
img,
(x0, y0 - txt_size[1] - 1),
(x0 + txt_size[0] + txt_size[1], y0 - 1),
color,
-1,
)
cv2.putText(img, text, (x0, y0 - 1), font, 0.5, txt_color, thickness=1)
return img
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
# _, indexes = scores.sort(descending=True)
indexes = np.argsort(scores)
# indexes = indexes[:candidate_size]
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
# current = indexes[0]
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
# indexes = indexes[1:]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
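# Illustrative call (values assumed, not from the original repo): the
# highest-scoring box is kept first and heavily overlapping candidates drop:
#   bs = np.array([[0, 0, 10, 10, 0.9],
#                  [1, 1, 11, 11, 0.8],      # IoU ~0.68 with the first box
#                  [50, 50, 60, 60, 0.7]])
#   hard_nms(bs, iou_threshold=0.5)  # returns rows 0 and 2, suppresses row 1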
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
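# Quick sanity checks (illustrative): identical boxes give IoU ~1.0 and
# disjoint boxes give 0.0:
#   iou_of(np.array([[0., 0., 10., 10.]]), np.array([[0., 0., 10., 10.]]))   # ~1.0
#   iou_of(np.array([[0., 0., 10., 10.]]), np.array([[20., 20., 30., 30.]])) # 0.0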
class NanoDetABC(metaclass=ABCMeta):
def __init__(
self,
input_shape=[320, 320],
reg_max=7,
strides=[8, 16, 32],
prob_threshold=0.4,
iou_threshold=0.3,
num_candidate=1000,
top_k=-1,
):
self.strides = strides
self.input_shape = input_shape
self.reg_max = reg_max
self.prob_threshold = prob_threshold
self.iou_threshold = iou_threshold
self.num_candidate = num_candidate
self.top_k = top_k
self.img_mean = [103.53, 116.28, 123.675]
self.img_std = [57.375, 57.12, 58.395]
self.input_size = (self.input_shape[1], self.input_shape[0])
self.class_names = [
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic_light",
"fire_hydrant",
"stop_sign",
"parking_meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports_ball",
"kite",
"baseball_bat",
"baseball_glove",
"skateboard",
"surfboard",
"tennis_racket",
"bottle",
"wine_glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot_dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted_plant",
"bed",
"dining_table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell_phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy_bear",
"hair_drier",
"toothbrush",
]
def preprocess(self, img):
# resize image
ResizeM = get_resize_matrix((img.shape[1], img.shape[0]), self.input_size, True)
img_resize = cv2.warpPerspective(img, ResizeM, dsize=self.input_size)
# normalize image
img_input = img_resize.astype(np.float32) / 255
img_mean = np.array(self.img_mean, dtype=np.float32).reshape(1, 1, 3) / 255
img_std = np.array(self.img_std, dtype=np.float32).reshape(1, 1, 3) / 255
img_input = (img_input - img_mean) / img_std
# expand dims
img_input = np.transpose(img_input, [2, 0, 1])
img_input = np.expand_dims(img_input, axis=0)
return img_input, ResizeM
def postprocess(self, scores, raw_boxes, ResizeM, raw_shape):
# generate centers
decode_boxes = []
select_scores = []
for stride, box_distribute, score in zip(self.strides, raw_boxes, scores):
# centers
fm_h = self.input_shape[0] / stride
fm_w = self.input_shape[1] / stride
h_range = np.arange(fm_h)
w_range = np.arange(fm_w)
ww, hh = np.meshgrid(w_range, h_range)
ct_row = (hh.flatten() + 0.5) * stride
ct_col = (ww.flatten() + 0.5) * stride
center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)
# box distribution to distance
reg_range = np.arange(self.reg_max + 1)
box_distance = box_distribute.reshape((-1, self.reg_max + 1))
box_distance = softmax(box_distance, axis=1)
box_distance = box_distance * np.expand_dims(reg_range, axis=0)
box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))
box_distance = box_distance * stride
# top K candidate
topk_idx = np.argsort(score.max(axis=1))[::-1]
topk_idx = topk_idx[: self.num_candidate]
center = center[topk_idx]
score = score[topk_idx]
box_distance = box_distance[topk_idx]
# decode box
decode_box = center + [-1, -1, 1, 1] * box_distance
select_scores.append(score)
decode_boxes.append(decode_box)
# nms
bboxes = np.concatenate(decode_boxes, axis=0)
confidences = np.concatenate(select_scores, axis=0)
picked_box_probs = []
picked_labels = []
for class_index in range(0, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > self.prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = bboxes[mask, :]
box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = hard_nms(
box_probs,
iou_threshold=self.iou_threshold,
top_k=self.top_k,
)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
# resize output boxes
picked_box_probs[:, :4] = warp_boxes(
picked_box_probs[:, :4], np.linalg.inv(ResizeM), raw_shape[1], raw_shape[0]
)
return (
picked_box_probs[:, :4].astype(np.int32),
np.array(picked_labels),
picked_box_probs[:, 4],
)
@abstractmethod
def infer_image(self, img_input):
pass
def detect(self, img):
raw_shape = img.shape
img_input, ResizeM = self.preprocess(img)
scores, raw_boxes = self.infer_image(img_input)
if scores[0].ndim == 1: # handling num_classes=1 case
scores = [x[:, None] for x in scores]
bbox, label, score = self.postprocess(scores, raw_boxes, ResizeM, raw_shape)
return bbox, label, score
def draw_box(self, raw_img, bbox, label, score):
img = raw_img.copy()
all_box = [
[
x,
]
+ y
+ [
z,
]
for x, y, z in zip(label, bbox.tolist(), score)
]
img_draw = overlay_bbox_cv(img, all_box, self.class_names)
return img_draw
def detect_folder(self, img_fold, result_path):
img_fold = Path(img_fold)
result_path = Path(result_path)
result_path.mkdir(parents=True, exist_ok=True)
img_name_list = filter(
lambda x: str(x).endswith(".png") or str(x).endswith(".jpg"),
img_fold.iterdir(),
)
img_name_list = list(img_name_list)
print(f"find {len(img_name_list)} images")
for img_path in tqdm(img_name_list):
img = cv2.imread(str(img_path))
bbox, label, score = self.detect(img)
img_draw = self.draw_box(img, bbox, label, score)
save_path = str(result_path / img_path.name.replace(".png", ".jpg"))
cv2.imwrite(save_path, img_draw)
class NanoDetONNX(NanoDetABC):
def __init__(self, model_path, *args, **kwargs):
import onnxruntime as ort
super(NanoDetONNX, self).__init__(*args, **kwargs)
print("Using ONNX as inference backend")
print(f"Using weight: {model_path}")
# load model
self.model_path = model_path
self.ort_session = ort.InferenceSession(self.model_path)
self.input_name = self.ort_session.get_inputs()[0].name
def infer_image(self, img_input):
inference_results = self.ort_session.run(None, {self.input_name: img_input})
scores = [np.squeeze(x) for x in inference_results[:3]]
raw_boxes = [np.squeeze(x) for x in inference_results[3:]]
return scores, raw_boxes
class NanoDetTorch(NanoDetABC):
def __init__(self, model_path, cfg_path, *args, **kwargs):
import torch
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
super(NanoDetTorch, self).__init__(*args, **kwargs)
print("Using PyTorch as inference backend")
print(f"Using weight: {model_path}")
# load model
self.model_path = model_path
self.cfg_path = cfg_path
load_config(cfg, cfg_path)
self.logger = Logger(-1, cfg.save_dir, False)
self.model = build_model(cfg.model)
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
load_model_weight(self.model, checkpoint, self.logger)
def infer_image(self, img_input):
import torch
self.model.train(False)
with torch.no_grad():
inference_results = self.model(torch.from_numpy(img_input))
scores = [
x.permute(0, 2, 3, 1).reshape((-1, 80)).sigmoid().detach().numpy()
for x in inference_results[0]
]
raw_boxes = [
x.permute(0, 2, 3, 1).reshape((-1, 32)).detach().numpy()
for x in inference_results[1]
]
return scores, raw_boxes
class NanoDetNCNN(NanoDetABC):
def __init__(self, model_param, model_bin, *args, **kwargs):
import ncnn
super(NanoDetNCNN, self).__init__(*args, **kwargs)
print("Using ncnn as inference backend")
print(f"Using param: {model_param}, bin: {model_bin}")
# load model
self.model_param = model_param
self.model_bin = model_bin
self.net = ncnn.Net()
self.net.load_param(model_param)
self.net.load_model(model_bin)
self.input_name = "input.1"
def infer_image(self, img_input):
import ncnn
mat_in = ncnn.Mat(img_input.squeeze())
ex = self.net.create_extractor()
ex.input(self.input_name, mat_in)
score_out_name = [
"cls_pred_stride_8",
"cls_pred_stride_16",
"cls_pred_stride_32",
]
scores = [np.array(ex.extract(x)[1]) for x in score_out_name]
scores = [np.reshape(x, (-1, 80)) for x in scores]
boxes_out_name = [
"dis_pred_stride_8",
"dis_pred_stride_16",
"dis_pred_stride_32",
]
raw_boxes = [np.array(ex.extract(x)[1]) for x in boxes_out_name]
raw_boxes = [np.reshape(x, (-1, 32)) for x in raw_boxes]
return scores, raw_boxes
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path", dest="model_path", type=str, default="../model/nanodet_m.param"
)
parser.add_argument(
"--model_bin", dest="model_bin", type=str, default="../model/nanodet_m.bin"
)
parser.add_argument(
"--cfg_path", dest="cfg_path", type=str, default="config/nanodet-m.yml"
)
parser.add_argument("--img_fold", dest="img_fold", type=str, default="../imgs")
parser.add_argument(
"--result_fold", dest="result_fold", type=str, default="../results"
)
parser.add_argument(
"--input_shape", dest="input_shape", nargs=2, type=int, default=[320, 320]
)
parser.add_argument("--backend", choices=["ncnn", "ONNX", "torch"], default="ncnn")
args = parser.parse_args()
print(f"Detecting {args.img_fold}")
# load detector
if args.backend == "ncnn":
detector = NanoDetNCNN(
args.model_path, args.model_bin, input_shape=args.input_shape
)
elif args.backend == "ONNX":
detector = NanoDetONNX(args.model_path, input_shape=args.input_shape)
elif args.backend == "torch":
detector = NanoDetTorch(
args.model_path, args.cfg_path, input_shape=args.input_shape
)
else:
        raise ValueError(f"unsupported backend: {args.backend}")
# detect folder
detector.detect_folder(args.img_fold, args.result_fold)
def test_one():
detector = NanoDetNCNN("./weight/nanodet_m.param", "./weight/nanodet_m.bin")
img = cv2.imread("./data/2.jpg")
bbox, label, score = detector.detect(img)
img_draw = detector.draw_box(img, bbox, label, score)
plt.imshow(img_draw[..., ::-1])
plt.axis("off")
plt.show()
if __name__ == "__main__":
main()
|
'''
Created on 09. Okt. 2016
@author: chof
'''
from . import astring
from .gitanalyzer import GitAnalyzer
from .db import DbDump
from shutil import copyfile
from os.path import join as joinPath, basename, isfile
class DBConfig(object):
def __init__(self, cfg):
'''
Constructor
'''
self.gitAnalyzer = GitAnalyzer(cfg)
self.db = DbDump(cfg)
self.cfg = cfg
self.logger = cfg.logger
#set basic stuff
self.environments = self.cfg.getEnvironments()
    def snapshot(self):
        '''
        Creates a snapshot of the databases for the current commit.
        '''
        #***************************************************************************
for env in self.environments:
self.logger.info("Make all database snapshots for %s" % (env))
self.db.makealldumps(env)
            if self.cfg.structureFolder is not None:
self.db.extractallstructures(env, self.cfg.fullpath(self.cfg.structureFolder))
        if self.cfg.structureFolder is not None:
head = self.cfg.getHeadHash()
repo = self.cfg.repo
repo.git.add(self.cfg.fullpath(self.cfg.structureFolder))
repo.index.commit("stored structures for %s" % head)
self.logger.info("Commited new database structures under %s" % (self.cfg.getHeadHash()))
    def restore(self, dump=None):
        '''
        Restores a specific db snapshot without executing any scripts added
        after the snapshot.
        Returns the dump commit.
        '''
        #***************************************************************************
        if dump is None:
            dump = self.gitAnalyzer.getNewestDumpCommit(self.cfg.getHead(),
                                                        self.db.getAllDumpHashs())
for env in self.environments:
self.logger.info("Restore databases for %s from %s" % (env, dump))
self.db.restorealldumpsforcommit(dump, env)
return dump
def execute(self, script):
#***************************************************************************
for env in self.environments:
if (isfile(script)):
self.db.executeScript(script, env)
else:
                self.logger.warning("Script %s does not exist anymore and is therefore ignored!" % (script))
    def checkout(self, newonly):
        '''
        The checkout command takes care of a branch checkout:
        If the branch
        - is a new one, the branch point is stored in branch-index.json
          and the databases are dumped as a reference point
        - is an existing one, the db is restored from the latest restore
          point plus the scripts added since then
        '''
        #***************************************************************************
if newonly:
self.switch(newonly)
else:
(commit, branch, newbranch) = self.gitAnalyzer.checkout()
if newbranch:
self.logger.info("New branch %s created: mark and backup state at %s"
% (branch, commit))
for env in self.environments:
self.db.makealldumps(env)
else:
self.logger.info("Restore db structure for %s" % (branch))
self.switch(newonly)
def merge(self, main, topic):
#***************************************************************************
lca = self.gitAnalyzer.findLatestCommonAnchestor(main.commit, topic.commit)
dump = self.gitAnalyzer.getNewestDumpCommit(lca, self.db.getAllDumpHashs())
self.logger.info('Merging DB scripts from branch %s into %s' %
(astring(topic), astring(main)))
self.logger.debug('Branch point is %s' % (astring(lca)))
self.restore(dump)
#self._updateDBByScriptsFrom(dump)
self.gitAnalyzer.extractDBChangesSimple(main.commit, dump)
print("##")
self.gitAnalyzer.extractDBChangesSimple(dump, topic.commit)
self.snapshot()
    def switch(self, newonly):
        '''
        The switch command switches the given environments to the current
        db state of the branch.
        This is achieved by the following actions, executed consecutively:
        1. take the last db hash created in the commit tree of the current head
        2. identify all db scripts committed since the last hash
        3. execute them in the order of their commit (first committed first,
           no matter how often they have been recommitted since) but in the
           current state of the head
        '''
        #***************************************************************************
self.logger.info("Switch to branch at head %s" % self.cfg.getHead())
if newonly != True:
latestDump = self.restore()
self._updateDBByScriptsFrom(latestDump)
else:
for script in self._listScripts():
self.execute(script)
def _listScripts(self):
#***************************************************************************
lastcommit_file = \
"%s/lastcommit" % self.cfg.fullpath(self.cfg.structureFolder)
if (isfile(lastcommit_file)):
with open(lastcommit_file, 'r') as f:
hash = f.readline()
lastcommit = self.cfg.repo.commit(hash)
else:
lastcommit = self.gitAnalyzer.getNewestDumpCommit(self.cfg.getHead(),
self.db.getAllDumpHashs())
scripts = self.gitAnalyzer.extractDBChanges(self.cfg.getHead(), lastcommit)
dbscripts = []
for script in scripts:
if isfile(script[0]):
dbscripts.append(script[0])
return dbscripts
def list(self):
#***************************************************************************
dbscripts = self._listScripts()
print(','.join(dbscripts))
def _buildScriptForEnvironment(self, outputPath, env):
#***************************************************************************
dbscripts = self._listScripts()
runnumber = 1
for script in dbscripts:
dst = joinPath(outputPath, "%03d_%s_%s" %(runnumber, env, basename(script)))
self.logger.info("Copy %s to %s" % (script, dst))
copyfile(script, dst)
self.db.prepareScriptFor(dst, env)
runnumber += 1
def build(self, outputPath):
for env in self.environments:
self._buildScriptForEnvironment(outputPath, env)
def _updateDBByScriptsFrom(self, latestDump):
#***************************************************************************
scripts = self.gitAnalyzer.extractDBChanges(self.cfg.getHead(), latestDump)
for script in scripts:
self.execute(script[0])
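# Typical lifecycle sketch (method names are from this class; the cfg object
# and its construction are assumptions, not shown in this file):
#   dbc = DBConfig(cfg)
#   dbc.snapshot()               # dump all environments at the current commit
#   dbc.checkout(newonly=False)  # after `git checkout`: restore + replay scripts
#   dbc.merge(main, topic)       # replay both branches' scripts from the LCA dump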
|
<reponame>861934367/genecast<filename>build/lib/genecast_package/cnv_analysis.py
## this tool is for cnv analysis
## author: taozhou
## email: <EMAIL>
import pandas as pd
from glob import glob
import numpy as np
import os
from genecast_package.core import make_result_folder
from genecast_package.snv_analysis import get_host_gene
import warnings
warnings.filterwarnings("ignore")
class MethodException(Exception):
pass
def split_gene_data(data, data_type):
new_data = {"gene": [], data_type: []}
for genes, value in zip(data["gene"], data[data_type]):
for gene in genes.split(";"):
new_data["gene"].append(gene)
new_data[data_type].append(value)
data = pd.DataFrame(new_data)
return data
def parser_cnr(file, data_type="log2"):
data = pd.read_table(file, usecols=["gene", "log2"])
data = data.loc[data["gene"] != "Background"]
data = split_gene_data(data, data_type)
groups = pd.DataFrame(data.groupby(data["gene"]).median())
groups.columns = [file.split("/")[-1].split(".")[0]]
return groups
def parser_call_cnr(file, data_type="log2"):
data = pd.read_table(file, usecols=["gene", "log2", "cn"])
data = data.loc[data["gene"] != "Background"]
data = split_gene_data(data, data_type)
groups = pd.DataFrame(data[data_type].groupby(data["gene"]).median())
groups.columns = [file.split("/")[-1].split(".")[0]]
return groups
def get_host_gene_cnv(host_gene_file, a, b, data_type="log2"):
gene_list = get_host_gene(host_gene_file)
if data_type == "log2": fun = parser_cnr; pattern = "/*cnr"
else: fun = parser_call_cnr ; pattern = "/*call"
a_group = []
for file in glob(a + pattern):
a_group.append(file.split("/")[-1].split(".")[0])
gene_list = pd.merge(gene_list, fun(file, data_type=data_type), left_on="gene", right_index=True, how="left")
b_group = []
for file in glob(b + pattern):
b_group.append(file.split("/")[-1].split(".")[0])
gene_list = pd.merge(gene_list, fun(file, data_type=data_type), left_on="gene", right_index=True, how="left")
gene_list.index = gene_list["gene"]
del gene_list["gene"]
if 0 in gene_list.dropna(how="all").fillna(0):
data = gene_list.dropna(how="all").fillna(0).drop(0, axis=0)
else:
data = gene_list.dropna(how="all").fillna(0)
return data, a_group, b_group
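# Sketch of the expected output layout (file and directory names are
# assumptions): rows are genes, one column per sample's .cnr/.call file,
# values are per-gene median log2 copy ratios:
#   data, a_group, b_group = get_host_gene_cnv("target_gene.txt",
#                                              "groupA_dir", "groupB_dir")
#   # data.loc["TP53", "sample01"]  ->  e.g. -0.42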
def cnv(args=None):
make_result_folder(args=args, fun=get_host_gene_cnv, which="cnv")
if __name__ == "__main__":
host_gene_file = "target_gene.txt"
groups = ["CESC", "OV", "UCEC"]
p = 0.05
root_dir = os.getcwd()
make_result_folder(host_gene_file, groups, p, root_dir, fun=get_host_gene_cnv, which="cnv", \
prediction_method="LinearSVC", C=1, n_folds=5, criterion='aic', penalty="l2", alpha=0.025, threshold=0)
|
<reponame>mbattistello/lambda_converters
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _GraphDS.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_GraphDS', [dirname(__file__)])
except ImportError:
import _GraphDS
return _GraphDS
if fp is not None:
try:
_mod = imp.load_module('_GraphDS', fp, pathname, description)
finally:
fp.close()
return _mod
_GraphDS = swig_import_helper()
del swig_import_helper
else:
import _GraphDS
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _GraphDS.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_GraphDS.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_GraphDS.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_GraphDS.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_GraphDS.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_GraphDS.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_GraphDS.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_GraphDS.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_GraphDS.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_GraphDS.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_GraphDS.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_GraphDS.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_GraphDS.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_GraphDS.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_GraphDS.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_GraphDS.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_GraphDS.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _GraphDS.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.TCollection
import OCC.Standard
import OCC.MMgt
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
GraphDS_OnlyInput = _GraphDS.GraphDS_OnlyInput
GraphDS_OnlyOutput = _GraphDS.GraphDS_OnlyOutput
GraphDS_InputAndOutput = _GraphDS.GraphDS_InputAndOutput
GraphDS_OnlyFront = _GraphDS.GraphDS_OnlyFront
GraphDS_OnlyBack = _GraphDS.GraphDS_OnlyBack
GraphDS_FrontAndBack = _GraphDS.GraphDS_FrontAndBack
class GraphDS_DataMapIteratorOfEntityRoleMap(OCC.TCollection.TCollection_BasicMapIterator):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
:param aMap:
:type aMap: GraphDS_EntityRoleMap &
:rtype: None
"""
_GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_swiginit(self,_GraphDS.new_GraphDS_DataMapIteratorOfEntityRoleMap(*args))
def Initialize(self, *args):
"""
:param aMap:
:type aMap: GraphDS_EntityRoleMap &
:rtype: None
"""
return _GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Initialize(self, *args)
def Key(self, *args):
"""
:rtype: Handle_Standard_Transient
"""
return _GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Key(self, *args)
def Value(self, *args):
"""
:rtype: GraphDS_EntityRole
"""
return _GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Value(self, *args)
__swig_destroy__ = _GraphDS.delete_GraphDS_DataMapIteratorOfEntityRoleMap
GraphDS_DataMapIteratorOfEntityRoleMap.Initialize = new_instancemethod(_GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Initialize,None,GraphDS_DataMapIteratorOfEntityRoleMap)
GraphDS_DataMapIteratorOfEntityRoleMap.Key = new_instancemethod(_GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Key,None,GraphDS_DataMapIteratorOfEntityRoleMap)
GraphDS_DataMapIteratorOfEntityRoleMap.Value = new_instancemethod(_GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_Value,None,GraphDS_DataMapIteratorOfEntityRoleMap)
GraphDS_DataMapIteratorOfEntityRoleMap_swigregister = _GraphDS.GraphDS_DataMapIteratorOfEntityRoleMap_swigregister
GraphDS_DataMapIteratorOfEntityRoleMap_swigregister(GraphDS_DataMapIteratorOfEntityRoleMap)
class GraphDS_DataMapNodeOfEntityRoleMap(OCC.TCollection.TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:param I:
:type I: GraphDS_EntityRole &
:param n:
:type n: TCollection_MapNodePtr &
:rtype: None
"""
_GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_swiginit(self,_GraphDS.new_GraphDS_DataMapNodeOfEntityRoleMap(*args))
def Key(self, *args):
"""
:rtype: Handle_Standard_Transient
"""
return _GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_Key(self, *args)
def Value(self, *args):
"""
:rtype: GraphDS_EntityRole
"""
return _GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_Value(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_GraphDS_DataMapNodeOfEntityRoleMap(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _GraphDS.delete_GraphDS_DataMapNodeOfEntityRoleMap
GraphDS_DataMapNodeOfEntityRoleMap.Key = new_instancemethod(_GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_Key,None,GraphDS_DataMapNodeOfEntityRoleMap)
GraphDS_DataMapNodeOfEntityRoleMap.Value = new_instancemethod(_GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_Value,None,GraphDS_DataMapNodeOfEntityRoleMap)
GraphDS_DataMapNodeOfEntityRoleMap_swigregister = _GraphDS.GraphDS_DataMapNodeOfEntityRoleMap_swigregister
GraphDS_DataMapNodeOfEntityRoleMap_swigregister(GraphDS_DataMapNodeOfEntityRoleMap)
class Handle_GraphDS_DataMapNodeOfEntityRoleMap(OCC.TCollection.Handle_TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_swiginit(self,_GraphDS.new_Handle_GraphDS_DataMapNodeOfEntityRoleMap(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_DownCast)
__swig_destroy__ = _GraphDS.delete_Handle_GraphDS_DataMapNodeOfEntityRoleMap
Handle_GraphDS_DataMapNodeOfEntityRoleMap.Nullify = new_instancemethod(_GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_Nullify,None,Handle_GraphDS_DataMapNodeOfEntityRoleMap)
Handle_GraphDS_DataMapNodeOfEntityRoleMap.IsNull = new_instancemethod(_GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_IsNull,None,Handle_GraphDS_DataMapNodeOfEntityRoleMap)
Handle_GraphDS_DataMapNodeOfEntityRoleMap.GetObject = new_instancemethod(_GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_GetObject,None,Handle_GraphDS_DataMapNodeOfEntityRoleMap)
Handle_GraphDS_DataMapNodeOfEntityRoleMap_swigregister = _GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_swigregister
Handle_GraphDS_DataMapNodeOfEntityRoleMap_swigregister(Handle_GraphDS_DataMapNodeOfEntityRoleMap)
def Handle_GraphDS_DataMapNodeOfEntityRoleMap_DownCast(*args):
return _GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_DownCast(*args)
Handle_GraphDS_DataMapNodeOfEntityRoleMap_DownCast = _GraphDS.Handle_GraphDS_DataMapNodeOfEntityRoleMap_DownCast
class GraphDS_EntityRoleMap(OCC.TCollection.TCollection_BasicMap):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param NbBuckets: default value is 1
:type NbBuckets: int
:rtype: None
"""
_GraphDS.GraphDS_EntityRoleMap_swiginit(self,_GraphDS.new_GraphDS_EntityRoleMap(*args))
def Assign(self, *args):
"""
:param Other:
:type Other: GraphDS_EntityRoleMap &
:rtype: GraphDS_EntityRoleMap
"""
return _GraphDS.GraphDS_EntityRoleMap_Assign(self, *args)
def Set(self, *args):
"""
:param Other:
:type Other: GraphDS_EntityRoleMap &
:rtype: GraphDS_EntityRoleMap
"""
return _GraphDS.GraphDS_EntityRoleMap_Set(self, *args)
def ReSize(self, *args):
"""
:param NbBuckets:
:type NbBuckets: int
:rtype: None
"""
return _GraphDS.GraphDS_EntityRoleMap_ReSize(self, *args)
def Clear(self, *args):
"""
:rtype: None
"""
return _GraphDS.GraphDS_EntityRoleMap_Clear(self, *args)
def Bind(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:param I:
:type I: GraphDS_EntityRole &
:rtype: bool
"""
return _GraphDS.GraphDS_EntityRoleMap_Bind(self, *args)
def IsBound(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: bool
"""
return _GraphDS.GraphDS_EntityRoleMap_IsBound(self, *args)
def UnBind(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: bool
"""
return _GraphDS.GraphDS_EntityRoleMap_UnBind(self, *args)
def Find(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: GraphDS_EntityRole
"""
return _GraphDS.GraphDS_EntityRoleMap_Find(self, *args)
def ChangeFind(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: GraphDS_EntityRole
"""
return _GraphDS.GraphDS_EntityRoleMap_ChangeFind(self, *args)
def Find1(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: Standard_Address
"""
return _GraphDS.GraphDS_EntityRoleMap_Find1(self, *args)
def ChangeFind1(self, *args):
"""
:param K:
:type K: Handle_Standard_Transient &
:rtype: Standard_Address
"""
return _GraphDS.GraphDS_EntityRoleMap_ChangeFind1(self, *args)
__swig_destroy__ = _GraphDS.delete_GraphDS_EntityRoleMap
GraphDS_EntityRoleMap.Assign = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Assign,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.Set = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Set,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.ReSize = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_ReSize,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.Clear = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Clear,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.Bind = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Bind,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.IsBound = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_IsBound,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.UnBind = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_UnBind,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.Find = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Find,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.ChangeFind = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_ChangeFind,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.Find1 = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_Find1,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap.ChangeFind1 = new_instancemethod(_GraphDS.GraphDS_EntityRoleMap_ChangeFind1,None,GraphDS_EntityRoleMap)
GraphDS_EntityRoleMap_swigregister = _GraphDS.GraphDS_EntityRoleMap_swigregister
GraphDS_EntityRoleMap_swigregister(GraphDS_EntityRoleMap)
|
"""
Utilities for Markdown
html : render markdown to html
toc : Get the Table of Content
extract_images: Return a list of images, can be used to extract the top image
"""
import os
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.extensions import Extension
from jinja2.nodes import CallBlock
from jinja2.ext import Extension as JExtension
###
# This extension will extract all the images from the doc
class ExtractImagesExtension(Extension):
def extendMarkdown(self, md, md_globals):
ext = ExtractImagesTreeprocessor(md)
md.treeprocessors.add("imageextractor", ext, "_end")
class ExtractImagesTreeprocessor(Treeprocessor):
def run(self, root):
"Find all images and append to markdown.images. "
self.markdown.images = []
for image in root.getiterator("img"):
self.markdown.images.append(image.attrib["src"])
###
# LazyImageExtension
# An extension to delay load of images on the page
class LazyImageExtension(Extension):
def extendMarkdown(self, md, md_globals):
ext = LazyImageTreeprocessor(md)
md.treeprocessors.add("lazyimage", ext, "_end")
class LazyImageTreeprocessor(Treeprocessor):
def run(self, root):
for image in root.getiterator("img"):
image.set("data-src", image.attrib["src"])
image.set("src", "")
image.set("class", "lazy")
# EMBED
# [[embed]](http://)
# An extension to delay load of images on the page.
# It adds the class oembed in the link
class OEmbedExtension(Extension):
def extendMarkdown(self, md, md_globals):
ext = OEmbedTreeprocessor(md)
md.treeprocessors.add("oembedextension", ext, "_end")
class OEmbedTreeprocessor(Treeprocessor):
def run(self, root):
for a in root.getiterator("a"):
if a.text.strip() == "[embed]":
a.text = ""
a.set("class", "oembed")
a.set("target", "_blank")
# ------------------------------------------------------------------------------
def html(text, lazy_images=False):
"""
To render a markdown format text into HTML.
    - If you also want to build a Table of Contents inside the markdown,
add the tags: [TOC]
It will include a <ul><li>...</ul> of all <h*>
:param text:
:param lazy_images: bool - If true, it will activate the LazyImageExtension
:return:
"""
extensions = [
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.toc',
'markdown.extensions.tables',
OEmbedExtension()
]
if lazy_images:
extensions.append(LazyImageExtension())
return markdown.markdown(text, extensions=extensions)
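# Illustrative usage (exact HTML output depends on the installed markdown
# version):
#   html("# Title\n\nSome **bold** text")
#   ->  roughly '<h1 id="title">Title</h1>\n<p>Some <strong>bold</strong> text</p>'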
def toc(text):
"""
Return a table of context list
:param text:
:return:
"""
extensions = ['markdown.extensions.toc']
mkd = markdown.Markdown(extensions=extensions)
html = mkd.convert(text)
return mkd.toc
def extract_images(text):
"""
Extract all images in the content
:param text:
:return:
"""
extensions = [ExtractImagesExtension()]
mkd = markdown.Markdown(extensions=extensions)
html = mkd.convert(text)
return mkd.images
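# Illustrative usage (paths assumed):
#   extract_images("![a](/img/cover.png) and ![b](/img/banner.jpg)")
#   ->  ['/img/cover.png', '/img/banner.jpg']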
# ------------------------------------------------------------------------------
class MarkdownTagExtension(JExtension):
tags = set(['markdown'])
def __init__(self, environment):
super(MarkdownTagExtension, self).__init__(environment)
environment.extend(
markdowner=markdown.Markdown(extensions=['extra'])
)
def parse(self, parser):
lineno = next(parser.stream).lineno
body = parser.parse_statements(
['name:endmarkdown'],
drop_needle=True
)
return CallBlock(
self.call_method('_markdown_support'),
[],
[],
body
).set_lineno(lineno)
def _markdown_support(self, caller):
block = caller()
block = self._strip_whitespace(block)
return self._render_markdown(block)
def _strip_whitespace(self, block):
lines = block.split('\n')
whitespace = ''
output = ''
if (len(lines) > 1):
for char in lines[1]:
if (char == ' ' or char == '\t'):
whitespace += char
else:
break
for line in lines:
output += line.replace(whitespace, '', 1) + '\r\n'
return output.strip()
def _render_markdown(self, block):
block = self.environment.markdowner.convert(block)
return block
class MarkdownExtension(JExtension):
options = {}
file_extensions = '.md'
def preprocess(self, source, name, filename=None):
        if not name or os.path.splitext(name)[1] not in self.file_extensions:
            return source
return html(source)
# Markdown
mkd = markdown.Markdown(extensions=[
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.toc',
'markdown.extensions.tables'
])
def convert(text):
"""
Convert MD text to HTML
:param text:
:return:
"""
html = mkd.convert(text)
mkd.reset()
return html
def get_toc(text):
"""
Extract Table of Content of MD
:param text:
:return:
"""
mkd.convert(text)
toc = mkd.toc
mkd.reset()
return toc
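# Note on the shared `mkd` instance: reset() is called after each conversion
# because a markdown.Markdown object keeps state (such as the generated TOC)
# between convert() calls. Illustrative use:
#   convert("line one\nline two")   # nl2br turns the newline into <br />
#   get_toc("# Heading")            # returns the TOC <div> for the heading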
|
<filename>signin/jd_job/bean_app.py
import json
import random
from .common import RequestError, Job
class BeanApp(Job):
"""
    Signs in on the JD mobile client to collect JD beans. Since this is an App (mobile) page, the login flow is the same as the coin (钢镚) job and differs from the desktop bean sign-in.
"""
    job_name = 'JD app sign-in for JD beans'
index_url = 'https://bean.m.jd.com'
info_url = 'https://api.m.jd.com/client.action?functionId=queryBeanIndex'
sign_url = 'https://api.m.jd.com/client.action?functionId=signBeanStart'
test_url = 'https://home.m.jd.com'
poker_url = 'https://api.m.jd.com/client.action?functionId=getCardResult'
client_info = {
'client': 'ld',
'clientVersion': '1.0.0'
}
def is_signed(self):
try:
data = self.fetch_data(self.info_url)
except RequestError as e:
            self.logger.error('Failed to fetch sign-in info: {}'.format(e.message))
return False
        # Based on testing: status 2 = already signed in, 4 = not signed in, 5 = not logged in
signed = (data['status'] == '2')
sign_days = int(data['continuousDays'])
beans_count = int(data['totalUserBean'])
        self.logger.info('Signed in today: {}; consecutive days: {}; current beans: {}'.format(signed, sign_days, beans_count))
return signed
def sign(self):
try:
data = self.fetch_data(self.sign_url)
except RequestError as e:
            self.logger.error('Sign-in failed: {}'.format(e.message))
return False
sign_success = (data['status'] == '1')
message = data['signShowBean']['signText']
message = message.replace('signAward', data['signShowBean']['signAward'])
        self.logger.info('Sign-in succeeded: {}; message: {}'.format(sign_success, message))
poker = data['signShowBean']
# "complated": 原文如此, 服务端的拼写错误...
poker_picked = poker['complated']
if not poker_picked:
pick_success = self.pick_poker(poker)
            # only treat the sign-in as successful if the card flip succeeds too
sign_success &= pick_success
return sign_success
def pick_poker(self, poker):
poker_to_pick = random.randint(1, len(poker['awardList']))
try:
payload = {'body': json.dumps({'index': poker_to_pick})}
data = self.fetch_data(self.poker_url, payload=payload)
except RequestError as e:
            self.logger.error('Card flip failed: {}'.format(e.message))
return False
message = data['signText'].replace('signAward', data['signAward'])
        self.logger.info('Card flip succeeded: {}'.format(message))
return True
def fetch_data(self, url, payload=None):
payload = {**payload, **self.client_info} if payload else self.client_info
r = self.session.get(url, params=payload)
try:
as_json = r.json()
except ValueError:
raise RequestError('unexpected response: url: {}; http code: {}'.format(url, r.status_code), response=r)
if as_json['code'] != '0' or 'errorCode' in as_json or 'errorMessage' in as_json:
error_msg = as_json.get('echo') or as_json.get('errorMessage') or str(as_json)
error_code = as_json.get('errorCode') or as_json.get('code')
raise RequestError(error_msg, code=error_code, response=r)
        # request succeeded
return as_json['data']
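    # Observed response envelope (inferred from the checks above, not a
    # documented API): {"code": "0", "data": {...}} on success, with
    # "errorCode"/"errorMessage" present on failure; fetch_data unwraps "data".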
|
<filename>bnl_ml_examples/supervised/data.py<gh_stars>1-10
from pathlib import Path
import h5py
from sklearn.model_selection import train_test_split
import numpy as np
def load_data(data_dir, uniform=True, seed=1234):
"""
Loads min/max normalized data according to split preference
Parameters
----------
data_dir : Path
Directory containing .hdf5 files
uniform : bool
To load data randomly uniform or to split according to data quality
False will give the failure mode presented in the paper which requires feature engineering
seed : int
Seed for random shuffle
Returns
-------
X_train, y_train, X_test, y_test
"""
def split_file(path):
with h5py.File(path, "r") as f:
score = list()
mu = list()
energy = list()
for uid in f.keys():
score.append(int(f[uid].attrs["score"]))
mu.append(list(f[uid]["mu"]))
energy.append(list(f[uid]["energy"]))
return score, mu, energy
scores = []
data = []
energies = []
if uniform:
paths = list(data_dir.glob("*.hdf5"))
for path in paths:
score, mu, energy = split_file(path)
scores.extend(score)
data.extend(mu)
energies.extend(energy)
X_train, X_test, y_train, y_test = train_test_split(
data, scores, test_size=0.2, shuffle=True, random_state=seed
)
else:
train_paths = [
data_dir / "fluorescence_training_set.hdf5",
data_dir / "transmission_training_set.hdf5",
]
test_paths = [data_dir / "verygood_training_set.hdf5"]
for path in train_paths:
score, mu, energy = split_file(path)
scores.extend(score)
data.extend(mu)
energies.extend(energy)
test_scores = []
test_data = []
test_energies = []
for path in test_paths:
score, mu, energy = split_file(path)
test_scores.extend(score)
test_data.extend(mu)
test_energies.extend(energy)
X_train, X_test, y_train, y_test = train_test_split(
data, scores, test_size=0.1, shuffle=True, random_state=seed
)
X_test = np.concatenate([np.array(test_data), X_test])
y_test = np.concatenate([np.array(test_scores), y_test])
# Normalization
X_train = (X_train - np.min(X_train, axis=1, keepdims=True)) / (
np.max(X_train, axis=1, keepdims=True)
- np.min(X_train, axis=1, keepdims=True)
+ 1e-8
)
X_test = (X_test - np.min(X_test, axis=1, keepdims=True)) / (
np.max(X_test, axis=1, keepdims=True)
- np.min(X_test, axis=1, keepdims=True)
+ 1e-8
)
y_train = np.array(y_train)
y_test = np.array(y_test)
return X_train, y_train, X_test, y_test
def featurization(X):
def autocorr(x, t=1):
return np.corrcoef(np.array([x[:-t], x[t:]]))[0, 1]
def extract_autocorrelation_features(x):
ac1 = autocorr(x, 1)
ac2 = autocorr(x, 2)
ac3 = autocorr(x, 3)
ac4 = autocorr(x, 4)
corr_coeffs = [ac1, ac2, ac3, ac4]
labels = ["_ac" + str(i) for i in range(1, 5)]
return corr_coeffs, labels
def extract_start_end(X):
start_end = abs(X[:, :5].mean(axis=1) - X[:, -5:].mean(axis=1))
return start_end
def basic_stats(X):
mean = np.mean(X, axis=1)
var = np.var(X, axis=1)
_sum = np.sum(X, axis=1)
argmax = np.argmax(X, axis=1)
return np.stack([mean, _sum, var, argmax], axis=1)
corr_coeffs = []
for i in range(X.shape[0]):
cc, l = extract_autocorrelation_features(X[i, :])
cc = np.nan_to_num(cc, nan=0)
corr_coeffs.append(cc)
corr_coeffs = np.array(corr_coeffs)
start_end = np.expand_dims(extract_start_end(X), axis=1)
basic = basic_stats(X)
# repeat for the derivative
diff = X[:, 1:] - X[:, :-1]
d_corr_coeffs = []
for i in range(diff.shape[0]):
cc, l = extract_autocorrelation_features(diff[i, :])
cc = np.nan_to_num(cc, nan=0)
d_corr_coeffs.append(cc)
d_corr_coeffs = np.array(d_corr_coeffs)
d_start_end = np.expand_dims(extract_start_end(diff), axis=1)
d_basic = basic_stats(diff)
return np.concatenate(
[corr_coeffs, start_end, basic, d_corr_coeffs, d_start_end, d_basic], axis=1
)
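# --- Hedged usage sketch (not part of the original file) ---
# Loading the normalized splits and deriving the engineered features; the
# data directory path below is invented for illustration:
#
# X_train, y_train, X_test, y_test = load_data(Path("data/"), uniform=False)
# F_train = featurization(np.asarray(X_train))  # shape: (n_samples, 18)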
|
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import logging
import unittest
from unittest.mock import Mock, patch, MagicMock
from github import Github
import cla
from cla.models.github_models import get_pull_request_commit_authors, handle_commit_from_user, MockGitHub
from cla.models.dynamo_models import Signature, Project
from cla.models.github_models import GitHub as GithubModel
class TestGitHubModels(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.mock_user_patcher = patch('cla.models.github_models.cla.utils.get_user_instance')
cls.mock_signature_patcher = patch('cla.models.github_models.cla.utils.get_signature_instance')
cls.mock_utils_patcher = patch('cla.models.github_models.cla.utils')
cls.mock_utils_get = cls.mock_utils_patcher.start()
cls.mock_user_get = cls.mock_user_patcher.start()
cls.mock_signature_get = cls.mock_signature_patcher.start()
@classmethod
def tearDownClass(cls) -> None:
cls.mock_user_patcher.stop()
cls.mock_signature_patcher.stop()
cls.mock_utils_patcher.stop()
def setUp(self) -> None:
# Only show critical logging stuff
cla.log.level = logging.CRITICAL
self.assertTrue(cla.conf['GITHUB_OAUTH_TOKEN'] != '',
'Missing GITHUB_OAUTH_TOKEN environment variable - required to run unit tests')
# cla.log.debug('Using GITHUB_OAUTH_TOKEN: {}...'.format(cla.conf['GITHUB_OAUTH_TOKEN'][:5]))
def tearDown(self) -> None:
pass
def test_commit_authors_with_named_user(self) -> None:
"""
Test that we can load commit authors from a pull request that does have the traditional
github.NamedUser.NamedUser object filled out
"""
g = Github(cla.conf['GITHUB_OAUTH_TOKEN'])
repo = g.get_repo(27729926) # grpc/grpc-java
pr = repo.get_pull(6142) # example: https://github.com/grpc/grpc-java/pull/6142
cla.log.info("Retrieved GitHub PR: {}".format(pr))
commits = pr.get_commits()
cla.log.info("Retrieved GitHub PR: {}, commits: {}".format(pr, commits))
# Returns a list of tuples that look like (commit_sha_string, (author_id, author_username, author_email));
# the second element of each tuple is a nested tuple containing the author information
commit_authors = get_pull_request_commit_authors(pr)
# cla.log.info("Result: {}".format(commit_authors))
# cla.log.info([author_info[1] for commit, author_info in commit_authors])
self.assertTrue(4779759 in [author_info[0] for commit, author_info in commit_authors])
def test_commit_authors_no_named_user(self) -> None:
"""
Test that we can load commit authors from a pull request that does NOT have the traditional
github.NamedUser.NamedUser object filled out
"""
# We need to mock this service so that we can test our business logic - disabling this test for now
# as they closed the PR
g = Github(cla.conf['GITHUB_OAUTH_TOKEN'])
repo = g.get_repo(27729926) # grpc/grpc-java
pr = repo.get_pull(6152) # example: https://github.com/grpc/grpc-java/pull/6152
cla.log.info("Retrieved GitHub PR: {}".format(pr))
commits = pr.get_commits()
cla.log.info("Retrieved GitHub PR: {}, commits: {}".format(pr, commits))
# Returns a list of tuples that look like (commit_sha_string, (author_id, author_username, author_email));
# the second element of each tuple is a nested tuple containing the author information
# commit_authors = get_pull_request_commit_authors(pr)
# cla.log.info("Result: {}".format(commit_authors))
# cla.log.info([author_info[1] for commit, author_info in commit_authors])
# self.assertTrue('snalkar' in [author_info[1] for commit, author_info in commit_authors])
def test_handle_commit_author_whitelisted(self) -> None:
"""
Test case where commit authors have no signatures but have been whitelisted and should
return missing list containing a whitelisted flag
"""
# Mock user not existing and happens to be whitelisted
self.mock_user_get.return_value.get_user_by_github_id.return_value = None
self.mock_user_get.return_value.get_user_by_email.return_value = None
self.mock_signature_get.return_value.get_signatures_by_project.return_value = [Signature()]
self.mock_utils_get.return_value.is_approved.return_value = True
missing = []
signed = []
project = Project()
project.set_project_id('fake_project_id')
handle_commit_from_user(project, 'fake_sha', (123, 'foo', '<EMAIL>'), signed, missing)
# We commented out this functionality for now - re-enable if we add it back
# self.assertListEqual(missing, [('fake_sha', [123, 'foo', '<EMAIL>', True])])
self.assertEqual(signed, [])
def test_handle_invalid_author(self) -> None:
"""
Test case handling a non-existent author tagged to a given commit
"""
project = Project()
author_info = None
signed = []
missing = []
handle_commit_from_user(project, 'fake_sha', author_info, signed, missing)
self.assertEqual(signed, [])
self.assertEqual(missing, [('fake_sha', [])])
class TestGithubModelsPrComment(unittest.TestCase):
def setUp(self) -> None:
self.github = MockGitHub()
self.github.update_change_request = MagicMock()
def tearDown(self) -> None:
pass
def test_process_easycla_command_comment(self):
with self.assertRaisesRegex(ValueError, "missing comment body"):
self.github.process_easycla_command_comment({})
with self.assertRaisesRegex(ValueError, "unsupported comment supplied"):
self.github.process_easycla_command_comment({
"comment": {"body": "/otherbot"}
})
with self.assertRaisesRegex(ValueError, "missing github repository id"):
self.github.process_easycla_command_comment({
"comment": {"body": "/easycla"},
})
with self.assertRaisesRegex(ValueError, "missing pull request id"):
self.github.process_easycla_command_comment({
"comment": {"body": "/easycla"},
"repository": {"id": 123},
})
with self.assertRaisesRegex(ValueError, "missing installation id"):
self.github.process_easycla_command_comment({
"comment": {"body": "/easycla"},
"repository": {"id": 123},
"issue": {"number": 1},
})
self.github.process_easycla_command_comment({
"comment": {"body": "/easycla"},
"repository": {"id": 123},
"issue": {"number": 1},
"installation": {"id": 1},
})
class TestGithubUserEmails(unittest.TestCase):
def test_empty_emails(self):
with patch.object(GithubModel, "_fetch_github_emails") as _fetch_github_emails:
_fetch_github_emails.return_value = []
github = GithubModel()
emails = github.get_user_emails(None, "fake_client_id")
assert not emails
def test_emails_with_noreply(self):
with patch.object(GithubModel, "_fetch_github_emails") as _fetch_github_emails:
_fetch_github_emails.return_value = [
{
"email": "<EMAIL>",
"verified": True,
"primary": True,
"visibility": "public"
},
{
"email": "<EMAIL>",
"verified": True,
"primary": True,
"visibility": "public"
},
{
"email": "<EMAIL>",
"verified": False,
"primary": True,
"visibility": "public"
}
]
github = GithubModel()
emails = github.get_user_emails(None, "fake_client_id")
assert emails
assert len(emails) == 1
assert emails == ["<EMAIL>"]
def test_emails_with_noreply_single(self):
with patch.object(GithubModel, "_fetch_github_emails") as _fetch_github_emails:
_fetch_github_emails.return_value = [
{
"email": "<EMAIL>",
"verified": True,
"primary": True,
"visibility": "public"
},
]
github = GithubModel()
emails = github.get_user_emails(None, "fake_client_id")
assert emails
assert len(emails) == 1
assert emails == ["<EMAIL>"]
def test_emails_without_noreply(self):
with patch.object(GithubModel, "_fetch_github_emails") as _fetch_github_emails:
_fetch_github_emails.return_value = [
{
"email": "<EMAIL>",
"verified": True,
"primary": True,
"visibility": "public"
},
{
"email": "<EMAIL>",
"verified": True,
"primary": True,
"visibility": "public"
},
{
"email": "<EMAIL>",
"verified": False,
"primary": True,
"visibility": "public"
}
]
github = GithubModel()
emails = github.get_user_emails(None, "fake_client_id")
assert emails
assert len(emails) == 2
assert "<EMAIL>" in emails
assert "<EMAIL>" in emails
if __name__ == '__main__':
unittest.main()
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_env_rpc helper functions."""
import struct
from absl.testing import absltest
from absl.testing import parameterized
import mock
import numpy as np
from google.protobuf import any_pb2
from google.protobuf import struct_pb2
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import tensor_utils
class PackTensorTests(parameterized.TestCase):
@parameterized.parameters(
(np.float32(2.5), 'floats'),
(2.5, 'doubles'),
(np.int32(-25), 'int32s'),
(np.int64(-25), 'int64s'),
(np.frombuffer(b'\xF0\xF1\xF2\xF3', np.uint32)[0], 'uint32s'),
(np.frombuffer(b'\xF0\xF1\xF2\xF3\xF4\xF5\xF6\xF7',
np.uint64)[0], 'uint64s'),
(True, 'bools'),
(False, 'bools'),
('foo', 'strings'),
)
def test_pack_scalars(self, scalar, expected_payload):
tensor = tensor_utils.pack_tensor(scalar)
self.assertEqual([], tensor.shape)
self.assertEqual([scalar], getattr(tensor, expected_payload).array)
@parameterized.parameters(
(np.int8(-25), 'b', 'int8s'),
(np.uint8(250), 'B', 'uint8s'),
)
def test_pack_scalar_bytes(self, scalar, fmt, expected_payload):
tensor = tensor_utils.pack_tensor(scalar)
self.assertEqual([], tensor.shape)
actual = struct.unpack(fmt, getattr(tensor, expected_payload).array)
self.assertEqual(scalar, actual)
def test_pack_scalar_protos(self):
scalar = struct_pb2.Value(string_value='my message')
tensor = tensor_utils.pack_tensor(scalar)
self.assertEqual([], tensor.shape)
self.assertLen(tensor.protos.array, 1)
unpacked = struct_pb2.Value()
self.assertTrue(tensor.protos.array[0].Unpack(unpacked))
self.assertEqual(scalar, unpacked)
def test_pack_scalar_any_proto(self):
scalar = struct_pb2.Value(string_value='my message')
scalar_any = any_pb2.Any()
scalar_any.Pack(scalar)
tensor = tensor_utils.pack_tensor(scalar_any)
self.assertEqual([], tensor.shape)
self.assertLen(tensor.protos.array, 1)
unpacked = struct_pb2.Value()
self.assertTrue(tensor.protos.array[0].Unpack(unpacked))
self.assertEqual(scalar, unpacked)
@parameterized.parameters(
(25, np.float32, 'floats'),
(25, np.float64, 'doubles'),
(25, np.int32, 'int32s'),
(25, np.int64, 'int64s'),
(25, np.uint32, 'uint32s'),
(25, np.uint64, 'uint64s'),
(2**64-1, np.uint64, 'uint64s'),
(True, np.bool, 'bools'),
(False, np.bool, 'bools'),
('foo', np.str, 'strings'),
)
def test_pack_scalars_specific_dtype(self, scalar, dtype, expected_payload):
tensor = tensor_utils.pack_tensor(scalar, dtype)
self.assertEqual([], tensor.shape)
self.assertEqual([scalar], getattr(tensor, expected_payload).array)
def test_pack_with_dm_env_rpc_data_type(self):
tensor = tensor_utils.pack_tensor([5], dm_env_rpc_pb2.DataType.FLOAT)
self.assertEqual([5], tensor.floats.array)
@parameterized.parameters(
([np.int8(-25), np.int8(-23)], '2b', 'int8s'),
([np.uint8(249), np.uint8(250)], '2B', 'uint8s'),
)
def test_pack_bytes_array(self, scalar, fmt, expected_payload):
tensor = tensor_utils.pack_tensor(scalar)
self.assertEqual([2], tensor.shape)
actual = struct.unpack(fmt, getattr(tensor, expected_payload).array)
np.testing.assert_array_equal(scalar, actual)
@parameterized.parameters(
(np.array([1.0, 2.0], dtype=np.float32), 'floats'),
(np.array([1.0, 2.0], dtype=np.float64), 'doubles'),
([1.0, 2.0], 'doubles'),
(np.array([1, 2], dtype=np.int32), 'int32s'),
(np.array([1, 2], dtype=np.int64), 'int64s'),
(np.array([1, 2], dtype=np.uint32), 'uint32s'),
(np.array([1, 2], dtype=np.uint64), 'uint64s'),
([True, False], 'bools'),
(np.array([True, False]), 'bools'),
(['foo', 'bar'], 'strings'),
)
def test_pack_arrays(self, array, expected_payload):
tensor = tensor_utils.pack_tensor(array)
self.assertEqual([2], tensor.shape)
packed_array = getattr(tensor, expected_payload).array
np.testing.assert_array_equal(array, packed_array)
def test_pack_proto_arrays(self):
array = np.array([
struct_pb2.Value(string_value=message)
for message in ['foo', 'bar']
])
tensor = tensor_utils.pack_tensor(array)
self.assertEqual([2], tensor.shape)
unpacked = struct_pb2.Value()
tensor.protos.array[0].Unpack(unpacked)
self.assertEqual(array[0], unpacked)
tensor.protos.array[1].Unpack(unpacked)
self.assertEqual(array[1], unpacked)
def test_pack_mixed_proto_array_fails(self):
with self.assertRaisesRegex(ValueError, 'not recognized'):
tensor_utils.pack_tensor(np.array([struct_pb2.Value(), 1, 2, 3]))
def test_packed_rowmajor(self):
array2d = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32)
tensor = tensor_utils.pack_tensor(array2d)
self.assertEqual([3, 2], tensor.shape)
np.testing.assert_array_equal([1, 2, 3, 4, 5, 6], tensor.int32s.array)
def test_mixed_scalar_types_raises_exception(self):
with self.assertRaises(TypeError):
tensor_utils.pack_tensor(['hello!', 75], dtype=np.float32)
def test_jagged_arrays_throw_exceptions(self):
with self.assertRaises(ValueError):
tensor_utils.pack_tensor([[1, 2], [3, 4, 5]])
@parameterized.parameters(
(['foo', 'bar'], np.str_),
('baz', dm_env_rpc_pb2.DataType.STRING),
(['foobar'], np.array(['foobar']).dtype),
)
def test_np_object_strings(self, value, dtype):
object_array = np.array(value, dtype=np.object)
tensor = tensor_utils.pack_tensor(object_array, dtype=dtype)
self.assertEqual(list(object_array.shape), tensor.shape)
self.assertTrue(tensor.HasField('strings'))
def test_np_object_strings_no_dtype_raises_exception(self):
with self.assertRaises(ValueError):
tensor_utils.pack_tensor(np.array(['foo'], dtype=np.object))
@parameterized.parameters(
(['foo', 42, 'bar'],),
([1, 2, 3],),
)
def test_np_object_to_strings_fail(self, bad_element):
with self.assertRaisesRegex(TypeError,
'not all elements are Python string types'):
tensor_utils.pack_tensor(
np.array(bad_element, dtype=np.object), dtype=np.str_)
def test_class_instance_throw_exception(self):
class Foo(object):
pass
with self.assertRaises(ValueError):
tensor_utils.pack_tensor(Foo())
def test_compress_integers_to_1_element_when_all_same(self):
array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32)
packed = tensor_utils.pack_tensor(array, try_compress=True)
self.assertEqual([6], packed.shape)
self.assertEqual([1], packed.uint32s.array)
def test_compress_floats_to_1_element_when_all_same(self):
array = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5], dtype=np.float32)
packed = tensor_utils.pack_tensor(array, try_compress=True)
self.assertEqual([6], packed.shape)
self.assertEqual([1.5], packed.floats.array)
def test_compress_strings_to_1_element_when_all_same(self):
array = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_)
packed = tensor_utils.pack_tensor(array, try_compress=True)
self.assertEqual([4], packed.shape)
self.assertEqual(['foo'], packed.strings.array)
def test_compress_multidimensional_arrays_to_1_element_when_all_same(self):
array = np.array([[4, 4], [4, 4]], dtype=np.int32)
packed = tensor_utils.pack_tensor(array, try_compress=True)
self.assertEqual([2, 2], packed.shape)
self.assertEqual([4], packed.int32s.array)
def test_doesnt_compress_if_not_asked_to(self):
array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32)
packed = tensor_utils.pack_tensor(array)
self.assertEqual([6], packed.shape)
self.assertEqual([1, 1, 1, 1, 1, 1], packed.uint32s.array)
def test_ask_to_compress_but_cant(self):
array = np.array([1, 1, 2, 1, 1, 1], dtype=np.uint32)
packed = tensor_utils.pack_tensor(array, try_compress=True)
self.assertEqual([6], packed.shape)
self.assertEqual([1, 1, 2, 1, 1, 1], packed.uint32s.array)
class UnpackTensorTests(parameterized.TestCase):
@parameterized.parameters(
np.float32(2.5),
np.float64(2.5),
np.int8(-25),
np.int32(-25),
np.int64(-25),
np.uint8(250),
np.frombuffer(b'\xF0\xF1\xF2\xF3', np.uint32)[0],
np.frombuffer(b'\xF0\xF1\xF2\xF3\xF4\xF5\xF6\xF7', np.uint64)[0],
True,
False,
'foo',
)
def test_unpack_scalars(self, scalar):
tensor = tensor_utils.pack_tensor(scalar)
round_trip = tensor_utils.unpack_tensor(tensor)
self.assertEqual(scalar, round_trip)
def test_unpack_scalar_proto(self):
scalar = struct_pb2.Value(string_value='my message')
tensor = tensor_utils.pack_tensor(scalar)
unpacked = struct_pb2.Value()
tensor_utils.unpack_tensor(tensor).Unpack(unpacked)
self.assertEqual(scalar, unpacked)
@parameterized.parameters(
([np.float32(2.5), np.float32(3.5)],),
([np.float64(2.5), np.float64(3.5)],),
([np.int8(-25), np.int8(-23)],),
([np.int32(-25), np.int32(-23)],),
([np.int64(-25), np.int64(-23)],),
([np.uint8(250), np.uint8(249)],),
([np.uint32(1), np.uint32(2)],),
([np.uint64(1), np.uint64(2)],),
([True, False],),
(['foo', 'bar'],),
)
def test_unpack_arrays(self, array):
tensor = tensor_utils.pack_tensor(array)
round_trip = tensor_utils.unpack_tensor(tensor)
np.testing.assert_array_equal(array, round_trip)
def test_unpack_proto_arrays(self):
array = np.array([
struct_pb2.Value(string_value=message)
for message in ['foo', 'bar']
])
tensor = tensor_utils.pack_tensor(array)
round_trip = tensor_utils.unpack_tensor(tensor)
unpacked = struct_pb2.Value()
round_trip[0].Unpack(unpacked)
self.assertEqual(array[0], unpacked)
round_trip[1].Unpack(unpacked)
self.assertEqual(array[1], unpacked)
def test_unpack_multidimensional_arrays(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8]
tensor.shape[:] = [2, 4]
round_trip = tensor_utils.unpack_tensor(tensor)
expected = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
np.testing.assert_array_equal(expected, round_trip)
def test_too_few_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.floats.array[:] = [1, 2, 3, 4]
tensor.shape[:] = [2, 4]
with self.assertRaisesRegex(ValueError, 'cannot reshape array'):
tensor_utils.unpack_tensor(tensor)
def test_too_many_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tensor.shape[:] = [2, 4]
with self.assertRaisesRegex(ValueError, 'cannot reshape array'):
tensor_utils.unpack_tensor(tensor)
def test_float_broadcasts_1_element_to_all_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.floats.array[:] = [1]
tensor.shape[:] = [4]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([1, 1, 1, 1], dtype=np.float32)
np.testing.assert_array_equal(expected, unpacked)
def test_integer_broadcasts_1_element_to_all_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1]
tensor.shape[:] = [4]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([1, 1, 1, 1], dtype=np.int32)
np.testing.assert_array_equal(expected, unpacked)
def test_unsigned_integer_broadcasts_1_element_to_all_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.uint8s.array = b'\x01'
tensor.shape[:] = [4]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([1, 1, 1, 1], dtype=np.uint8)
np.testing.assert_array_equal(expected, unpacked)
def test_string_broadcasts_1_element_to_all_elements(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.strings.array[:] = ['foo']
tensor.shape[:] = [4]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_)
np.testing.assert_array_equal(expected, unpacked)
def test_broadcasts_to_multidimensional_arrays(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [4]
tensor.shape[:] = [2, 2]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([[4, 4], [4, 4]], dtype=np.int32)
np.testing.assert_array_equal(expected, unpacked)
def test_negative_dimension(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1, 2, 3, 4]
tensor.shape[:] = [-1]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([1, 2, 3, 4], dtype=np.int32)
np.testing.assert_array_equal(expected, unpacked)
def test_negative_dimension_in_matrix(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6]
tensor.shape[:] = [2, -1]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
np.testing.assert_array_equal(expected, unpacked)
def test_two_negative_dimensions_in_matrix(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6]
tensor.shape[:] = [-1, -2]
with self.assertRaisesRegex(ValueError, 'one unknown dimension'):
tensor_utils.unpack_tensor(tensor)
def test_negative_dimension_single_element(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1]
tensor.shape[:] = [-1]
unpacked = tensor_utils.unpack_tensor(tensor)
expected = np.array([1], dtype=np.int32)
np.testing.assert_array_equal(expected, unpacked)
def test_unknown_type_raises_error(self):
tensor = mock.MagicMock()
tensor.WhichOneof.return_value = 'foo'
with self.assertRaisesRegex(TypeError, 'type foo'):
tensor_utils.unpack_tensor(tensor)
def test_scalar_with_too_many_elements_raises_error(self):
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1, 2, 3]
with self.assertRaisesRegex(ValueError, '3 element'):
tensor_utils.unpack_tensor(tensor)
class GetTensorTypeTests(absltest.TestCase):
def test_float(self):
tensor = tensor_utils.pack_tensor(1.25)
self.assertEqual(np.float64, tensor_utils.get_tensor_type(tensor))
def test_unknown_tensor_type(self):
mock_tensor = mock.MagicMock()
mock_tensor.WhichOneof.return_value = 'foo'
with self.assertRaisesRegex(TypeError, 'foo'):
tensor_utils.get_tensor_type(mock_tensor)
class DataTypeToNpTypeTests(absltest.TestCase):
def test_float(self):
self.assertEqual(
np.float32,
tensor_utils.data_type_to_np_type(dm_env_rpc_pb2.DataType.FLOAT))
def test_empty_object_list(self):
tensor = tensor_utils.pack_tensor(np.array([], dtype=np.object))
self.assertEqual([0], tensor.shape)
def test_unknown_type(self):
with self.assertRaises(TypeError):
tensor_utils.data_type_to_np_type(30) # pytype: disable=wrong-arg-types
class NpTypeToDataTypeTests(absltest.TestCase):
def test_float32(self):
self.assertEqual(
dm_env_rpc_pb2.DataType.FLOAT,
tensor_utils.np_type_to_data_type(np.float32))
def test_int32(self):
self.assertEqual(
dm_env_rpc_pb2.DataType.INT32,
tensor_utils.np_type_to_data_type(np.int32))
def test_dtype(self):
self.assertEqual(
dm_env_rpc_pb2.DataType.INT32,
tensor_utils.np_type_to_data_type(np.dtype(np.int32)))
def test_unknown_type(self):
with self.assertRaisesRegex(TypeError, 'dm_env_rpc DataType.*complex64'):
tensor_utils.np_type_to_data_type(np.complex64)
class GetPackerTests(absltest.TestCase):
def test_cannot_get_packer_for_invalid_type(self):
with self.assertRaisesRegex(TypeError, 'complex64'):
tensor_utils.get_packer(np.complex64)
def test_can_pack(self):
packer = tensor_utils.get_packer(np.int32)
tensor = dm_env_rpc_pb2.Tensor()
packer.pack(tensor, np.asarray([1, 2, 3], dtype=np.int32))
self.assertEqual([1, 2, 3], tensor.int32s.array)
def test_can_unpack(self):
packer = tensor_utils.get_packer(np.int32)
tensor = dm_env_rpc_pb2.Tensor()
tensor.int32s.array[:] = [1, 2, 3]
np.testing.assert_array_equal([1, 2, 3], packer.unpack(tensor))
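# --- Hedged usage sketch (illustration only; mirrors the tests above) ---
# A pack/unpack round trip with the helpers under test:
#
# tensor = tensor_utils.pack_tensor(np.array([1, 2, 3], dtype=np.int32))
# assert list(tensor.int32s.array) == [1, 2, 3] and list(tensor.shape) == [3]
# round_trip = tensor_utils.unpack_tensor(tensor)  # -> array([1, 2, 3], dtype=int32)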
if __name__ == '__main__':
absltest.main()
|
"""
Game logic for Tron game.
<NAME>, Feb 2 2010
minor edits by <NAME>, Jan 2014
"""
import random
class Board:
def __init__(self, w, h, start=None, layout=None, outerwall=True):
''' w: width
h: height
start:
"symrand" for symmetrically random (default)
"random" for totally random
((x1,y1), (x2,y2)) to put p1 at (x1,y1) and p2 at (x2,y2)
layout:
None to have an empty board
a list of strings, one per row of the board, which show the initial
placement of walls and optionally players '''
self.w = w
self.h = h
if layout is not None and start is None:
p1loc = None
p2loc = None
for y,row in enumerate(layout):
for x,c in enumerate(row):
if c == '1':
p1loc = (x,y)
elif c == '2':
p2loc = (x,y)
if p1loc is None and p2loc is None:
self.start = "symrand"
elif p1loc is not None and p2loc is not None:
self.start = (p1loc, p2loc)
else:
raise ValueError("Board is missing a player position!")
elif start is None:
self.start = "symrand"
else:
self.start = start
if layout is None:
self.layout = [' '*w]*h
else:
self.layout = layout
if outerwall:
self.w += 2
self.h += 2
self.layout = ['#'*self.w] + ['#'+row+'#' for row in self.layout] + ['#'*self.w]
if isinstance(self.start, (tuple, list)):
p1, p2 = self.start
self.start = (p1[0]+1, p1[1]+1), (p2[0]+1, p2[1]+1)
def BoardFile(fn):
with open(fn, "r") as f:
line = f.readline().split()
w, h = int(line[0]), int(line[1])
layout = []
for i in range(h):
layout.append(f.readline().strip('\n'))
return Board(w, h, layout=layout, outerwall=False)
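# --- Hedged usage sketch (not part of the original file) ---
# Building a board from an explicit layout; '#' characters are walls and
# '1'/'2' are the player start positions read by Board.__init__:
#
# layout = ["1  ",
#           "   ",
#           "  2"]
# board = Board(3, 3, layout=layout)  # an outer wall is added by default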
class GameBoard:
MOVES = [None, (0, -1), (1, 0), (0, 1), (-1, 0)]
def __init__(self, template):
w = self.width = template.w
h = self.height = template.h
self.board = [list(row) for row in template.layout]  # list of lists (a map object is not subscriptable in Python 3)
self.board_trail = [['-'] * w for i in range(h)]
if template.start in ("symrand", "random"):
free_squares = [(x,y) for x in range(w) for y in range(h) if self.board[y][x]==' ']
for i in range(10):
x,y = random.choice(free_squares)
self.p1loc = x,y
if template.start == "symrand":
self.p2loc = w-1-x, h-1-y
else:
self.p2loc = random.choice(free_squares)
if self.p1loc != self.p2loc and self.board[self.p1loc[1]][self.p1loc[0]] == ' '\
and self.board[self.p2loc[1]][self.p2loc[0]] == ' ':
break
else:
raise Exception("Couldn't place players randomly.")
else:
self.p1loc, self.p2loc = template.start
self.start = self.p1loc, self.p2loc
self.board[self.p1loc[1]][self.p1loc[0]] = '1'
self.board[self.p2loc[1]][self.p2loc[0]] = '2'
self.diff = None
def project(self, pos, delta):
return pos[0]+delta[0], pos[1]+delta[1]
def isfree(self, pos):
return (0 <= pos[0] < self.width and 0 <= pos[1] < self.height) and self.board[pos[1]][pos[0]] == ' '
def move(self, p1move, p2move):
p1loc = self.project(self.p1loc, self.MOVES[p1move])
p2loc = self.project(self.p2loc, self.MOVES[p2move])
self.board_trail[self.p1loc[1]][self.p1loc[0]] = ' NESW'[p1move]
self.board_trail[self.p2loc[1]][self.p2loc[0]] = ' NESW'[p2move]
p1lost = False
p2lost = False
if not self.isfree(p1loc):
p1lost = True
if not self.isfree(p2loc):
p2lost = True
outcome = None
if (p1lost and p2lost) or p1loc == p2loc:
outcome = 'D'
p1move = p2move = 10 # draw
elif p1lost:
outcome = '2'
p1move = 9 # lose
p2move = 8 # win
elif p2lost:
outcome = '1'
p1move = 8
p2move = 9
self.board[self.p1loc[1]][self.p1loc[0]] = '.'
self.board[self.p2loc[1]][self.p2loc[0]] = '*'
self.board[p1loc[1]][p1loc[0]] = chr(128+p1move)
self.board[p2loc[1]][p2loc[0]] = chr(160+p2move)
self.diff = self.p1loc, self.p2loc, p1loc, p2loc
self.p1loc = p1loc
self.p2loc = p2loc
return outcome
def getdims(self):
return '%s %s'%(self.width, self.height)
def getboard(self):
return [''.join(row) for row in self.board]
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import logging
import isceobj
import mroipac
from mroipac.baseline.Baseline import Baseline
from isceobj.Util.decorators import use_api
logger = logging.getLogger('isce.insar.runPreprocessor')
@use_api
def runPreprocessor(self):
master = make_raw(self.master, self.masterdop)
self.insar.rawMasterIQImage = master.iqImage
slave = make_raw(self.slave, self.slavedop)
self.insar.rawSlaveIQImage = slave.iqImage
self._insar.numberRangeBins = master.frame.numberRangeBins
#add raw images to main object
masterRaw = initRawImage(master)
self._insar.setMasterRawImage(masterRaw)
slaveRaw = initRawImage(slave)
self._insar.setSlaveRawImage(slaveRaw)
#add frames to main object
self._insar.setMasterFrame(master.frame)
self._insar.setSlaveFrame(slave.frame)
#add doppler to main object
self._insar.setMasterDoppler(master.getDopplerValues())
self._insar.setSlaveDoppler(slave.getDopplerValues())
#add squints to main object
self._insar.setMasterSquint(master.getSquint())
self._insar.setSlaveSquint(slave.getSquint())
#add look direction
self._insar.setLookSide(master.frame.getInstrument().getPlatform().pointingDirection)
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
frame = self._insar.getMasterFrame()
instrument = frame.getInstrument()
platform = instrument.getPlatform()
planet = platform.getPlanet()
catalog.addInputsFrom(planet, 'planet')
catalog.addInputsFrom(planet.get_elp(), 'planet.ellipsoid')
catalog.addInputsFrom(master.sensor, 'master.sensor')
catalog.addItem('width', masterRaw.getWidth(), 'master')
catalog.addItem('xmin', masterRaw.getXmin(), 'master')
catalog.addItem('iBias', instrument.getInPhaseValue(), 'master')
catalog.addItem('qBias', instrument.getQuadratureValue(), 'master')
catalog.addItem('range_sampling_rate', instrument.getRangeSamplingRate(), 'master')
catalog.addItem('prf', instrument.getPulseRepetitionFrequency(), 'master')
catalog.addItem('pri', 1.0/instrument.getPulseRepetitionFrequency(), 'master')
catalog.addItem('pulse_length', instrument.getPulseLength(), 'master')
catalog.addItem('chirp_slope', instrument.getChirpSlope(), 'master')
catalog.addItem('wavelength', instrument.getRadarWavelength(), 'master')
catalog.addItem('lookSide', platform.pointingDirection, 'master')
catalog.addInputsFrom(frame, 'master.frame')
catalog.addInputsFrom(instrument, 'master.instrument')
catalog.addInputsFrom(platform, 'master.platform')
catalog.addInputsFrom(frame.orbit, 'master.orbit')
frame = self._insar.getSlaveFrame()
instrument = frame.getInstrument()
platform = instrument.getPlatform()
catalog.addInputsFrom(slave.sensor, 'slave.sensor')
catalog.addItem('width', slaveRaw.getWidth(), 'slave')
catalog.addItem('xmin', slaveRaw.getXmin(), 'slave')
catalog.addItem('iBias', instrument.getInPhaseValue(), 'slave')
catalog.addItem('qBias', instrument.getQuadratureValue(), 'slave')
catalog.addItem('range_sampling_rate', instrument.getRangeSamplingRate(), 'slave')
catalog.addItem('prf', instrument.getPulseRepetitionFrequency(), 'slave')
catalog.addItem('pri', 1.0/instrument.getPulseRepetitionFrequency(), 'slave')
catalog.addItem('pulse_length', instrument.getPulseLength(), 'slave')
catalog.addItem('chirp_slope', instrument.getChirpSlope(), 'slave')
catalog.addItem('wavelength', instrument.getRadarWavelength(), 'slave')
catalog.addItem('lookSide', platform.pointingDirection, 'slave')
catalog.addInputsFrom(frame, 'slave.frame')
catalog.addInputsFrom(instrument, 'slave.instrument')
catalog.addInputsFrom(platform, 'slave.platform')
catalog.addInputsFrom(frame.orbit, 'slave.orbit')
optlist = ['all', 'top', 'middle', 'bottom']
success=False
baseLocation = None
for option in optlist:
baseObj = Baseline()
baseObj.configure()
baseObj.baselineLocation = option
baseObj.wireInputPort(name='masterFrame',object=self._insar.getMasterFrame())
baseObj.wireInputPort(name='slaveFrame',object=self._insar.getSlaveFrame())
try:
baseObj.baseline()
success=True
baseLocation=option
except Exception:
print('Baseline computation with option {0} failed'.format(option))
if success:
break
if not success:
raise Exception('Baseline computation failed with all possible options. Images may not overlap.')
catalog.addItem('horizontal_baseline_top', baseObj.hBaselineTop, 'baseline')
catalog.addItem('horizontal_baseline_rate', baseObj.hBaselineRate, 'baseline')
catalog.addItem('horizontal_baseline_acc', baseObj.hBaselineAcc, 'baseline')
catalog.addItem('vertical_baseline_top', baseObj.vBaselineTop, 'baseline')
catalog.addItem('vertical_baseline_rate', baseObj.vBaselineRate, 'baseline')
catalog.addItem('vertical_baseline_acc', baseObj.vBaselineAcc, 'baseline')
catalog.addItem('perp_baseline_top', baseObj.pBaselineTop, 'baseline')
catalog.addItem('perp_baseline_bottom', baseObj.pBaselineBottom, 'baseline')
catalog.addItem('baseline_location', baseLocation, 'baseline')
catalog.printToLog(logger, "runPreprocessor")
self._insar.procDoc.addAllFromCatalog(catalog)
def make_raw(sensor, doppler):
from make_raw import make_raw
objMakeRaw = make_raw()
objMakeRaw(sensor=sensor, doppler=doppler)
return objMakeRaw
def initRawImage(makeRawObj):
from isceobj.Image import createSlcImage
from isceobj.Image import createRawImage
#the "raw" image in same case is an slc.
#for now let's do it in this way. probably need to make this a factory
#instantiated based on the sensor type
imageType = makeRawObj.frame.getImage()
if isinstance(imageType, createRawImage().__class__):
filename = makeRawObj.frame.getImage().getFilename()
bytesPerLine = makeRawObj.frame.getImage().getXmax()
goodBytes = makeRawObj.frame.getImage().getXmax() - makeRawObj.frame.getImage().getXmin()
logger.debug("bytes_per_line: %s" % (bytesPerLine))
logger.debug("good_bytes_per_line: %s" % (goodBytes))
objRaw = createRawImage()
objRaw.setFilename(filename)
objRaw.setNumberGoodBytes(goodBytes)
objRaw.setWidth(bytesPerLine)
objRaw.setXmin(makeRawObj.frame.getImage().getXmin())
objRaw.setXmax(bytesPerLine)
elif(isinstance(imageType,createSlcImage().__class__)):
objRaw = createSlcImage()
filename = makeRawObj.frame.getImage().getFilename()
bytesPerLine = makeRawObj.frame.getImage().getXmax()
objRaw.setFilename(filename)
objRaw.setWidth(bytesPerLine)
objRaw.setXmin(makeRawObj.frame.getImage().getXmin())
objRaw.setXmax(bytesPerLine)
return objRaw
|
<reponame>zhangqf-lab/RIP-icSHAPE-MaP
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def ref_profile(MD_tag):
"""
MD_tag -- 59A11/51^CT18/6G4C20G1A5C5A1^C3A15G1G15
Return the profile corresponding to the raw sequence
"""
profile = ""
current = ""
deletion_state = False
for alpha in list(MD_tag):
if '0'<=alpha<='9':
current += alpha
deletion_state = False
else:
if current:
profile += "0"*int(current)
current = ""
if alpha == '^':
deletion_state = True
elif alpha in ('A','T','C','G'):
if deletion_state:
profile += alpha
else:
profile += '1'
if current:
profile += "0"*int(current)
return profile
def mutate_count(Cigar, MD_tag):
counts = 0
deletion_state = False
for alpha in list(MD_tag):
if '0'<=alpha<='9':
deletion_state = False
else:
if alpha == '^':
deletion_state = True
counts += 1
elif alpha in ('A','T','C','G'):
if not deletion_state:
counts += 1
counts += Cigar.count('I')
return counts
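# --- Hedged worked example (not part of the original file) ---
# For Cigar '21M2I' and MD tag '10A5^AC6':
#   ref_profile('10A5^AC6')        -> '0'*10 + '1' + '0'*5 + 'AC' + '0'*6
#   mutate_count('21M2I', '10A5^AC6') -> 3 (one mismatch + one deletion event + one insertion)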
##### miRNA and 5SrRNA
def count_mutevent_num(inBam, ratio=0.3):
mutevent_num = []
if ratio is None:
commands = "samtools view %s" % (inBam,)
else:
commands = "samtools view -s %s %s" % (ratio, inBam)
for line in os.popen(commands):
data = line.strip().split()
ref_id, Cigar, MD_tag = data[2], data[5], data[16]
if ref_id.startswith('miRNA') or ref_id.startswith('rRNA_human_5S'):
MD_tag = MD_tag.lstrip("MD:Z:")
mut_count = mutate_count(Cigar, MD_tag)
mutevent_num.append(mut_count)
return sorted(mutevent_num)
def get_ratio_list(input_list):
length = len(input_list)
n0 = input_list.count(0)
n1 = input_list.count(1)
n2 = input_list.count(2)
n3 = input_list.count(3)
n4 = input_list.count(4)
n5 = input_list.count(5)
nl6 = length-n0-n1-n2-n3-n4-n5
ratio = [ 1.0*n0/length, 1.0*n1/length, 1.0*n2/length, 1.0*n3/length, 1.0*n4/length, 1.0*n5/length, 1.0*nl6/length ]
numlist = [ n0, n1, n2, n3, n4, n5, nl6 ]
ratio = [ round(it, 5) for it in ratio ]
return ratio, numlist
ROOT = "/Share/home/zhangqf7/lipan/precursor_SHAPEMAP/Final-Dicer-Run/"
DMSO_SMR_SSII_rep1 = ROOT+"NAI_100mm_vivo_SMR_SSII_repX/mapping/DMSO_1.clean.Aligned.sortedByCoord.out.bam"
DMSO_SMR_SSII_rep2 = ROOT+"NAI_100mm_vivo_SMR_SSII_repX/mapping/DMSO_2.clean.Aligned.sortedByCoord.out.bam"
NAI_100mm_vivo_SMR_SSII_rep1 = ROOT+"NAI_100mm_vivo_SMR_SSII_repX/mapping/INVIVO_1.clean.Aligned.sortedByCoord.out.bam"
NAI_100mm_vivo_SMR_SSII_rep2 = ROOT+"NAI_100mm_vivo_SMR_SSII_repX/mapping/INVIVO_2.clean.Aligned.sortedByCoord.out.bam"
NAI_100mm_vitro_SMR_SSII_rep1 = ROOT+"NAI_100mm_vitro_SMR_SSII_repX/mapping/INVITRO_1.clean.Aligned.sortedByCoord.out.bam"
NAI_100mm_vitro_SMR_SSII_rep2 = ROOT+"NAI_100mm_vitro_SMR_SSII_repX/mapping/INVITRO_2.clean.Aligned.sortedByCoord.out.bam"
sample_names = [ 'DMSO_SMR_SSII_rep1','DMSO_SMR_SSII_rep2',
'NAI_100mm_vivo_SMR_SSII_rep1','NAI_100mm_vivo_SMR_SSII_rep2',
'NAI_100mm_vitro_SMR_SSII_rep1','NAI_100mm_vitro_SMR_SSII_rep2' ]
##########################
### Read data
##########################
MutEventCount = {}
for sample_name in sample_names:
print(sample_name)
MutEventCount[sample_name] = count_mutevent_num(eval(sample_name), ratio=None)
##########################
### Count
##########################
sample_rl_list = []
sample_num_list = []
for sample_name in sample_names:
rl, num = get_ratio_list(MutEventCount[sample_name])
sample_rl_list.append(rl)
sample_num_list.append(num)
rl_df = pd.DataFrame(sample_rl_list, columns=['0','1','2','3','4','5','>=6'])
rl_df.index = sample_names
rl_df.to_csv("/Share/home/zhangqf7/figs/rl_df.csv", sep="\t")
num_df = pd.DataFrame(sample_num_list, columns=['0','1','2','3','4','5','>=6'])
num_df.index = sample_names
num_df.to_csv("/Share/home/zhangqf7/figs/num_df.csv", sep="\t")
##########################
### Plot 2 -- barplot : Use all mutations, not 1-2 mutations
##########################
dmso_0_rep1 = rl_df.loc['DMSO_SMR_SSII_rep1','0']
dmso_12_rep1 = 1-dmso_0_rep1
dmso_0_rep2 = rl_df.loc['DMSO_SMR_SSII_rep2','0']
dmso_12_rep2 = 1 - dmso_0_rep2
vivo_0_rep1 = rl_df.loc['NAI_100mm_vivo_SMR_SSII_rep1','0']
vivo_12_rep1 = 1 - vivo_0_rep1
vivo_0_rep2 = rl_df.loc['NAI_100mm_vivo_SMR_SSII_rep2','0']
vivo_12_rep2 = 1 - vivo_0_rep2
vitro_0_rep1 = rl_df.loc['NAI_100mm_vitro_SMR_SSII_rep1','0']
vitro_12_rep1 = 1 - vitro_0_rep1
vitro_0_rep2 = rl_df.loc['NAI_100mm_vitro_SMR_SSII_rep2','0']
vitro_12_rep2 = 1 - vitro_0_rep2
data = []
data.append( (dmso_0_rep1, "dmso", "rep1", "Without mutates") )
data.append( (dmso_12_rep1, "dmso", "rep1", "With mutates") )
data.append( (dmso_0_rep2, "dmso", "rep2", "Without mutates") )
data.append( (dmso_12_rep2, "dmso", "rep2", "With mutates") )
data.append( (vivo_0_rep1, "vivo", "rep1", "Without mutates") )
data.append( (vivo_12_rep1, "vivo", "rep1", "With mutates") )
data.append( (vivo_0_rep2, "vivo", "rep2", "Without mutates") )
data.append( (vivo_12_rep2, "vivo", "rep2", "With mutates") )
data.append( (vitro_0_rep1, "vitro", "rep1", "Without mutates") )
data.append( (vitro_12_rep1, "vitro", "rep1", "With mutates") )
data.append( (vitro_0_rep2, "vitro", "rep2", "Without mutates") )
data.append( (vitro_12_rep2, "vitro", "rep2", "With mutates") )
df_data = pd.DataFrame(data=data, columns=['ratio','type','rep','mutnum'])
sns.barplot(data=df_data, x='type', y='ratio', hue='mutnum')
plt.savefig("figs/xxx.pdf")
plt.show()
|
import numpy as np
import pandas as pd
class VisualizationModel(dict):
'''
classifier_ = {'type':'quantiles', 'category_count': 4, 'color_grades':[(.9,.9,.9),(0,0,1.0)], 'radii_grades': [25,300] }
'''
def __init__(self, inpt={}):
super(VisualizationModel, self).__init__(inpt)
# set up quartile as default
if not 'type' in self:
self['type']='quantiles'
if not 'category_count' in self:
self['category_count']=4
if not 'color_grades' in self:
self['color_grades'] = [(.7,.7,.7),(0,0,1.0)] # ltgrey to blue
if not 'radii_grades' in self:
self['radii_grades'] = [25,400] #low to high
if not 'breaks' in self:
self['breaks']=[]
def setColors(self, colorHigh=None, colorLow=None):
if colorLow != None:
self['color_grades'][0] = colorLow
if colorHigh != None:
self['color_grades'][1] = colorHigh
def setRadii(self, high=None, low=None):
if low != None:
self['radii_grades'][0] = low
if high != None:
self['radii_grades'][1] = high
def getColors(self, colorHigh=None, colorLow=None):
return []
def getRadii(self,minRadius=None,maxRadius=None):
return []
def getBreaks(self,_series):
return []
class QuantileVisualizationModel(VisualizationModel):
def __init__(self, inpt={}):
super(QuantileVisualizationModel, self).__init__(inpt)
# set up quartile as default
if not 'type' in self:
self['type'] = 'quantiles'
if not 'category_count' in self:
self['category_count'] = 4
if not 'color_grades' in self:
self['color_grades'] = [(.9, .9, .9), (0, 0, 1.0)] # ltgrey to blue
if not 'radii_grades' in self:
self['radii_grades'] = [25, 300] # low to high
class CategoryVisualizationModel(VisualizationModel):
def __init__(self, inpt={}):
super(CategoryVisualizationModel, self).__init__(inpt)
# set up quartile as default
if not 'type' in self:
self['type'] = 'categories'
if not 'category_count' in self:
self['category_count'] = 4
if not 'color_grades' in self:
self['color_grades'] = [(.9, .9, .9), (0, 0, 1.0)] # ltgrey to blue
if not 'radii_grades' in self:
self['radii_grades'] = [25, 300] # low to high
class GradientModel(VisualizationModel):
'''
dict is {'type':'quartiles', 'category_count': 4, 'color_grades':colors, 'radii_grades': radii }
# colors = (1,1,1) if tuple then solid single
# colors = [(1,1,1),(1,1,1)] if list and size == 2 low and high
# colors = [(1,1,1),(1,1,1),(1,1,1),...] if list and size > 2 the fixed colors
# radii = 25 if not list then single
# radii = [minRadii,maxRadii] if list and size == 2 high and low
color is (r, g, b)
r is a value from 0.0 to 1.0
g is a value from 0.0 to 1.0
b is a value from 0.0 to 1.0
examples:
ltGrey = (0.9,0.9,0.9)
pureGreen = (0,1.0,0)
pureBlue = (8/254, 123/254, 157/254)
pureYellowGreen = (0,84/254,166/254)
pureWhite = (1.0,1.0,1.0)
pureRed = (1.0,0,0)
'''
def __init__(self, inpt={}):
super(GradientModel, self).__init__(inpt)
def getLtGrey(self):
return (0.9, 0.9, 0.9)
def getGreen(self):
return (0, 1.0, 0)
def getBlue(self):
return (0,0,1.0)
def getRed(self):
return (1.0,0,0)
def __arrayMultiply(self, array, c):
return [element * c for element in array]
def __arraySum(self, a, b):
return map(sum, zip(a, b)) # add a to b
def __intermediate(self, a, b, ratio):
aComponent = self.__arrayMultiply(a, ratio)
bComponent = self.__arrayMultiply(b, 1 - ratio)
return tuple(self.__arraySum(aComponent, bComponent))
def getColors(self, colorHigh=None, colorLow=None):
if colorHigh == None:
colorHigh = self['color_grades'][1]
if colorLow == None:
colorLow = self['color_grades'][0]
steps = self['category_count']
steps = [n / float(steps) for n in range(steps)]
colors = []
for step in steps:
colors.append(self.__intermediate(colorHigh, colorLow, step))
return colors
def getRadii(self,minRadius=None,maxRadius=None):
if minRadius==None:
minRadius = self['radii_grades'][0]
if maxRadius == None:
maxRadius = self['radii_grades'][1]
steps = self['category_count']
step = (maxRadius - minRadius) / steps
rc = np.arange(minRadius, maxRadius, step)
rc[-1] = maxRadius  # make sure the top bin ends exactly at maxRadius
return rc
def getLegendLabels(self, breaks):
rc = []
bottom = 0
for b in breaks:
if bottom == 0:
label_str = '< {:0.4f}'.format( b)
else:
label_str = '{:0.4f} to {:0.4f}'.format(bottom, b)
rc.append(label_str)
bottom = b
label_str = '{:0.4f} +'.format(bottom)
rc.append(label_str)
return rc
def deprecated_getLegendLabels(self, breaks):
rc = []
bottom = 0
for b in breaks:
if bottom == 0:
label_str = '< {:0.4f}'.format( b)
else:
label_str = '{:0.4f} to {:0.4f}'.format(bottom, b)
rc.append(label_str)
bottom = b
label_str = '{:0.4f} +'.format(bottom)
rc.append(label_str)
return rc
def getBreaks(self,_series):
steps = self['category_count']
if isinstance(_series, list):
#print('convert list to series')
_series = pd.Series(_series)
print('type(_series): ',type(_series))
if self['type'] == 'quantiles':
q = np.arange(0.0, 1.0, 1.0/steps)
br = _series.quantile(q)
#print('q: ',q)
#print('br: ', list(br)[1:])
#self['breaks'] = br
self['breaks'] = list(br)[1:]
#_series.quantile([0.25, 0.5, 0.75])
#self['breaks'] = _series.quantile([0.25, 0.5, 0.75])
else:
msg = 'Unknown type found in quantiles...{}'.format(self['type'])
raise AttributeError(msg)
return self['breaks']
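# --- Hedged usage sketch (not part of the original file; see the test functions below) ---
# model = GradientModel(QuantileVisualizationModel())
# colors = model.getColors()          # category_count interpolated (r, g, b) tuples
# radii = model.getRadii()            # category_count radii from low to high
# breaks = model.getBreaks([1, 2, 3, 4, 5, 6, 7, 8])  # quantile break points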
class CategoryFactory:
'''
Make a list of colors per data item.
Make a list of radii per data item.
'''
default_color = (0, 1.0, 0)  # green
default_radius = 25
default_category = 0
#def __init__(self, inpt={}):
# super(CategoryFactory, self).__init__(inpt)
def getDataCategories(self,_series,model):
if isinstance(_series, list):
_series = pd.Series(_series)
rc = []
colors = model.getColors()
radii = model.getRadii()
breaks = model.getBreaks(_series)
# categorize: the lowest class is open-ended below, the highest is open-ended above
# classes [c1, c2, c3, c4]
# four classes create 3 breaks [ b1, b2, b3 ]
category_list=[]
color_list=[]
radii_list=[]
last_pos = len(colors)-1
for v in _series: # values
i = 0
clr = colors[last_pos]# set default color
rad = radii[last_pos] #self.default_radius
k = len(breaks)
for b in breaks:
if v < b:
#
clr = colors[i] #self['colors'][i]
rad = radii[i] #self['radii'][i]
k = i
break
i += 1
color_list.append(clr)#self['color_list'].append(clr)
radii_list.append(rad)
return (color_list, radii_list)
def test_CategoryFactory():
data = [x for x in range(0,8)]
print('data: ', data)
visModel = GradientModel(QuantileVisualizationModel())
category_factory = CategoryFactory()
cat_data = category_factory.getDataCategories(data,visModel)
print('data cats: ')
print('data cats: ',cat_data)
def test_ColorGradient():
print('############ test_ColorGradient')
vals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
colors = GradientModel(VisualizationModel()).getColors() #.gradientColor(pureBlue, pureRed, 6)
print('colors: ',colors )
def test_gradientRadii():
print('############ test_gradientRadii')
vals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
radii = GradientModel(QuantileVisualizationModel()).getRadii() #.gradientRadii(25,100,4)
print('radii: ', radii)
def test_getBreaks():
print('############ test_getBreaks')
vals = [1,2,3,4,5,6,7,8,9,10]
gradient = GradientModel(QuantileVisualizationModel())
gradient.getBreaks(vals)
print('gradient: ', gradient)
def test_VisualizationModel():
print('############ test_VisualizationModel')
visModel = VisualizationModel({'category_count':5})
print('quart', visModel)
def test_QuantileVisualizationModel():
print('############ test_QuantileVisualizationModel')
visModel = QuantileVisualizationModel()
print('quart', visModel)
def main():
test_VisualizationModel()
test_ColorGradient()
test_gradientRadii()
test_getBreaks()
test_QuantileVisualizationModel()
test_CategoryFactory()
if __name__ == "__main__":
# execute only if run as a script
main() |
<gh_stars>0
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
import mrcnn.utils
import mrcnn.model as modellib
from mrcnn import visualize
# Root directory of the project
ROOT_DIR = os.path.abspath("./")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
from samples.coco import coco
#%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
mrcnn.utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = "./dataset/input"
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# names
names = []
indexes = []
for root, dirnames, filenames in os.walk(IMAGE_DIR):
for filename in filenames:
if filename.endswith('.jpg'):
if os.path.isfile("./out/" + filename.replace(".jpg", ".png")):
print("skipping " + filename)
continue
idx = int(filename.replace(".jpg", ""))
indexes.append(idx)
names.append(filename)
if len(names) == 0:
exit(1)
# images
for i in range((len(names))):
filename = names[i]
idx = indexes[i]
image = skimage.io.imread(os.path.join(IMAGE_DIR, filename))
results = model.detect([image], verbose=1)
r = results[0]
visualize.display_instances(idx, image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
|
<gh_stars>0
import random
# The Game Board
def showBoard():
print("+---+---+---+")
print("|", board[0], "|", board[1], "|", board[2], "|")
print("+---+---+---+")
print("|", board[3], "|", board[4], "|", board[5], "|")
print("+---+---+---+")
print("|", board[6], "|", board[7], "|", board[8], "|")
print("+---+---+---+")
def isWinner(char):
# horizontal check
if board[0] == char and board[1] == char and board[2] == char:
return True
elif board[3] == char and board[4] == char and board[5] == char:
return True
elif board[6] == char and board[7] == char and board[8] == char:
return True
# vertical check
if board[0] == char and board[3] == char and board[6] == char:
return True
elif board[1] == char and board[4] == char and board[7] == char:
return True
elif board[2] == char and board[5] == char and board[8] == char:
return True
# diagonal check
if board[0] == char and board[4] == char and board[8] == char:
return True
elif board[2] == char and board[4] == char and board[6] == char:
return True
return False
#to check if the board has been filled or not
def isFull():
if board.count(' ') == 0:
return True
else:
return False
def selectLetter():
letter = ''
while not (letter == 'X' or letter == 'O'):
print('Do you want to be X or O?')
letter = input().upper()
return letter
def toss(char):
if random.randint(0, 1) == char:
return 0
else:
return 1
#Main program start here
done = False
while not done:
board = [' '] * 9
# choosing letter for players
player = []
player1, player2 = '', ''
if selectLetter() == 'X':
player = ['X', 'O']
else:
player = ['O', 'X']
# Toss
print('*'*5, 'TOSS', '*'*5)
print(player[0], 'please type 1 for head or 0 for tail')
x = int(input('>>> '))
if toss(x) == x:
print(player[0], 'won the toss')
player1 = player[0]
player2 = player[1]
else:
print(player[1], 'won the toss')
player1 = player[1]
player2 = player[0]
while True: #inner game loop
showBoard()
while True:
print(player1,'\'s turn. ', end = '')
X = int(input("Please Enter position: "))
if board[X] != 'X' and board[X] != 'O':
board[X] = player1
break
else:
print('Already Taken')
#check whether X Wins
if isWinner(player1):
showBoard()
print(player1, 'Won!')
break
if isFull():
showBoard()
print('Board is full..No one wins')
break
showBoard()
while True:
print(player2,'\'s turn. ', end = '')
O = int(input("Please Enter position: "))
if board[O] != 'X' and board[O] != 'O':
board[O] = player2
break
else:
print('Already Taken!')
if isWinner(player2):
showBoard()
print(player2, 'won!')
break
if isFull():
showBoard()
print('Board is full..No one wins')
break
option = input('Do you want to continue? (y/n)')
if option.lower() == 'y':
continue
else:
print('Thank you for playing Tic Tac Toe. See you again :)')
break
|
import os
import pickle
import timeit
import cProfile
import itertools
import threading
from math import ceil
from binascii import hexlify
from time import perf_counter as now
import synapse.cortex as s_cortex
from numpy import random
NUM_PREEXISTING_TUFOS = 1000
NUM_TUFOS = 100000
NUM_ONE_AT_A_TIME_TUFOS = 100
HUGE_VAL_BYTES = 1000000
HUGE_VAL_RATE = 0.0001
LARGE_VAL_BYTES = 10000
LARGE_VAL_RATE = 0.005
MEDIUM_VAL_BYTES = 100
MEDIUM_VAL_RATE = .1949
SMALL_VAL_BYTES = 5
SMALL_VAL_RATE = .80
# what percent of properties will have integer value
INTEGER_VAL_RATE = .20
AVG_PROPS_PER_TUFO = 7
AVG_PROP_NAME_LEN = 11
NUM_THREADS = 4
NUM_FORMS = 20
def _addRows(rows, core, one_at_a_time=False):
if one_at_a_time:
for row in rows:
core.addRows([row])
else:
core.addRows(rows)
# core.flush()
def _getTufosByIdens(idens, core):
core.getTufosByIdens(idens)
def _getTufoByPropVal(propvals, core):
for p, v in propvals:
core.getTufoByProp(p, v)
def random_normal(avg):
''' Returns a number clustered around avg (sum of two uniform draws -- triangular, a fast stand-in for a normal distribution) '''
return random.randint(1, avg) + random.randint(0, avg + 1)
def random_string(avg):
    num_letters = random_normal(avg)
    # numpy's randint excludes the upper bound, so +26 covers 'a' through 'z'
    return ''.join(chr(random.randint(ord('a'), ord('a') + 26)) for x in range(num_letters))
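# Note: ``random`` here is numpy's module (imported above), and numpy's
# randint() treats the upper bound as exclusive, unlike the stdlib:
#     numpy:  random.randint(0, 2) -> 0 or 1
#     stdlib: random.randint(0, 2) -> 0, 1, or 2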
small_count = 0
medium_count = 0
large_count = 0
huge_count = 0
def random_val_len():
global small_count, medium_count, large_count, huge_count
x = random.random()
prob = SMALL_VAL_RATE
if x < prob:
small_count += 1
return SMALL_VAL_BYTES
prob += MEDIUM_VAL_RATE
if x < prob:
medium_count += 1
return MEDIUM_VAL_BYTES
prob += LARGE_VAL_RATE
if x < prob:
large_count += 1
return LARGE_VAL_BYTES
huge_count += 1
return HUGE_VAL_BYTES
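# random_val_len() draws one uniform sample and walks cumulative probability
# buckets, so the *_VAL_RATE constants act as a discrete distribution over
# value sizes (0.80 + 0.1949 + 0.005 + 0.0001 = 1.0).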
def gen_random_form():
num_props = random_normal(AVG_PROPS_PER_TUFO)
props = [random_string(AVG_PROP_NAME_LEN) for x in range(num_props)]
return props
def gen_random_tufo(form):
iden = hexlify(random.bytes(16)).decode('utf8')
props = {}
for propname in form:
if random.random() <= INTEGER_VAL_RATE:
val = random.randint(-2 ** 62, 2 ** 63)
else:
val = random_string(random_val_len())
props[propname] = val
return (iden, props)
def _rows_from_tufo(tufo):
timestamp = random.randint(1, 2 ** 63)
rows = []
iden = tufo[0]
for p, v in tufo[1].items():
rows.append((iden, p, v, timestamp))
return rows
def flatten(iterable):
return list(itertools.chain.from_iterable(iterable))
def _prepopulate_core(core, rows):
core.addRows(rows)
def nth(iterable, n, default=None):
    "Returns the nth item or a default value"
    return next(itertools.islice(iterable, n, None), default)
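# e.g. nth('abc', 1) -> 'b'; nth('abc', 5) -> None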
def get_random_keyval(d):
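    # numpy's randint excludes len(d), so i is always a valid 0-based index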
i = random.randint(0, len(d))
key = nth(d.keys(), i)
return (key, d[key])
class TestData:
def __init__(self, test_data_fn):
start = now()
if os.path.isfile(test_data_fn):
print("Reading test data...")
            with open(test_data_fn, 'rb') as f:
                self.prepop_rows, self.idens, self.props, self.rows = pickle.load(f)
else:
print("Generating test data...")
random.seed(4) # 4 chosen by fair dice roll. Guaranteed to be random
forms = [gen_random_form() for x in range(NUM_FORMS)]
# FIXME: don't use random.choice!!! Super duper slow
self.prepop_rows = flatten(_rows_from_tufo(gen_random_tufo(random.choice(forms)))
for x in range(NUM_PREEXISTING_TUFOS))
tufos = [gen_random_tufo(random.choice(forms)) for x in range(NUM_TUFOS)]
self.idens = [t[0] for t in tufos]
self.props = [get_random_keyval(t[1]) for t in tufos]
random.shuffle(self.idens)
random.shuffle(self.props)
self.rows = flatten(_rows_from_tufo(x) for x in tufos)
            with open(test_data_fn, 'wb') as f:
                pickle.dump((self.prepop_rows, self.idens, self.props, self.rows), f)
print("Test data generation took: %.2f" % (now() - start))
print('addRows: # Tufos:%8d, # Rows: %8d' % (NUM_TUFOS, len(self.rows)))
print('len count: small:%d, medium:%d, large:%d, huge:%d' %
(small_count, medium_count, large_count, huge_count))
def _run_x(func, data, num_threads, *args, **kwargs):
    chunk_size = ceil(len(data) / num_threads)
    chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
    # iterate over the chunks actually produced; there may be fewer than
    # num_threads when the data set is small
    threads = [threading.Thread(target=func, args=[chunk] + list(args), kwargs=kwargs)
               for chunk in chunks]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
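# Sketch of a direct call (assuming an open core and a TestData instance named
# test_data, as set up in benchmark_cortex below):
#     _run_x(_addRows, test_data.rows, 4, core=core)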
def do_it(cmd, data_str, num_threads, globals, number, repeat, divisor):
if num_threads == 1:
times = timeit.repeat('%s(%s, core)' % (cmd, data_str), globals=globals, number=number, repeat=repeat)
else:
times = timeit.repeat('_run_x(%s, %s, %s, core=core)' % (cmd, data_str, num_threads), globals=globals,
number=number, repeat=repeat)
print_time(cmd, times, divisor)
def profile_it(cmd, globals, number, repeat, divisor):
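    # number, repeat, and divisor are accepted (apparently to mirror do_it's
    # shape) but are unused; profile results are written to lmdb_02.prof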
cProfile.runctx(cmd, globals, {}, filename='lmdb_02.prof')
def benchmark_cortex(test_data, url, cleanup_func, num_threads=1):
core = s_cortex.openurl(url)
_prepopulate_core(core, test_data.prepop_rows)
g = {'_addRows': _addRows, '_getTufosByIdens': _getTufosByIdens, 'core': core,
'test_data': test_data, '_getTufoByPropVal': _getTufoByPropVal, '_run_x': _run_x}
do_it('_addRows', 'test_data.rows', num_threads, g, 1, 1, len(test_data.rows))
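    # for persistent stores, close and reopen the cortex so the read benchmarks
    # below hit storage rather than warm in-process state (inferred intent)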
if cleanup_func:
del core
core = s_cortex.openurl(url)
g['core'] = core
do_it('_getTufosByIdens', 'test_data.idens', num_threads, g, 2, 5, NUM_TUFOS)
do_it('_getTufoByPropVal', 'test_data.props', num_threads, g, 2, 5, NUM_TUFOS)
if cleanup_func:
cleanup_func()
def print_time(label, times, divisor):
t = min(times)
print('%50s: %8.2f (max=%7.2f) %7d %10.6f' % (label, t, max(times), divisor, t / divisor))
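# print_time reports min(times) as the headline figure; per the timeit docs,
# the minimum over repeats is the estimate least affected by system noise,
# while max(times) indicates the spread.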
LMDB_FILE = 'test.lmdb'
SQLITE_FILE = 'test.sqlite'
def cleanup_lmdb():
try:
os.remove(LMDB_FILE)
os.remove(LMDB_FILE + '-lock')
except OSError:
pass
def cleanup_sqlite():
    try:
        os.remove(SQLITE_FILE)
    except OSError:
        pass
def benchmark_all(which_runs, num_threads):
runs = (
('ram://', None),
('sqlite:///:memory:', None),
('sqlite:///' + SQLITE_FILE, cleanup_sqlite),
('lmdb:///%s?lmdb:mapsize=536870912&lmdb:mapslack=536870912' % LMDB_FILE, cleanup_lmdb)
)
test_data = TestData('testdata')
for i, (url, cleanup_func) in enumerate(runs):
if i not in which_runs:
continue
print('%s-threaded benchmarking: %s' % (num_threads, url))
benchmark_cortex(test_data, url, cleanup_func, num_threads)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('which_runs', type=int, nargs='*', default=(0, 1, 2, 3))
parser.add_argument('--num-threads', type=int, default=1)
opts = parser.parse_args()
benchmark_all(opts.which_runs, opts.num_threads)
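# Example invocation (hypothetical filename, assuming synapse is importable):
#     python benchmark_cortex.py 0 1 --num-threads 4
# would run only the ram:// and sqlite :memory: benchmarks with four threads.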
|