input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
import logging
import vtool as vt
import numpy as np
import utool as ut
from vtool._pyflann_backend import pyflann as pyflann
from os.path import basename, exists # NOQA
from wbia.algo.hots import neighbor_index_cache
# import mem_top
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def augment_nnindexer_experiment():
    """
    Time indexer augmentation versus forced full reindexing.

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()
    """
    import wbia

    # build test data
    # ibs = wbia.opendb('PZ_MTEST')
    ibs = wbia.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        # addition_stride = 64
        # addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        # max_ceiling = 4000
        # max_ceiling = 2000
        # max_ceiling = 600
    else:
        # FIX: raise explicitly instead of `assert False` so the check
        # is not stripped under `python -O`
        raise AssertionError('unsupported database: %r' % (ibs.get_dbname(),))
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches so the experiment starts from a cold state
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(
        ut.ProgressIter(_addition_iter, lbl=addition_lbl, freq=1, autoadjust=False)
    )
    time_list_addition = []
    addition_count_list = []
    tmp_cfgstr_list = []
    # FIX: predefine the reindex bookkeeping before the try block; previously
    # a KeyboardInterrupt during the addition phase caused a NameError in the
    # plotting code below, which referenced these names before assignment.
    reindex_label = 'Reindex'
    time_list_reindex = []
    reindex_count_list = []
    # for _ in range(80):
    #     next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        # --- Addition phase: request (possibly augmented) indexers of growing size
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_
                )
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            logger.info('===============\n\n')
            logger.info(ut.repr2(time_list_addition))
            logger.info(ut.repr2(list(map(id, nnindexer_list))))
            logger.info(ut.repr2(tmp_cfgstr_list))
            logger.info(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False
        if IS_SMALL:
            nnindexer_list = []

        # --- Reindex phase: force full rebuilds, walking the sizes backwards
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        for count in reindex_iter:
            logger.info('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            # count = max_num // 16 + ((x % 6) * 1)
            # x += 1
            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack
                )
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            logger.info(
                '[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s'
                % (ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),)
            )
            logger.info(
                '[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s'
                % (len(neighbor_index_cache.NEIGHBOR_CACHE.items()),)
            )
            logger.info(
                '[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s'
                % (ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),)
            )
            logger.info('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            # import cv2
            # import matplotlib as mpl
            # logger.info(mem_top.mem_top(limit=30, width=120,
            #             #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #             ))
            logger.info('L___________________\n\n\n')
        logger.info(ut.repr2(time_list_reindex))
        if IS_SMALL:
            logger.info(ut.repr2(list(map(id, nnindexer_list))))
            logger.info(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        # Interactive recovery so a long experiment can be resumed or inspected.
        logger.info('\n[train] Caught CRTL+C')
        resolution = ''
        while not (resolution.isdigit()):
            logger.info('\n[train] What do you want to do?')
            logger.info('[train] 0 - Continue')
            logger.info('[train] 1 - Embed')
            logger.info('[train] ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            logger.info('resuming training...')
        elif resolution == 1:
            ut.embed()

    import wbia.plottool as pt

    # FIX: `iter(...).next` is the python2 iterator protocol and raises
    # AttributeError on python3; use the `__next__` bound method instead.
    next_fnum = iter(range(0, 1)).__next__
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(
            addition_count_list,
            time_list_addition,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=addition_lbl + ' Time',
        )
    if len(reindex_count_list) > 0:
        pt.plot2(
            reindex_count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_label + ' Time',
        )
    pt.set_figtitle('Augmented indexer experiment')
    pt.legend()
def flann_add_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show
        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> import wbia
        >>> #ibs = wbia.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import wbia
    import utool as ut
    import numpy as np
    import wbia.plottool as pt

    def make_flann_index(vecs, flann_params):
        # Build a fresh FLANN index over the stacked descriptors.
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = wbia.opendb(db=db)

    # Input: per-database experiment sizes
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        # ibs = wbia.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        # FIX: raise explicitly instead of `assert False` so the check
        # is not stripped under `python -O`
        raise AssertionError('unsupported database: %r' % (ibs.get_dbname(),))
    # max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output
    count_list, time_list_reindex = [], []
    count_list2, time_list_addition = [], []

    # Setup
    # all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        # Time building a brand-new index over the first `count` annotations.
        daids = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        # Time appending a single annotation's descriptors to an existing index.
        daids = all_randomize_daids_[count : count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        # Seed index that the addition steps will grow.
        daids = all_randomize_daids_[0 : initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    logger.info('---')
    # FIX: '%.2s' truncated the stringified float to two characters
    # (e.g. '3.'); '%.2f' (two decimal places) was clearly intended.
    logger.info('Reindex took time_list_reindex %.2f seconds' % sum(time_list_reindex))
    logger.info('Addition took time_list_reindex %.2f seconds' % sum(time_list_addition))
    logger.info('---')
    statskw = dict(precision=2, newlines=True)
    logger.info('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    logger.info('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    logger.info('Plotting')
    # with pt.FigureContext:
    # FIX: `iter(...).next` is the python2 iterator protocol and raises
    # AttributeError on python3; use the `__next__` bound method instead.
    next_fnum = iter(range(0, 2)).__next__
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(
            count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_lbl + ' Time',
            dark=False,
        )
    # pt.figure(fnum=next_fnum())
    pt.plot2(
        count_list2,
        time_list_addition,
        marker='-o',
        equal_aspect=False,
        x_label='num_annotations',
        label=addition_lbl + ' Time',
    )
    # FIX: removed stray no-op `pt` expression statement
    pt.legend()
def subindexer_time_experiment():
    """
    Build a plot of annotation count versus FLANN indexer build time.

    TODO: time experiment
    """
    import wbia
    import utool as ut
    from vtool._pyflann_backend import pyflann as pyflann
    import wbia.plottool as pt

    ibs = wbia.opendb(db='PZ_Master0')
    daid_list = ibs.get_valid_aids()
    flann_params = vt.get_flann_params()
    count_list = []
    time_list = []
    for count in ut.ProgressIter(range(1, 301)):
        # Draw a random sample of `count` annotations and time a fresh build.
        shuffled_daids = daid_list[:]
        np.random.shuffle(shuffled_daids)
        sample_daids = shuffled_daids[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(sample_daids))
        with ut.Timer(verbose=False) as timer:
            flann = pyflann.FLANN()
            flann.build_index(vecs, **flann_params)
        count_list.append(count)
        time_list.append(timer.ellapsed)
    pt.plot2(
        np.array(count_list),
        np.array(time_list),
        marker='-',
        equal_aspect=False,
        x_label='num_annotations',
        y_label='FLANN build time',
    )
    # pt.update()
def trytest_incremental_add(ibs):
    r"""
    Exercises incremental indexer construction via the uuid-map greedy set cover.

    Args:
        ibs (IBEISController):

    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --test-test_incremental_add

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb('PZ_MTEST')
        >>> result = trytest_incremental_add(ibs)
        >>> print(result)
    """
    import wbia

    # Overlapping annotation samples used to exercise cache reuse.
    sample_aids = wbia.testdata_aids(a='default:pername=1,mingt=2')
    aids1 = sample_aids[::2]
    aids2 = sample_aids[0:5]
    aids3 = sample_aids[:-1]  # NOQA
    daid_list = aids1  # NOQA
    qreq_ = ibs.new_query_request(aids1, aids1)
    # Build (and thereby cache) indexers for the two overlapping samples.
    nnindexer1 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(aids1, aids1)
    )
    nnindexer2 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(aids2, aids2)
    )

    # TODO: SYSTEM use visual uuids
    items = ibs.get_annot_visual_uuids(aids3)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    # Greedy set cover: which cached indexers cover the requested annotations?
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))
    # Request an indexer only for the annotations no cached indexer covers.
    nnindexer3 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(uncovered_aids, uncovered_aids)
    )

    # NOTE(review): the block below repeats the cover computation over the full
    # sample — presumably to verify the freshly built indexer is now part of
    # the cover; confirm intent before refactoring the duplication away.
    # TODO: SYSTEM use visual uuids
    items = ibs.get_annot_visual_uuids(sample_aids)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    # contextlib.closing(shelve.open(uuid_map_fpath)) as uuid_map:
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))  # NOQA
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))
    # uuid_map_fpath = join(flann_cachedir, 'uuid_map.shelf')
    # uuid_map = shelve.open(uuid_map_fpath)
    # uuid_map[daids_hashid] = visual_uuid_list
| |
for x in self)
def random(self) -> Any:
    """Return a wrapped random element, or wrapped '' when the list is empty."""
    if len(self):
        return wrap(random.choice(self))
    return wrap('')
def last(self) -> Any:
    """Return the wrapped final element, or wrapped '' when the list is empty."""
    if len(self):
        return wrap(list.__getitem__(self, -1))
    return wrap('')
def pop(self) -> Any:
    """Remove and return the wrapped last element; wrapped '' when empty."""
    if not len(self):
        return wrap('')
    tail = wrap(list.__getitem__(self, -1))
    list.__delitem__(self, -1)
    return tail
def all_but_first(self) -> 'List':
    """Return a List of every element except the first (empty List when empty)."""
    if not len(self):
        return List()
    return wrap(list.__getitem__(self, slice(1, len(self))))
def index(self, *args, **kwargs) -> Float:
    """Snap!-style index lookup: Float position, or Float(-1) instead of raising ValueError."""
    try:
        position = list.index(self, *args, **kwargs)
    except ValueError:
        return Float(-1)
    return Float(position)
# FIX: PEP 8 discourages assigning lambdas to names — use named defs, which
# also give the helpers useful reprs/tracebacks and room for docstrings.
def _listify(v):
    """Convert any iterable into a List of wrapped elements."""
    return List(wrap(x) for x in v)

def _dictify(v):
    """Convert a dict into a List of wrapped [key, value] pairs."""
    return List(wrap(x) for x in v.items())

# Maps raw python types to the converter producing their wrapped equivalent
# (see wrap()); types not listed here are returned unchanged by wrap().
_wrappers = { int: Float, float: Float, str: Str, list: _listify, tuple: _listify, set: _listify, dict: _dictify }
def wrap(value: Any) -> Any:
    '''
    Wraps a value in a new type which changes operators like `+`, `-`, `*`, `/`, etc. to act like they do in Snap!.
    If the value is already wrapped (see `is_wrapped`), does nothing and returns the value directly.
    '''
    converter = _wrappers.get(type(value))
    if converter is None:
        return value
    return converter(value)
def is_wrapped(value: Any) -> bool:
    '''
    Checks if the given value is already wrapped, i.e. `wrap` would return it with the same type.
    '''
    rewrapped = wrap(value)
    return type(value) == type(rewrapped)
def rand(a: Any, b: Any) -> Union[Float, List]:
    '''
    Returns a random number in the range `[a, b]`.
    If both `a` and `b` are integer-valued (including floats holding integer values), returns an integer.
    Otherwise, returns a float in the continuous range.
    '''
    def scalar_rand(x, y):
        x, y = +wrap(x), +wrap(y)
        if x == y:
            return wrap(x)
        lo, hi = (x, y) if x < y else (y, x)
        lo_int, hi_int = int(lo), int(hi)
        # Integer endpoints -> discrete uniform; otherwise continuous uniform.
        if lo_int == lo and hi_int == hi:
            return wrap(random.randint(lo_int, hi_int))
        return wrap(lo + random.random() * (hi - lo))
    return _list_binary_op(wrap(a), wrap(b), scalar_rand)
def sxrange(a: Any, b: Any) -> Sequence[Any]:
    '''
    Returns a sequence of numbers starting at `a` and going up to and including `b`,
    increasing or decreasing by `1` each step depending on if `a < b` or `b < a`.
    The initial point, `a`, is always included in the resulting sequence.
    This is similar to `srange` except that it does not have to actually create a list of all the items.
    For instance, `srange(1, 1000000)` would create a (large) list of one million items,
    whereas `sxrange(1, 1000000)` simply generates the numbers one at a time as needed.
    '''
    def scalar_range(x, y):
        x, y = +wrap(x), +wrap(y)
        direction = -1 if y < x else 1
        step_count = math.floor(abs(y - x)) + 1
        return (x + wrap(k * direction) for k in range(step_count))
    return _list_binary_op(wrap(a), wrap(b), scalar_range)
def srange(a: Any, b: Any) -> List:
    '''
    Returns the list of numbers starting at `a` and going up to and including `b`,
    increasing or decreasing by `1` each step depending on if `a < b` or `b < a`.
    The initial point, `a`, is always included in the resulting list.
    Equivalent to collecting all of the sequences returned by `sxrange` into lists.
    '''
    # Materialize the lazy sxrange sequence(s) into List objects.
    return _list_unary_op(sxrange(a, b), List)
def sqrt(value: Any) -> Any:
    """Element-wise square root, mapped recursively over lists."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.sqrt(+v)))
def lnot(value: Any) -> Any:
    """Element-wise logical NOT, mapped recursively over lists."""
    return _list_unary_op(wrap(value), lambda v: not v)
def sin(value: Any) -> Any:
    """Element-wise sine of an angle given in degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.sin(+v * (math.pi / 180))))
def cos(value: Any) -> Any:
    """Element-wise cosine of an angle given in degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.cos(+v * (math.pi / 180))))
def tan(value: Any) -> Any:
    """Element-wise tangent of an angle given in degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.tan(+v * (math.pi / 180))))
def asin(value: Any) -> Any:
    """Element-wise arcsine, returning degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.asin(+v) * (180 / math.pi)))
def acos(value: Any) -> Any:
    """Element-wise arccosine, returning degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.acos(+v) * (180 / math.pi)))
def atan(value: Any) -> Any:
    """Element-wise arctangent, returning degrees (Snap! convention)."""
    return _list_unary_op(wrap(value), lambda v: wrap(math.atan(+v) * (180 / math.pi)))
def get_ord(value: Any) -> Any:
    """Element-wise character-to-code-point conversion (via str then ord)."""
    return _list_unary_op(wrap(value), lambda v: wrap(ord(str(v))))
def get_chr(value: Any) -> Any:
    """Element-wise code-point-to-character conversion (numeric coercion then chr)."""
    return _list_unary_op(wrap(value), lambda v: wrap(chr(+v)))
def identical(a: Any, b: Any) -> Any:
    """Snap!'s 'identical to': identity for two lists, False for list vs non-list, equality otherwise."""
    a, b = wrap(a), wrap(b)
    a_is_list = _is_list(a)
    b_is_list = _is_list(b)
    if a_is_list != b_is_list:
        return False
    if a_is_list:
        return a is b
    return a == b
if __name__ == '__main__':
assert is_wrapped(True)
v = wrap('hello world') ; assert v is wrap(v) and isinstance(v, Str) and isinstance(v, str)
v = wrap(1223847982) ; assert v is wrap(v) and isinstance(v, Float) and isinstance(v, float)
v = wrap(1223847982.453) ; assert v is wrap(v) and isinstance(v, Float) and isinstance(v, float)
v = wrap([1,4,2,5,43]) ; assert v is wrap(v) and isinstance(v, List) and isinstance(v, list)
assert all(is_wrapped(v[i]) for i in range(len(v))) ; v.append('hello world') ; assert all(is_wrapped(v[i]) for i in range(len(v)))
assert all(is_wrapped(x) for x in v) ; v.append(12) ; assert all(is_wrapped(x) for x in v)
v = wrap((1,4,2,5,43)) ; assert v is wrap(v) and isinstance(v, List) and isinstance(v, list)
v = wrap({1,3,2,54}) ; assert v is wrap(v) and isinstance(v, List) and isinstance(v, list)
v = wrap({1:1,3:4,2:2,54:3}) ; assert v is wrap(v) and isinstance(v, List) and isinstance(v, list)
assert all(isinstance(x, List) and len(x) == 2 for x in v)
assert v[1][0] == 3 and v[1][1] == 4 and v.last()[0] == 54 and v.last().last() == 3
v = wrap([]) ; v.append(5) ; assert is_wrapped(v['0'])
v = wrap([{'foo': 'bar'}]) ; assert isinstance(list.__getitem__(v, 0), List) and isinstance(list.__getitem__(list.__getitem__(v, 0), 0), List)
assert list.__getitem__(list.__getitem__(v, 0), 0) == ['foo', 'bar'] and is_wrapped(list.__getitem__(list.__getitem__(list.__getitem__(v, 0), 0), 0))
assert is_wrapped(list.__getitem__(list.__getitem__(list.__getitem__(v, 0), 0), 1))
v = wrap(({'foo': 'bar'},)) ; assert isinstance(list.__getitem__(v, 0), List) and isinstance(list.__getitem__(list.__getitem__(v, 0), 0), List)
assert list.__getitem__(list.__getitem__(v, 0), 0) == ['foo', 'bar'] and is_wrapped(list.__getitem__(list.__getitem__(list.__getitem__(v, 0), 0), 0))
assert is_wrapped(list.__getitem__(list.__getitem__(list.__getitem__(v, 0), 0), 1))
assert rand(5, 5) == 5 and rand(5.5, 5.5) == 5.5
assert rand(5, '5') == wrap(5) and is_wrapped(rand(5, '5')) and is_wrapped(rand(wrap(7), '5')) and is_wrapped(rand(5.4, '5'))
for _ in range(10): assert rand(5, 10) % 1 == 0 and rand(5, 10.0) % 1 == 0 and rand(5.0, 10) % 1 == 0 and rand(5.0, 10.0) % 1 == 0
assert sum(rand(5.0, 10.1) % 1 for _ in range(10)) != 0 and sum(rand(5.1, 10) % 1 for _ in range(10)) != 0
assert sum(rand(5.1, 10.0) % 1 for _ in range(10)) != 0 and sum(rand(5.1, 10.6) % 1 for _ in range(10)) != 0
for _ in range(10): assert 5 <= rand(5, 10) <= 10 and 5 <= rand(10, 5) <= 10
for _ in range(10): assert -5.5 <= rand(7, -5.5) <= 7 and -5.5 <= rand(-1, -5.5) <= -1
assert not bool(wrap(None))
assert bool(wrap(True)) and not bool(wrap(False))
assert bool(wrap([])) and bool(wrap([0])) and bool(wrap([1])) and bool(wrap([1, 5]))
assert bool(wrap(set())) and bool(wrap({0})) and bool(wrap({4, 0})) and bool(wrap({4: 4})) and bool(wrap({4: 4, 0: 0}))
assert not bool(wrap('')) and bool(wrap('hello')) and bool(wrap('7')) and bool(wrap('0')) and bool(wrap('nan'))
assert bool(wrap(7)) and bool(wrap(7.6)) and bool(wrap(-7.6)) and bool(wrap(7)) and bool(wrap(7.6)) and bool(wrap(-7.6))
assert not bool(wrap(0)) and not bool(wrap(0.0)) and not bool(wrap(-0.0)) and not bool(wrap(math.nan))
assert wrap(5) * wrap(5) == 25 and wrap(5) * wrap('5') == 25 and wrap('5') * wrap(5) == 25 and wrap('5') * wrap('5') == 25
assert 5 * wrap(5) == 25 and wrap(5) * 5 == 25 and wrap('5') * 5 == 25 and wrap('5') * '5' == 25
assert 5.25 * wrap(4) == 21 and wrap(5.25) * 4 == 21 and wrap('5.25') * 4 == 21 and wrap('5.25') * '4' == 21
assert isinstance(wrap(5.25) * 4, Float) and isinstance(wrap('5.25') * 4, Float) and isinstance(wrap('5.25') * '4', Float)
assert wrap(1000) ** wrap(1000) == math.inf
assert wrap([1,2,3]) + wrap(['6.0',2,-2]) == [7,4,1] and wrap([1,2,3]) - wrap([6,2,-2]) == [-5,0,5] and wrap([1,2,3]) - wrap([6,2]) == [-5,0] and wrap([1]) - wrap([6,2]) == [-5]
assert wrap([[1,5,2], [1,2], [0], []]) + wrap('4') == [[5,9,6], [5,6], [4], []] and wrap([[1,5,2], [1,2], [0], []]) + '4' == [[5,9,6], [5,6], [4], []]
assert wrap([[1,5,2], [1,2], [0], []]) - wrap('2') == [[-1,3,0], [-1,0], [-2], []]
assert wrap([[1,5,2], [1,2], [0], []]) * wrap('2') == [[2,10,4], [2,4], [0], []]
assert wrap([[1,5,2], [1,2], [0], []]) - '2' == [[-1,3,0], [-1,0], [-2], []]
assert wrap([1,2,3]) + 3 == [4,5,6] and wrap([1,2,3]) + wrap(3) == [4,5,6] and wrap([1,2,3]) + '3' == [4,5,6] and wrap([1,2,3]) + wrap('3') == [4,5,6]
assert 5 + wrap([3,2,5]) == [8,7,10] and wrap(5) + wrap([3,2,5]) == [8,7,10] and '5' + wrap([3,2,5]) == [8,7,10] and wrap('5') + wrap([3,2,5]) == [8,7,10]
assert wrap([4,7,2])[0] == 4 and | |
# Importing the Kratos Library
import KratosMultiphysics
from KratosMultiphysics.python_solver import PythonSolver
import KratosMultiphysics.python_linear_solver_factory as linear_solver_factory
# Import applications
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
from KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid
def CreateSolver(model, custom_settings):
    """Factory entry point used by the Kratos solver-wrapper machinery.

    Keyword arguments:
    model -- the Kratos Model the solver will operate on
    custom_settings -- Kratos Parameters with the solver settings
    """
    return FluidSolver(model, custom_settings)
class FluidSolver(PythonSolver):
"""The base class for fluid dynamics solvers.
This class provides functions for importing and exporting models,
adding nodal variables and dofs and solving each solution step.
Depending on the formulation type, derived classes may require to
override some (or all) the following functions:
_CreateScheme
_CreateConvergenceCriterion
_CreateLinearSolver
_CreateBuilderAndSolver
_CreateSolutionStrategy
The solution strategy, builder_and_solver, etc. should always be retrieved
using the getter functions _GetSolutionStrategy, _GetBuilderAndSolver,
etc. from this base class.
Only the member variables listed below should be accessed directly.
Public member variables:
model -- the model containing the modelpart used to construct the solver.
settings -- Kratos parameters containing solver settings.
"""
def __init__(self, model, settings):
    """Construct the base fluid solver.

    Retrieves (or creates) the main model part named in the settings and
    stamps the domain size on its ProcessInfo.

    Keyword arguments:
    model -- the Kratos Model containing (or receiving) the main model part
    settings -- Kratos Parameters with the solver settings
    """
    super(FluidSolver,self).__init__(model, settings)

    ## Set the element and condition names for the replace settings
    ## These should be defined in derived classes
    self.element_name = None
    self.condition_name = None
    self.min_buffer_size = 3

    # Either retrieve the model part from the model or create a new one
    model_part_name = self.settings["model_part_name"].GetString()

    if model_part_name == "":
        raise Exception('Please provide the model part name as the "model_part_name" (string) parameter!')

    if self.model.HasModelPart(model_part_name):
        self.main_model_part = self.model.GetModelPart(model_part_name)
    else:
        self.main_model_part = self.model.CreateModelPart(model_part_name)

    # Domain size is mandatory; -1 is the "not provided" sentinel.
    domain_size = self.settings["domain_size"].GetInt()
    if domain_size == -1:
        raise Exception('Please provide the domain size as the "domain_size" (int) parameter!')

    self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, domain_size)
def AddVariables(self):
    """Abstract hook: derived solvers must register their nodal solution-step variables here."""
    raise Exception("Trying to call FluidSolver.AddVariables(). Implement the AddVariables() method in the specific derived solver.")
def AddDofs(self):
    """Add velocity and pressure DOFs (with their reactions) to all nodes of the main model part."""
    dof_reaction_pairs = [
        (KratosMultiphysics.VELOCITY_X, KratosMultiphysics.REACTION_X),
        (KratosMultiphysics.VELOCITY_Y, KratosMultiphysics.REACTION_Y),
        (KratosMultiphysics.VELOCITY_Z, KratosMultiphysics.REACTION_Z),
        (KratosMultiphysics.PRESSURE, KratosMultiphysics.REACTION_WATER_PRESSURE),
    ]
    for dof_variable, reaction_variable in dof_reaction_pairs:
        KratosMultiphysics.VariableUtils().AddDof(dof_variable, reaction_variable, self.main_model_part)

    KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, "Fluid solver DOFs added correctly.")
def ImportModelPart(self):
    """Read the main model part using the base-class default implementation."""
    import_settings = self.settings["model_import_settings"]
    self._ImportModelPart(self.main_model_part, import_settings)
def PrepareModelPart(self):
    """Prepare the main model part for the simulation.

    On a fresh (non-restart) run: import material properties, replace the
    generic input elements/conditions by the solver-specific ones, run the
    check-and-prepare process and allocate the solution-step buffer.
    Order matters — materials must be read before replacement/preparation.
    """
    if not self.is_restarted():
        ## Set fluid properties from materials json file
        materials_imported = self._SetPhysicalProperties()
        if not materials_imported:
            KratosMultiphysics.Logger.PrintWarning(self.__class__.__name__, "Material properties have not been imported. Check \'material_import_settings\' in your ProjectParameters.json.")
        ## Replace default elements and conditions
        self._ReplaceElementsAndConditions()
        ## Executes the check and prepare model process
        self._ExecuteCheckAndPrepare()
        ## Set buffer size
        self.main_model_part.SetBufferSize(self.min_buffer_size)
    KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, "Model reading finished.")
def ExportModelPart(self):
    """Write the main model part to an mdpa file named '<input_filename>.out'."""
    ## Model part writing
    output_filename = self.settings["model_import_settings"]["input_filename"].GetString() + ".out"
    model_part_io = KratosMultiphysics.ModelPartIO(output_filename, KratosMultiphysics.IO.WRITE)
    model_part_io.WriteModelPart(self.main_model_part)
    KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, "Model export finished.")
def GetMinimumBufferSize(self):
    """Return the number of time steps the solution-step data buffer must hold."""
    return self.min_buffer_size
def Initialize(self):
    """Abstract hook: derived solvers must set up their solution strategy here."""
    raise Exception("Calling FluidSolver.Initialize() base method. Please implement a custom Initialize() method for your solver.")
def AdvanceInTime(self, current_time):
    """Advance the model part one time step and return the new time.

    Clones the solution-step buffer forward and increments the STEP counter.
    """
    new_time = current_time + self._ComputeDeltaTime()
    self.main_model_part.CloneTimeStep(new_time)
    self.main_model_part.ProcessInfo[KratosMultiphysics.STEP] += 1
    return new_time
def InitializeSolutionStep(self):
    """Forward to the solution strategy once the time buffer is filled."""
    if not self._TimeBufferIsInitialized():
        return
    self._GetSolutionStrategy().InitializeSolutionStep()
def Predict(self):
    """Ask the solution strategy for a prediction once the time buffer is filled."""
    if not self._TimeBufferIsInitialized():
        return
    self._GetSolutionStrategy().Predict()
def SolveSolutionStep(self):
    """Solve the current step and return the convergence flag.

    While the time buffer is still being filled no solve is performed and
    True is returned; otherwise a warning is logged on non-convergence.
    """
    if not self._TimeBufferIsInitialized():
        return True
    is_converged = self._GetSolutionStrategy().SolveSolutionStep()
    if not is_converged:
        process_info = self.main_model_part.ProcessInfo
        msg  = "Fluid solver did not converge for step " + str(process_info[KratosMultiphysics.STEP]) + "\n"
        msg += "corresponding to time " + str(process_info[KratosMultiphysics.TIME]) + "\n"
        KratosMultiphysics.Logger.PrintWarning(self.__class__.__name__, msg)
    return is_converged
def FinalizeSolutionStep(self):
    """Finalize the step in the solution strategy once the time buffer is filled."""
    if not self._TimeBufferIsInitialized():
        return
    self._GetSolutionStrategy().FinalizeSolutionStep()
def Check(self):
    """Run the solution strategy's sanity checks."""
    self._GetSolutionStrategy().Check()
def Clear(self):
    """Clear the internal data of the solution strategy."""
    self._GetSolutionStrategy().Clear()
def GetComputingModelPart(self):
    """Return the computational sub model part, raising if it was not created yet."""
    computing_name = "fluid_computational_model_part"
    if not self.main_model_part.HasSubModelPart(computing_name):
        raise Exception("The ComputingModelPart was not created yet!")
    return self.main_model_part.GetSubModelPart(computing_name)
## FluidSolver specific methods.
def _TimeBufferIsInitialized(self):
    """Return True once enough steps have been taken to fill the solution-step buffer."""
    # We always have one extra old step (step 0, read from input)
    return self.main_model_part.ProcessInfo[KratosMultiphysics.STEP] + 1 >= self.GetMinimumBufferSize()
def _ReplaceElementsAndConditions(self):
    """Replace the generic input elements/conditions by the solver-specific ones.

    Builds names of the form '<base><dim>D<nnodes>N' from self.element_name /
    self.condition_name (which derived classes must set) and runs the
    ReplaceElementsAndConditionsProcess with them.
    """
    ## Get number of nodes and domain size
    elem_num_nodes = self._GetElementNumNodes()
    cond_num_nodes = self._GetConditionNumNodes()
    domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]

    ## If there are no elements and/or conditions, default to triangles/tetra meshes to avoid breaking the ReplaceElementsAndConditionsProcess
    ## This only affects the input name (if there are no elements or conditions to replace, nothing is replaced).
    if elem_num_nodes == 0:
        elem_num_nodes = domain_size + 1
    if cond_num_nodes == 0:
        cond_num_nodes = domain_size

    ## Complete the element name
    if (self.element_name is not None):
        new_elem_name = self.element_name + str(int(domain_size)) + "D" + str(int(elem_num_nodes)) + "N"
    else:
        raise Exception("There is no element name. Define the self.element_name string variable in your derived solver.")

    ## Complete the condition name
    if (self.condition_name is not None):
        new_cond_name = self.condition_name + str(int(domain_size)) + "D" + str(int(cond_num_nodes)) + "N"
    else:
        raise Exception("There is no condition name. Define the self.condition_name string variable in your derived solver.")

    ## Set the element and condition names in the Json parameters
    #self.settings["element_replace_settings"] = KratosMultiphysics.Parameters("""{}""")
    self.settings.AddValue("element_replace_settings", KratosMultiphysics.Parameters("""{}"""))
    self.settings["element_replace_settings"].AddEmptyValue("element_name").SetString(new_elem_name)
    self.settings["element_replace_settings"].AddEmptyValue("condition_name").SetString(new_cond_name)

    ## Call the replace elements and conditions process
    KratosMultiphysics.ReplaceElementsAndConditionsProcess(self.main_model_part, self.settings["element_replace_settings"]).Execute()
def _GetElementNumNodes(self):
    """Return the node count of the first local element, synchronized as a max across MPI ranks (0 if no rank has elements)."""
    if self.main_model_part.NumberOfElements() != 0:
        element_num_nodes = len(self.main_model_part.Elements.__iter__().__next__().GetNodes())
    else:
        element_num_nodes = 0
    # Ranks with no local elements report 0; MaxAll yields the real count.
    element_num_nodes = self.main_model_part.GetCommunicator().GetDataCommunicator().MaxAll(element_num_nodes)
    return element_num_nodes
def _GetConditionNumNodes(self):
    """Return the node count of the first local condition, synchronized as a max across MPI ranks (0 if no rank has conditions)."""
    if self.main_model_part.NumberOfConditions() != 0:
        condition_num_nodes = len(self.main_model_part.Conditions.__iter__().__next__().GetNodes())
    else:
        condition_num_nodes = 0
    # Ranks with no local conditions report 0; MaxAll yields the real count.
    condition_num_nodes = self.main_model_part.GetCommunicator().GetDataCommunicator().MaxAll(condition_num_nodes)
    return condition_num_nodes
def _ExecuteCheckAndPrepare(self):
    """Run the fluid check-and-prepare process (builds the computational model part from the volume and skin parts)."""
    ## Check that the input read has the shape we like
    prepare_model_part_settings = KratosMultiphysics.Parameters("{}")
    prepare_model_part_settings.AddValue("volume_model_part_name",self.settings["volume_model_part_name"])
    prepare_model_part_settings.AddValue("skin_parts",self.settings["skin_parts"])
    prepare_model_part_settings.AddValue("assign_neighbour_elements_to_conditions",self.settings["assign_neighbour_elements_to_conditions"])

    check_and_prepare_model_process_fluid.CheckAndPrepareModelProcess(self.main_model_part, prepare_model_part_settings).Execute()
def _ComputeDeltaTime(self):
# Automatic time step computation according to user defined CFL number
if (self.settings["time_stepping"]["automatic_time_step"].GetBool()):
delta_time = self.GetEstimateDtUtility().EstimateDt()
# User-defined delta time
else:
delta_time = self.settings["time_stepping"]["time_step"].GetDouble()
return delta_time
def _SetPhysicalProperties(self):
# Check if the fluid properties are provided using a .json file
materials_filename = self.settings["material_import_settings"]["materials_filename"].GetString()
if (materials_filename != ""):
# Add constitutive laws and material properties from json file to model parts.
material_settings = KratosMultiphysics.Parameters("""{"Parameters": {"materials_filename": ""}} """)
material_settings["Parameters"]["materials_filename"].SetString(materials_filename)
KratosMultiphysics.ReadMaterialsUtility(material_settings, self.model)
materials_imported = True
else:
materials_imported = False
# If the element uses nodal material properties, transfer them to the nodes
if self.element_has_nodal_properties:
self._SetNodalProperties()
return materials_imported
def _SetNodalProperties(self):
err_msg = "Calling base FluidSolver \'_SetNodalProperties\' method.\n"
err_msg += "This must be implemented in the derived solver in accordance with the element formulation."
raise Exception(err_msg)
# TODO: I THINK THIS SHOULD BE MOVED TO THE BASE PYTHON SOLVER
def is_restarted(self):
# this function avoids the long call to ProcessInfo and is also safer
# in case the detection of a restart is changed later
return self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]
def GetEstimateDtUtility(self):
if not hasattr(self, '_estimate_dt_utility'):
self._estimate_dt_utility = self._CreateEstimateDtUtility()
return self._estimate_dt_utility
def _GetScheme(self):
if not hasattr(self, '_scheme'):
self._scheme = self._CreateScheme()
return self._scheme
def _GetConvergenceCriterion(self):
if not hasattr(self, '_convergence_criterion'):
self._convergence_criterion = self._CreateConvergenceCriterion()
return self._convergence_criterion
def _GetLinearSolver(self):
if not hasattr(self, '_linear_solver'):
self._linear_solver = self._CreateLinearSolver()
return self._linear_solver
def _GetBuilderAndSolver(self):
if not hasattr(self, '_builder_and_solver'):
self._builder_and_solver = self._CreateBuilderAndSolver()
return self._builder_and_solver
def _GetSolutionStrategy(self):
if not hasattr(self, '_solution_strategy'):
self._solution_strategy = self._CreateSolutionStrategy()
return self._solution_strategy
def _CreateEstimateDtUtility(self):
estimate_dt_utility = KratosCFD.EstimateDtUtility(
self.GetComputingModelPart(),
self.settings["time_stepping"])
return estimate_dt_utility
    def _CreateScheme(self):
        """Create the time scheme used by the solution strategy.

        Two families are handled:
        * element-managed time integration ("fake" static scheme; only the
          "bdf2" elemental scheme is supported), and
        * scheme-managed time integration ("bossak", "bdf2" or "steady").

        Returns:
            The constructed Kratos scheme object.

        Raises:
            Exception: if the requested "time_scheme" is not available for
                the selected integration mode.
        """
        domain_size = self.GetComputingModelPart().ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]
        # Cases in which the element manages the time integration
        if self.element_integrates_in_time:
            # "Fake" scheme for those cases in where the element manages the time integration
            # It is required to perform the nodal update once the current time step is solved
            scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticSchemeSlip(
                domain_size,
                domain_size + 1)
            # In case the BDF2 scheme is used inside the element, the BDF time discretization utility is required to update the BDF coefficients
            if (self.settings["time_scheme"].GetString() == "bdf2"):
                time_order = 2
                self.time_discretization = KratosMultiphysics.TimeDiscretization.BDF(time_order)
            else:
                err_msg = "Requested elemental time scheme \"" + self.settings["time_scheme"].GetString()+ "\" is not available.\n"
                err_msg += "Available options are: \"bdf2\""
                raise Exception(err_msg)
        # Cases in which a time scheme manages the time integration
        else:
            # Bossak time integration scheme
            if self.settings["time_scheme"].GetString() == "bossak":
                if self.settings["consider_periodic_conditions"].GetBool() == True:
                    # Periodic variant matches periodic pairs through PATCH_INDEX.
                    scheme = KratosCFD.ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
                        self.settings["alpha"].GetDouble(),
                        domain_size,
                        KratosCFD.PATCH_INDEX)
                else:
                    scheme = KratosCFD.ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
                        self.settings["alpha"].GetDouble(),
                        self.settings["move_mesh_strategy"].GetInt(),
                        domain_size)
            # BDF2 time integration scheme
            elif self.settings["time_scheme"].GetString() == "bdf2":
                scheme = KratosCFD.BDF2TurbulentScheme()
            # Time scheme for steady state fluid solver
            elif self.settings["time_scheme"].GetString() == "steady":
                scheme = KratosCFD.ResidualBasedSimpleSteadyScheme(
                    self.settings["velocity_relaxation"].GetDouble(),
                    self.settings["pressure_relaxation"].GetDouble(),
                    domain_size)
            else:
                err_msg = "Requested time scheme " + self.settings["time_scheme"].GetString() + " is not available.\n"
                err_msg += "Available options are: \"bossak\", \"bdf2\" and \"steady\""
                raise Exception(err_msg)
        return scheme
def _CreateLinearSolver(self):
linear_solver_configuration = self.settings["linear_solver_settings"]
return linear_solver_factory.ConstructSolver(linear_solver_configuration)
def _CreateConvergenceCriterion(self):
if self.settings["time_scheme"].GetString() == "steady":
convergence_criterion = KratosMultiphysics.ResidualCriteria(
self.settings["relative_velocity_tolerance"].GetDouble(),
self.settings["absolute_velocity_tolerance"].GetDouble())
else:
convergence_criterion = KratosMultiphysics.MixedGenericCriteria(
[(KratosMultiphysics.VELOCITY, self.settings["relative_velocity_tolerance"].GetDouble(), self.settings["absolute_velocity_tolerance"].GetDouble()),
(KratosMultiphysics.PRESSURE, self.settings["relative_pressure_tolerance"].GetDouble(), self.settings["absolute_pressure_tolerance"].GetDouble())])
convergence_criterion.SetEchoLevel(self.settings["echo_level"].GetInt())
return convergence_criterion
def _CreateBuilderAndSolver(self):
linear_solver = self._GetLinearSolver()
if self.settings["consider_periodic_conditions"].GetBool():
builder_and_solver = KratosCFD.ResidualBasedBlockBuilderAndSolverPeriodic(
linear_solver,
KratosCFD.PATCH_INDEX)
else:
builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver)
return builder_and_solver
def _CreateSolutionStrategy(self):
analysis_type = self.settings["analysis_type"].GetString()
if analysis_type == "linear":
solution_strategy = self._CreateLinearStrategy()
elif analysis_type == "non_linear":
solution_strategy = self._CreateNewtonRaphsonStrategy()
else:
err_msg = "The requested analysis type \"" + analysis_type + "\" is not available!\n"
err_msg += "Available options are: \"linear\", \"non_linear\""
raise Exception(err_msg)
return solution_strategy
def _CreateLinearStrategy(self):
computing_model_part = self.GetComputingModelPart()
time_scheme = self._GetScheme()
builder_and_solver = self._GetBuilderAndSolver()
calculate_norm_dx = False
return KratosMultiphysics.ResidualBasedLinearStrategy(
computing_model_part,
time_scheme,
builder_and_solver,
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
calculate_norm_dx,
self.settings["move_mesh_flag"].GetBool())
def _CreateNewtonRaphsonStrategy(self):
computing_model_part = self.GetComputingModelPart()
time_scheme = self._GetScheme()
convergence_criterion = self._GetConvergenceCriterion()
builder_and_solver | |
<reponame>dhermes/persistent-cal<gh_stars>1-10
#!/usr/bin/python
# Copyright (C) 2010-2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes for persistent-cal."""
__author__ = '<EMAIL> (<NAME>)'
# General libraries
import datetime
import logging
# App engine specific libraries
from google.appengine.ext import ndb
# App specific libraries
from custom_exceptions import InappropriateAPIAction
from custom_exceptions import MissingUID
from custom_exceptions import UnexpectedDescription
from google_api_utils import AttemptAPIAction
import time_utils
CALENDAR_ID = '<EMAIL>'
class TimeKeyword(ndb.Model):  # pylint:disable-msg=R0904
  """Model pairing a time string with the API keyword describing it.

  The API specification represents times either as
  {'dateTime': '2012-01-01T12:00:00.000Z'} or as {'date': '2012-01-01'},
  so both the string value and its keyword are kept together.
  """
  # pylint:disable-msg=E1101
  keyword = ndb.StringProperty(required=True)
  value = ndb.StringProperty(required=True)

  @classmethod
  # pylint:disable-msg=C0103
  def from_ical_event(cls, ical_event, ical_attr):
    """Builds a TimeKeyword by parsing one attribute of an iCal event.

    Args:
      ical_event: an icalendar.cal.Event object to be parsed
      ical_attr: the attribute to be read from the iCal event

    Returns:
      A new TimeKeyword holding the formatted time
    """
    formatted = time_utils.FormatTime(ical_event.get(ical_attr).dt)
    # Values ending in 'Z' are full UTC timestamps; everything else is a date.
    if formatted.endswith('Z'):
      return cls(keyword='dateTime', value=formatted)
    return cls(keyword='date', value=formatted)

  def as_dict(self):  # pylint:disable-msg=C0103
    """Returns the single-entry dictionary {keyword: value}."""
    return {self.keyword: self.value}

  def to_datetime(self):  # pylint:disable-msg=C0103
    """Returns the TimeKeyword as a datetime.datetime.

    This will likely throw an error if keyword is not one of date or dateTime.

    Returns:
      A datetime.datetime instance parsed from the values
    """
    format_by_keyword = {'date': '%Y-%m-%d',
                         'dateTime': '%Y-%m-%dT%H:%M:%S.000Z'}
    time_parse = format_by_keyword.get(self.keyword)
    return datetime.datetime.strptime(self.value, time_parse)

  def __eq__(self, other):
    """Equality based solely on the keyword and value attributes.

    Args:
      other: The other value to be compared against
    """
    return (isinstance(other, TimeKeyword)
            and self.keyword == other.keyword
            and self.value == other.value)

  def __ne__(self, other):
    """Negation of __eq__.

    Args:
      other: The other value to be compared against
    """
    return not self.__eq__(other)

  def __repr__(self):
    return 'TimeKeyword({!r})'.format(self.as_dict())
def ConvertedDescription(ical_event):
  """Parses and converts a description from an iCal event.

  Args:
    ical_event: an icalendar.cal.Event object to be parsed

  Returns:
    Two strings description and location parsed from {ical_event}

  Raises:
    UnexpectedDescription: if a non "item-" event's description does not
        contain exactly one ' is in {location} ' separator
  """
  uid = unicode(ical_event.get('uid', ''))
  description = unicode(ical_event.get('description', ''))
  location = unicode(ical_event.get('location', ''))

  # 'No destination specified' never matches its counterpart phrasing in the
  # description, so normalize it before comparing.
  if location == 'No destination specified':
    location = 'an unspecified location'

  # Events whose UID does not start with 'item-' embed a name before the
  # location marker; strip it and re-phrase around the location.
  if not uid.startswith('item-'):
    target = ' is in {} '.format(location)
    if description.count(target) != 1:
      raise UnexpectedDescription(description)
    _, trailing = description.split(target)
    description = 'In {location} {description}'.format(
        location=location, description=trailing)

  return description, location
class Event(ndb.Model):  # pylint:disable-msg=R0904
  """Holds data for a calendar event (including shared attendees)."""
  # pylint:disable-msg=E1101
  description = ndb.TextProperty(default='')
  start = ndb.StructuredProperty(TimeKeyword, required=True)
  end = ndb.StructuredProperty(TimeKeyword, required=True)
  location = ndb.StringProperty(default='')
  summary = ndb.StringProperty(required=True)
  attendees = ndb.UserProperty(repeated=True)
  gcal_edit = ndb.StringProperty()
  sequence = ndb.IntegerProperty(default=0)

  def insert(self, credentials=None):  # pylint:disable-msg=C0103
    """Will insert the event into GCal and then put the values into datastore.

    Args:
      credentials: An OAuth2Credentials object used to build a service object.
          In the case the credentials is the default value of None, future
          methods will attempt to get credentials from the default credentials.

    Returns:
      A boolean value indicating whether the operation was successful.

    Raises:
      InappropriateAPIAction in the case that a corresponding GCal event has
          already been inserted
    """
    if self.gcal_edit is not None:
      raise InappropriateAPIAction('Insert attempted when id already set.')

    event_data = self.as_dict()
    # A new event has no id yet; don't send the null 'id' to the API.
    event_data.pop('id')
    inserted_event = AttemptAPIAction('insert', credentials=credentials,
                                      calendarId=CALENDAR_ID, body=event_data)
    if inserted_event is None:
      return False  # failed

    self.gcal_edit = inserted_event['id']
    self.sequence = inserted_event.get('sequence', 0)
    self.put()
    return True

  def update(self, credentials=None):  # pylint:disable-msg=C0103
    """Will update the event in GCal and then put updated values to datastore.

    Args:
      credentials: An OAuth2Credentials object used to build a service object.
          In the case the credentials is the default value of None, future
          methods will attempt to get credentials from the default credentials.

    Returns:
      A boolean value indicating whether the operation was successful.

    Raises:
      InappropriateAPIAction in the case that there is no GCal event to update
    """
    if self.gcal_edit is None:
      raise InappropriateAPIAction('Update attempted when id not set.')

    log_msg = '{} updated'.format(self.gcal_edit)
    updated_event = AttemptAPIAction('update', log_msg=log_msg,
                                     credentials=credentials,
                                     calendarId=CALENDAR_ID,
                                     eventId=self.gcal_edit,
                                     body=self.as_dict())
    if updated_event is None:
      return False  # failed

    sequence = updated_event.get('sequence', None)
    if sequence is not None:
      self.sequence = sequence
    self.put()
    return True

  # pylint:disable-msg=C0103,W0221
  def delete(self, credentials=None):
    """Will delete the event in GCal and then delete from the datastore.

    Args:
      credentials: An OAuth2Credentials object used to build a service object.
          In the case the credentials is the default value of None, future
          methods will attempt to get credentials from the default credentials.

    Raises:
      InappropriateAPIAction in the case that there is no GCal event to delete
    """
    if self.gcal_edit is None:
      # BUGFIX: this message previously said 'Update attempted', which was a
      # copy-paste error from update() and contradicted the docstring.
      raise InappropriateAPIAction('Delete attempted when id not set.')

    log_msg = '{} deleted'.format(self.gcal_edit)
    delete_response = AttemptAPIAction('delete', log_msg=log_msg,
                                       credentials=credentials,
                                       calendarId=CALENDAR_ID,
                                       eventId=self.gcal_edit)
    if delete_response is None:
      # API delete failed; keep the datastore entity so we can retry later.
      return

    self.key.delete()

  @classmethod
  # pylint:disable-msg=C0103
  def from_ical_event(cls, ical_event, current_user, credentials=None):
    """Class method to update/add an event from an ical_event.

    It either retrieves an existing instance and updates it, or if no such
    object exists, creates a new one with the attributes from the ical_event.

    Args:
      ical_event: an icalendar.cal.Event object to be parsed
      current_user: a User instance corresponding to the user that is updating
      credentials: An OAuth2Credentials object used to build a service object.
          In the case the credentials is the default value of None, future
          methods will attempt to get credentials from the default credentials.

    Returns:
      A pair event, failed where event is an Event object that has been inserted
          or updated and failed is a boolean indicating failure (or lack of).

    Raises:
      MissingUID in the case that there is no UID in the iCal event
    """
    uid = ical_event.get('uid', None)
    if uid is None:
      raise MissingUID(ical_event)
    # convert from type icalendar.prop.vText to unicode
    uid = unicode(uid)

    event_data = {}

    summary = ical_event.get('summary', None)
    if not summary:
      summary = '(No Title)'
    # convert from type icalendar.prop.vText to unicode
    event_data['summary'] = unicode(summary)

    description, location = ConvertedDescription(ical_event)
    event_data['description'] = description
    event_data['location'] = location

    event_data['start'] = TimeKeyword.from_ical_event(ical_event, 'dtstart')
    event_data['end'] = TimeKeyword.from_ical_event(ical_event, 'dtend')

    event = ndb.Key(cls, uid).get()
    if event is not None:
      # Existing event: apply attribute changes and only hit the API when
      # something actually changed.
      changed = False
      for attr, value in event_data.iteritems():
        if getattr(event, attr) != value:
          setattr(event, attr, value)
          logging.info('{attr} changed for {uid}'.format(attr=attr, uid=uid))
          changed = True

      if current_user not in event.attendees:  # pylint:disable-msg=E1103
        event.attendees.append(current_user)  # pylint:disable-msg=E1103
        logging.info('attendees changed for {uid}'.format(uid=uid))
        changed = True

      success = True
      if changed:
        # pylint:disable-msg=E1103
        success = event.update(credentials=credentials)
      return event, not success
    else:
      # pylint:disable-msg=W0142
      event = cls(key=ndb.Key(cls, uid), attendees=[current_user], **event_data)
      success = event.insert(credentials=credentials)
      return event, not success

  @ndb.ComputedProperty
  def end_date(self):  # pylint:disable-msg=C0103
    """Derived property that turns end into a date string."""
    end_datetime = self.end.to_datetime()
    end_date = end_datetime.date()
    return end_date.strftime('%Y-%m-%d')

  def attendee_emails(self):  # pylint:disable-msg=C0103
    """Returns a list of dictionaries corresponding to attendee emails."""
    return [{'email': attendee.email()} for attendee in self.attendees]

  def as_dict(self):  # pylint:disable-msg=C0103
    """Returns the Event as a dictionary corresponding to the API spec.

    Returns:
      A dictionary to be used with the API client library representing all
          the data in the model object.
    """
    return {'start': self.start.as_dict(),
            'end': self.end.as_dict(),
            'summary': self.summary,
            'location': self.location,
            'description': self.description,
            'id': self.gcal_edit,
            'sequence': self.sequence,
            'attendees': self.attendee_emails()}

  def __repr__(self):
    return 'Event(name={})'.format(self.key.id())
class UserCal(ndb.Model): # pylint:disable-msg=R0903
"""Holds data for a calendar event (including shared owners)."""
# pylint:disable-msg=E1101
owner = ndb.UserProperty(required=True)
calendars = ndb.StringProperty(repeated=True)
update_intervals = ndb.IntegerProperty(repeated=True)
upcoming = ndb.StringProperty(repeated=True)
def put(self): # pylint:disable-msg=C0103
"""Customized put function that first sorts the list in upcoming."""
self.upcoming = | |
<reponame>okotaku/timmextension<gh_stars>0
# --------------------------------------------------------
# Model from official source: https://github.com/Chenglin-Yang/LESA_classification # noqa
# --------------------------------------------------------
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from einops import rearrange
from mmcv.cnn import build_conv_layer
from timm.models.features import FeatureInfo
from timm.models.registry import register_model
from torch import Tensor, einsum, nn
from .helpers import timmextension_build_model_with_cfg
default_cfgs = {
'lesa_resnet50': {
'url':
'https://github.com/okotaku/timmextension/releases/download/w_lesa/lesa_resnet50.pth' # noqa
},
'lesa_wrn50': {
'url':
'https://github.com/okotaku/timmextension/releases/download/w_lesa/lesa_wrn50.pth' # noqa
},
}
def expand_dim(t, dim, k):
    """Insert a new axis of size ``k`` at position ``dim``.

    The result is an expanded (broadcast) view; no data is copied.
    """
    t = t.unsqueeze(dim=dim)
    target_shape = list(t.shape)
    target_shape[dim] = k
    return t.expand(target_shape)
def rel_to_abs(x):
    """Convert relative-position logits to absolute-position logits.

    Standard "skewing" trick: pad one column, flatten, pad the tail, and
    reshape so each query row lines up with its absolute key positions.

    The previous implementation pulled in einops.rearrange for what is a
    plain flatten; a native ``reshape`` does the same with no extra
    dependency on this code path.

    Args:
        x: tensor of shape (b, heads, L, 2*L - 1) holding logits indexed by
            relative offset.

    Returns:
        Tensor of shape (b, heads, L, L) indexed by absolute position.
    """
    b, h, l, _, device, dtype = *x.shape, x.device, x.dtype
    dd = {'device': device, 'dtype': dtype}
    # Pad one column so each row is 2*l long.
    col_pad = torch.zeros((b, h, l, 1), **dd)
    x = torch.cat((x, col_pad), dim=3)
    # Flatten the row/offset axes together (was: rearrange 'b h l c -> b h (l c)').
    flat_x = x.reshape(b, h, l * 2 * l)
    # Pad l-1 entries so the buffer reshapes into (l+1, 2*l-1) rows.
    flat_pad = torch.zeros((b, h, l - 1), **dd)
    flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
    final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)
    # The first l rows, offset by l-1 columns, are the absolute logits.
    final_x = final_x[:, :, :l, (l - 1):]
    return final_x
def relative_logits_1d(q, rel_k):
    """Compute relative-position logits along the width axis.

    The previous implementation used einops.rearrange for a plain reshape;
    a native ``reshape`` is equivalent and drops the extra dependency on
    this code path.

    Args:
        q: queries of shape (b, heads, h, w, dim).
        rel_k: relative-position embedding of shape (2*w - 1, dim).

    Returns:
        Tensor of shape (b, heads, h, h, w, w): per-row logits converted to
        absolute positions and broadcast over a new target-row axis.
    """
    b, heads, h, w, dim = q.shape
    # Contract channels against every relative-offset embedding.
    logits = einsum('b h x y d, r d -> b h x y r', q, rel_k)
    # Fold heads and rows so rel_to_abs sees (b, heads*h, w, 2w-1)
    # (was: rearrange 'b h x y r -> b (h x) y r').
    logits = logits.reshape(b, heads * h, w, 2 * w - 1)
    logits = rel_to_abs(logits)
    logits = logits.reshape(b, heads, h, w, w)
    # The same row logits apply to every target row.
    logits = expand_dim(logits, dim=3, k=h)
    return logits
class LESA(nn.Module):
    """Locally-Enhanced Self-Attention block.

    Computes ``output = sigmoid(gate) * binary + unary`` where:
      * "binary" is multi-head self-attention over the flattened H*W
        positions, with relative position embeddings selected by ``pe_type``
        ('classification' or 'detection_qr');
      * "unary" is a 3x3 convolution branch (deformable when ``dcn`` is
        given);
      * the gate comes from a small conv "reasoning"/"projection" stack run
        on the concatenation of both terms.

    ``with_cp_UB_terms_only`` checkpoints only the unary and binary terms
    (trades recompute for activation memory).
    """

    def __init__(
        self,
        in_planes,
        out_planes,
        groups,
        type,
        pe_type,
        df_channel_shrink,
        df_kernel_size,
        df_group,
        with_cp_UB_terms_only,
        kernel_size=56,
        stride=1,
        bias=False,
        dcn=None,
        **kwargs,
    ):
        # Channels must split evenly across the attention heads (groups).
        assert (in_planes % groups == 0) and (out_planes % groups == 0)
        super().__init__()
        assert type == 'LESA'
        self.pe_type = pe_type
        self.with_cp = with_cp_UB_terms_only
        # kernel_size doubles as the (square) feature-map side length here.
        self.fmap_size = kernel_size
        self.branch_planes = out_planes
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.groups = groups
        # Per-head channels: q and k each take half of out_planes per group.
        self.qk_planes = out_planes // groups // 2
        self.v_planes = self.branch_planes // groups
        # Attention runs over the flattened H*W positions.
        kernel_size = kernel_size**2
        self.kernel_size = kernel_size
        self.stride = stride
        self.bias = bias
        # Multi-head self attention
        self.qkv_transform = nn.Conv1d(
            in_planes,
            (self.out_planes + self.branch_planes),
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            bias=False,
        )
        self.bn_qkv = nn.BatchNorm1d(self.out_planes + self.branch_planes)
        if pe_type == 'classification':
            # Three similarity maps (qk, qr, kr) and doubled output (sv, sve).
            self.bn_similarity = nn.BatchNorm2d(groups * 3)
            self.bn_output = nn.BatchNorm1d(self.branch_planes * 2)
        elif pe_type == 'detection_qr':
            # Two similarity maps (qk, qr) and a single output term (sv).
            self.bn_output = nn.BatchNorm1d(self.branch_planes)
            self.bn_similarity = nn.BatchNorm2d(groups * 2)
        else:
            raise NotImplementedError
        ReaPlanes = self.branch_planes
        # Unary branch: a 3x3 conv (deformable when a dcn config is given).
        if dcn is not None:
            x_layers = [
                build_conv_layer(
                    dcn,
                    in_planes,
                    self.branch_planes,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    dilation=1,
                    groups=groups,
                    bias=False,
                )
            ]
        else:
            x_layers = [
                nn.Conv2d(
                    in_planes,
                    self.branch_planes,
                    kernel_size=3,
                    padding=1,
                    groups=groups,
                    bias=False,
                )
            ]
        if groups != 1:
            # Extra 1x1 conv mixes channels across groups.
            x_layers += [
                nn.Conv2d(
                    self.branch_planes,
                    self.branch_planes,
                    kernel_size=1,
                    bias=False,
                )
            ]
        self.x_transform = nn.Sequential(*x_layers)
        self.bn_x = nn.BatchNorm2d(self.branch_planes)
        # Gate "reasoning" stack: progressively shrinks the concatenated
        # unary+binary channels as dictated by df_channel_shrink.
        r_layers = []
        InChannels = self.branch_planes * 2
        r_layers += [nn.ReLU(inplace=True)]
        for n_idx in range(len(df_channel_shrink)):
            r_layers += [
                nn.Conv2d(
                    InChannels,
                    int(InChannels / df_channel_shrink[n_idx]),
                    kernel_size=df_kernel_size[n_idx],
                    padding=(df_kernel_size[n_idx] - 1) // 2,
                    groups=df_group[n_idx],
                    bias=False,
                ),
                nn.BatchNorm2d(int(InChannels / df_channel_shrink[n_idx])),
                nn.ReLU(inplace=True),
            ]
            InChannels = int(InChannels / df_channel_shrink[n_idx])
        self.reasoning = nn.Sequential(*r_layers)
        TarPlanes = ReaPlanes
        # Projection maps the reasoned features back to branch_planes for the gate.
        proj_layers = []
        proj_layers.append(
            nn.Conv2d(
                InChannels,
                TarPlanes,
                kernel_size=df_kernel_size[-1],
                groups=df_group[-1],
                bias=False,
            ), )
        proj_layers.append(nn.BatchNorm2d(TarPlanes))
        self.projection = nn.Sequential(*proj_layers)
        # Position embedding
        if pe_type == 'classification':
            # One shared table for q, k and v relative embeddings, indexed by
            # the precomputed flattened relative-position indices.
            self.pe_dim = self.qk_planes * 2 + self.v_planes
            self.relative = nn.Parameter(
                torch.randn(self.pe_dim, kernel_size * 2 - 1),
                requires_grad=True,
            )
            query_index = torch.arange(kernel_size).unsqueeze(0)
            key_index = torch.arange(kernel_size).unsqueeze(1)
            relative_index = key_index - query_index + kernel_size - 1
            self.register_buffer('flatten_index', relative_index.view(-1))
        elif pe_type == 'detection_qr':
            # Separate 1D height/width embeddings (Botnet-style decomposition).
            self.pe_dim = self.qk_planes
            scale = self.pe_dim**-0.5
            self.rel_height = nn.Parameter(
                torch.randn(self.fmap_size * 2 - 1, self.pe_dim) * scale)
            self.rel_width = nn.Parameter(
                torch.randn(self.fmap_size * 2 - 1, self.pe_dim) * scale)
        else:
            raise NotImplementedError
        if stride > 1:
            self.pooling = nn.AvgPool2d(stride, stride=stride)
        self.reset_parameters()

    def _rel_emb(self, q, rel_width, rel_height):
        """2D relative logits as the sum of 1D width and height logits.

        q arrives as (b, heads, d, H*W) and is unflattened to the square
        fmap_size x fmap_size grid before the per-axis contributions are
        computed and re-flattened to (b, heads, H*W, H*W).
        """
        h, w = self.fmap_size, self.fmap_size
        q = rearrange(q, 'b h d (x y) -> b h x y d', x=h, y=w)
        rel_logits_w = relative_logits_1d(q, rel_width)
        rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)')
        # Transpose the spatial axes to reuse the same 1D routine for height.
        q = rearrange(q, 'b h x y d -> b h y x d')
        rel_logits_h = relative_logits_1d(q, rel_height)
        rel_logits_h = rearrange(rel_logits_h,
                                 'b h x i y j -> b h (y x) (j i)')
        return rel_logits_w + rel_logits_h

    def _rel_emb_ve(self, q, rel_all):
        """Value-embedding variant: converts the shared relative table to
        absolute positions and contracts it against q."""
        tmp = rearrange(rel_all, 'r d -> d r').unsqueeze(0)
        tmp = expand_dim(tmp, 2, self.kernel_size)
        tmp = rel_to_abs(tmp).squeeze(0)
        return einsum('bgij, cij -> bgci', q, tmp)

    def _binary_forward(self, x):
        """Self-attention ("binary") term over flattened positions.

        Args:
            x: tensor of shape (N, C, H*W).

        Returns:
            Tensor of shape (N, branch_planes, H*W).
        """
        N, C, HW = x.shape
        # Transformations
        qkv = self.bn_qkv(self.qkv_transform(x))
        q, k, v = torch.split(qkv.reshape(N, self.groups,
                                          self.qk_planes * 2 + self.v_planes,
                                          HW),
                              [self.qk_planes, self.qk_planes, self.v_planes],
                              dim=2)
        # Content-content similarity.
        qk = torch.einsum('bgci, bgcj->bgij', q, k)
        if self.pe_type is None:
            stacked_similarity = qk
            stacked_similarity = self.bn_similarity(stacked_similarity)
        elif self.pe_type == 'detection_qr':
            stacked_similarity = qk
            # Content-position similarity from the decomposed 1D embeddings.
            qr = self._rel_emb(q, self.rel_width, self.rel_height)
            stacked_similarity = self.bn_similarity(
                torch.cat([stacked_similarity, qr],
                          dim=1)).view(N, 2, self.groups, HW, HW).sum(dim=1)
        elif self.pe_type == 'classification':
            # Gather q/k/v relative embeddings from the shared table.
            all_embeddings = torch.index_select(
                self.relative, 1,
                self.flatten_index).view(self.qk_planes * 2 + self.v_planes,
                                         self.kernel_size, self.kernel_size)
            q_embedding, k_embedding, v_embedding = torch.split(
                all_embeddings,
                [self.qk_planes, self.qk_planes, self.v_planes],
                dim=0,
            )
            qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
            kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3)
            # Normalize the three similarity maps jointly, then sum them.
            stacked_similarity = torch.cat([qk, qr, kr], dim=1)
            stacked_similarity = self.bn_similarity(stacked_similarity).view(
                N, 3, self.groups, HW, HW).sum(dim=1)
        else:
            raise NotImplementedError
        similarity = F.softmax(stacked_similarity, dim=3)
        # Aggregate values with the attention weights.
        sv = torch.einsum('bgij,bgcj->bgci', similarity, v)
        if self.pe_type == 'classification':
            # Additional value-embedding output term, normalized jointly.
            sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding)
            stacked_binary = torch.cat([sv, sve],
                                       dim=-1).view(N, self.branch_planes * 2,
                                                    HW)
            binary = self.bn_output(stacked_binary).view(
                N, self.branch_planes, 2, HW).sum(dim=-2)
        elif self.pe_type == 'detection_qr':
            stacked_binary = sv.reshape(N, self.branch_planes, HW)
            binary = self.bn_output(stacked_binary)
        elif self.pe_type is None:
            stacked_binary = sv.reshape(N, self.branch_planes, HW)
            binary = self.bn_output(stacked_binary)
        else:
            raise NotImplementedError
        return binary

    def _unary_forward(self, x):
        """Convolutional ("unary") term: normalized x_transform output."""
        unary = self.bn_x(self.x_transform(x))
        return unary

    def forward(self, x):
        """Fuse the unary and binary terms with a learned sigmoid gate.

        Args:
            x: tensor of shape (N, C, H, W).

        Returns:
            Tensor of shape (N, branch_planes, H', W'); spatial size is
            reduced by average pooling when stride > 1.
        """
        # unary
        if self.with_cp:
            unary = cp.checkpoint(self._unary_forward, x)
        else:
            unary = self._unary_forward(x)
        N, C, H, W = x.shape
        # Attention works on flattened positions.
        x = x.view(N, C, H * W)
        # binary
        if self.with_cp:
            binary = cp.checkpoint(self._binary_forward, x)
        else:
            binary = self._binary_forward(x)
        binary = binary.view(N, self.branch_planes, H, W)
        # Gate the binary term by reasoning over both terms jointly.
        gate_in = torch.cat([unary, binary], dim=1)
        r = self.reasoning(gate_in)
        gate = self.projection(r)
        gate = torch.sigmoid(gate)
        binary = gate * binary
        output = binary + unary
        if self.stride > 1:
            output = self.pooling(output)
        return output

    def reset_parameters(self):
        # Scale qkv weights by fan-in of the input channels.
        self.qkv_transform.weight.data.normal_(0,
                                               math.sqrt(1. / self.in_planes))
        # nn.init.uniform_(self.relative, -0.1, 0.1)
        if self.pe_type == 'classification':
            nn.init.normal_(self.relative, 0.,
                            math.sqrt(1. / self.v_planes * 1))
def lesa3x3(**kwargs):
    """Factory for a LESA block used in place of a 3x3 convolution."""
    return LESA(**kwargs)
def conv3x3(in_planes: int,
            out_planes: int,
            stride: int = 1,
            groups: int = 1,
            dilation: int = 1,
            shape_kernel_size=None,
            **kwargs) -> nn.Conv2d:
    """3x3 convolution with padding.

    ``shape_kernel_size`` and any extra kwargs are accepted so the signature
    mirrors the LESA factory, but they are ignored by the plain convolution.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,  # padding tracks dilation to keep spatial size
        dilation=dilation,
        groups=groups,
        bias=False)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution (channel projection), no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
class Bottleneck(nn.Module):
    """ResNet bottleneck block whose 3x3 stage may be a LESA attention layer.

    Structure: 1x1 reduce -> (3x3 conv | LESA) -> 1x1 expand, with a residual
    connection and an optional downsample module on the identity path.
    """

    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
        lesa=None,
        shape_kernel_size=None,
        **kwargs,
    ):
        super(Bottleneck, self).__init__()
        # A LESA config dict enables the attention branch for the 3x3 stage.
        self.with_lesa = lesa is not None
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample
        # the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        if self.with_lesa:
            # NOTE: the former `lesa = lesa if lesa is not None else {}` guard
            # was dead code — with_lesa already implies lesa is not None.
            self.conv2 = lesa3x3(in_planes=width,
                                 out_planes=width,
                                 kernel_size=shape_kernel_size,
                                 stride=stride,
                                 bias=False,
                                 dcn=None,
                                 **lesa,
                                 **kwargs)
        else:
            self.conv2 = conv3x3(width,
                                 width,
                                 stride,
                                 groups,
                                 dilation,
                                 shape_kernel_size=shape_kernel_size,
                                 **kwargs)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, token=None, **kwargs):
        """Run the bottleneck.

        ``token`` and extra kwargs are accepted for interface compatibility
        with other block types but are unused here.
        """
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        # Works unchanged for both the plain conv and the LESA module; the
        # previous `if self.with_lesa` branch had two identical bodies.
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out
class LesaResNet(nn.Module):
def __init__(
self,
block,
layers,
lesa,
wrn=False,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
strides=(1, 2, 2, 1),
stage_with_lesa=(False, False, True, True),
stage_spatial_res=[56, 28, 14, 14],
stage_with_first_conv=[True, True, True, False],
features_only=False,
out_indices=(0, 1, 2, 3),
**kwargs,
):
super().__init__()
if wrn:
width_per_group = width_per_group * 2
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = int(64)
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError('replace_stride_with_dilation should be None '
'or a 3-element tuple, got {}'.format(
| |
{"xoff": "31744", "xon":"19456"},
"pg_lossless_25000_300m_profile": {"xoff": "44032", "xon":"19456"},
"pg_lossless_40000_300m_profile": {"xoff": "55296", "xon":"19456"},
"pg_lossless_50000_300m_profile": {"xoff": "63488", "xon":"19456"},
"pg_lossless_100000_300m_profile": {"xoff": "102400", "xon":"19456"}}
},
"spc3_headroom": {
"default": ("version_1_0_4", "spc3_headroom")
}
}
},
"version_1_0_6": {
# Version 1.0.6 is introduced for 2km cable support
#
# pool_mapped_from_old_version is not required because no pool flavor mapping changed
# Buffer pool info for normal mode
"buffer_pool_list" : ['ingress_lossless_pool', 'ingress_lossy_pool', 'egress_lossless_pool', 'egress_lossy_pool'],
"buffer_pools": {
"spc1_2700_t1_pool_shp": {"doublepool": { "size": "4439552", "xoff": "2146304" }, "egress_lossless_pool": { "size": "13945824"}},
# Buffer pool for single pool
"spc1_2700_t1_single_pool_shp": {"singlepool": { "size": "8719360", "xoff": "2146304" }, "egress_lossless_pool": { "size": "13945824"}},
# The following pools are used for upgrading from 1.0.5 to the newer version
"spc2_3800-c64_t1_pool_shp": {"singlepool": {"size": "24219648", "xoff": "4169728"}, "egress_lossless_pool": {"size": "34287552"}}
},
"buffer_pools_inherited": {
"version_1_0_4": ["spc1_t0_pool", "spc1_t1_pool", "spc2_t0_pool", "spc2_t1_pool", "spc3_t0_pool", "spc3_t1_pool"],
"version_1_0_5": [# Generic SKUs for 3800
"spc2_3800_t0_pool",
"spc2_3800_t1_pool",
# Non generic SKUs
"spc1_2700_t0_pool_shp",
"spc1_2700_t0_single_pool_shp",
"spc1_2700-d48c8_t0_pool_shp",
"spc1_2700-d48c8_t0_single_pool_shp",
"spc2_3800-c64_t0_pool_shp", "spc2_3800-d112c8_t0_pool_shp",
"spc2_3800-d24c52_t0_pool_shp", "spc2_3800-d28c50_t0_pool_shp",
"spc1_2700-d48c8_t1_pool_shp",
"spc1_2700-d48c8_t1_single_pool_shp",
"spc2_3800-d112c8_t1_pool_shp",
"spc2_3800-d24c52_t1_pool_shp", "spc2_3800-d28c50_t1_pool_shp"],
}
},
"version_2_0_0": {
# Version 2.0.0 is introduced for dynamic buffer calculation
#
"pool_mapped_from_old_version": {
"spc1_t0_pool": "spc1_pool",
"spc1_t1_pool": "spc1_pool",
"spc2_t0_pool": "spc2_pool",
"spc2_t1_pool": "spc2_pool",
"spc2_3800_t0_pool": "spc2_pool",
"spc2_3800_t1_pool": "spc2_pool",
"spc3_t0_pool": "spc3_pool",
"spc3_t1_pool": "spc3_pool"
},
# Buffer pool info for normal mode
"buffer_pool_list" : ['ingress_lossless_pool', 'ingress_lossy_pool', 'egress_lossless_pool', 'egress_lossy_pool'],
"buffer_pools": {
"spc1_pool": {"doublepool": {"size": "dynamic"}, "egress_lossless_pool": { "size": "13945824" }},
"spc2_pool": {"doublepool": {"size": "dynamic"}, "egress_lossless_pool": { "size": "34287552" }},
"spc3_pool": {"doublepool": {"size": "dynamic"}, "egress_lossless_pool": { "size": "60817392" }}
},
"buffer_pools_inherited": {
"version_1_0_5": ["spc1_2700_t0_pool_shp",
"spc1_2700_t0_single_pool_shp",
"spc1_2700-d48c8_t0_pool_shp",
"spc1_2700-d48c8_t0_single_pool_shp",
"spc2_3800-c64_t0_pool_shp", "spc2_3800-d112c8_t0_pool_shp",
"spc2_3800-d24c52_t0_pool_shp", "spc2_3800-d28c50_t0_pool_shp",
"spc1_2700-d48c8_t1_pool_shp",
"spc1_2700-d48c8_t1_single_pool_shp",
"spc2_3800-d112c8_t1_pool_shp",
"spc2_3800-d24c52_t1_pool_shp", "spc2_3800-d28c50_t1_pool_shp"],
"version_1_0_6": ["spc1_2700_t1_pool_shp",
"spc1_2700_t1_single_pool_shp",
"spc2_3800-c64_t1_pool_shp"]
}
}
}
def mlnx_default_buffer_parameters(self, db_version, table):
    """
    Fetch the default buffer-configuration table *table* for *db_version*.

    We extract buffer configurations to a common function so that they can
    be reused among different migrations.  The logic of migrating buffer
    parameters:
    1. Compare the current buffer configuration with the default settings
    2. If there is a match, migrate the old value to the new one
    3. Insert the new setting into database
    Each settings dict (except that for version_1_0_2) is used twice:
    1. As the new setting when the database is migrated TO that version
    2. As the old setting when the database is migrated FROM that version

    Returns the requested table (dict or list), or None when that version
    has no table of the given name.  Raises KeyError for an unknown version.
    """
    version_defaults = self.mellanox_default_parameter[db_version]
    return version_defaults.get(table)
def mlnx_merge_inherited_info(self, db_version, buffer_pools):
    """
    Merge pool definitions inherited from earlier versions into *buffer_pools*.

    The "buffer_pools_inherited" table of *db_version* maps a base version to
    the list of pool names whose definitions should be copied forward from
    that base version's "buffer_pools" table.  *buffer_pools* is mutated in
    place; nothing is returned.
    """
    inherited_info = self.mlnx_default_buffer_parameters(db_version, "buffer_pools_inherited")
    if not inherited_info:
        # Nothing declared as inherited for this version.
        return
    for from_version, inherited_pool_list in inherited_info.items():
        pools_in_base_version = self.mlnx_default_buffer_parameters(from_version, "buffer_pools")
        log.log_info("inherited pool list {} from version {} loaded".format(inherited_pool_list, from_version))
        for key in inherited_pool_list:
            pool_config = pools_in_base_version.get(key)
            if pool_config:
                buffer_pools[key] = pool_config
def mlnx_migrate_map_old_pool_to_new(self, pool_mapping, pool_convert_map, old_config_name):
    """
    Translate an old pool-configuration name into the new-version name.

    pool_mapping    -- "pool_mapped_from_old_version" table of the new
                       version; maps old names to either a plain new name
                       or a (method, mapname) tuple
    pool_convert_map-- lookup tables referenced by tuple mappings
    old_config_name -- the name being translated

    Returns the new name, or None when no mapping table is given, the name
    is not mapped, or the mapping method is unsupported.
    """
    if not pool_mapping:
        return None
    mapped = pool_mapping.get(old_config_name)
    if type(mapped) is not tuple:
        # Direct rename (or None when the old name has no mapping).
        return mapped
    method, mapname = mapped
    if method != "sku":
        log.log_error("Unsupported mapping method {} found. Stop db_migrator".format(method))
        return None
    # SKU-specific mapping: resolve via the named map keyed by our SKU.
    skumap = pool_convert_map.get(mapname)
    return skumap.get(self.sku)
def mlnx_migrate_extend_condensed_pool(self, pool_config, config_name=None):
    """
    Expand a condensed pool entry ("doublepool"/"singlepool") in *pool_config*
    into the explicit per-pool entries buffermgrd expects.

    pool_config is mutated in place:
      - "doublepool"/"singlepool" key is removed,
      - its contents are copied into egress_lossy_pool and
        ingress_lossless_pool (and ingress_lossy_pool when "doublepool"),
      - egress_lossless_pool gets type/mode fields added.
    Returns None.
    """
    # A pool set is condensed either as "doublepool" (shared ingress
    # lossless+lossy) or "singlepool" (one shared ingress pool).
    condensedpool = pool_config.get("doublepool")
    doublepool = False
    if not condensedpool:
        condensedpool = pool_config.get("singlepool")
        if condensedpool:
            pool_config.pop("singlepool")
        else:
            # Neither marker present: already in the expanded (old-style) form.
            log.log_info("Got old default pool configuration {} {}".format(config_name, pool_config))
    else:
        pool_config.pop("doublepool")
        doublepool = True
    if condensedpool:
        # xoff is only meaningful for the ingress lossless pool, so pull it
        # out before the dict is copied into the other pools.
        xoff = condensedpool.get('xoff')
        if xoff:
            condensedpool.pop('xoff')
        # "dynamic" size means the size is computed at runtime; drop the key.
        if condensedpool['size'] == 'dynamic':
            condensedpool.pop('size')
        log.log_info("condensed pool {}".format(condensedpool))
        # NOTE: condensedpool is reused (not copied) below; the 'type' field
        # is deliberately rewritten between the egress and ingress updates.
        condensedpool['type'] = 'egress'
        condensedpool['mode'] = 'dynamic'
        pool_config['egress_lossy_pool'] = {}
        pool_config['egress_lossy_pool'].update(condensedpool)
        pool_config['egress_lossless_pool']['type'] = 'egress'
        pool_config['egress_lossless_pool']['mode'] = 'dynamic'
        condensedpool['type'] = 'ingress'
        pool_config['ingress_lossless_pool'] = {}
        pool_config['ingress_lossless_pool'].update(condensedpool)
        if doublepool:
            # "doublepool" also materializes a separate ingress lossy pool.
            pool_config['ingress_lossy_pool'] = {}
            pool_config['ingress_lossy_pool'].update(condensedpool)
        if xoff:
            # Restore xoff on the ingress lossless pool only.
            pool_config['ingress_lossless_pool']['xoff'] = xoff
    log.log_info("Initialize condensed buffer pool: {}".format(pool_config))
def mlnx_migrate_get_headroom_profiles(self, headroom_profile_set):
    """
    Resolve a headroom-profile-set descriptor to the dict of profiles.

    headroom_profile_set is either:
      - a (version, key) tuple: look the set up in that version's
        "headrooms" defaults and return its "default" entry, or
      - a dict: already the profile set itself, returned unchanged.

    Returns the profile dict, or None for any other type.  (The original
    left ``result`` unassigned in that case, raising UnboundLocalError.)
    """
    result = None
    if type(headroom_profile_set) is tuple:
        version, key = headroom_profile_set
        result = self.mlnx_default_buffer_parameters(version, "headrooms")[key]["default"]
    elif type(headroom_profile_set) is dict:
        result = headroom_profile_set
    return result
def mlnx_migrate_extend_headroom_profile(self, headroom_profile):
    """
    Extend a condensed headroom profile into the full BUFFER_PROFILE form.

    Fills in the fields the comparison/migration code expects:
      - dynamic_th is always '0'
      - xoff is derived as size - xon when missing
      - size falls back to xon when missing (xoff already present)
      - pool always references the ingress lossless pool

    Mutates *headroom_profile* in place and returns it.
    """
    headroom_profile['dynamic_th'] = '0'
    # Idiomatic membership tests ('key not in dict') instead of
    # 'not key in dict.keys()'.
    if 'xoff' not in headroom_profile:
        headroom_profile['xoff'] = str(int(headroom_profile['size']) - int(headroom_profile['xon']))
    elif 'size' not in headroom_profile:
        headroom_profile['size'] = headroom_profile['xon']
    headroom_profile['pool'] = '[BUFFER_POOL|ingress_lossless_pool]'
    return headroom_profile
def mlnx_migrate_buffer_pool_size(self, old_version, new_version):
    """
    To migrate buffer pool configuration.

    Only migrates when the BUFFER_POOL table in CONFIG_DB still matches one
    of the known default configurations of *old_version*; any customized
    configuration is left untouched.  Sets self.is_buffer_config_default so
    the profile migration (mlnx_migrate_buffer_profile) knows whether to run.

    Returns True when migration succeeded or was deliberately skipped,
    False on an internal error (missing default tables).
    """
    self.is_buffer_config_default = False
    # Buffer pools defined in old version
    default_buffer_pool_list_old = self.mlnx_default_buffer_parameters(old_version, "buffer_pool_list")
    # Try to get related info from DB
    configdb_buffer_pools = self.configDB.get_table('BUFFER_POOL')
    # Get current buffer pool configuration, only migrate configuration which
    # with default values, if it's not default, leave it as is.
    configdb_buffer_pool_names = configdb_buffer_pools.keys()
    # Buffer pool numbers is different from default, we don't need to migrate it
    if len(configdb_buffer_pool_names) > len(default_buffer_pool_list_old):
        log.log_notice("Pools in CONFIG_DB ({}) don't match default ({}), skip buffer pool migration".format(configdb_buffer_pool_names, default_buffer_pool_list_old))
        return True
    # If some buffer pool is not default ones, don't need migrate
    # (ingress_lossy_pool is allowed to be absent).
    for buffer_pool in default_buffer_pool_list_old:
        if buffer_pool not in configdb_buffer_pool_names and buffer_pool != 'ingress_lossy_pool':
            log.log_notice("Default pool {} isn't in CONFIG_DB, skip buffer pool migration".format(buffer_pool))
            return True
    default_buffer_pools_old = self.mlnx_default_buffer_parameters(old_version, "buffer_pools")
    # Pull in pool definitions inherited from earlier versions.
    self.mlnx_merge_inherited_info(old_version, default_buffer_pools_old)
    default_pool_conf_list_old = self.mlnx_default_buffer_parameters(old_version, "pool_configuration_list")
    if not default_pool_conf_list_old:
        # No explicit candidate list: fall back to every default pool name.
        if default_buffer_pools_old:
            default_pool_conf_list_old = default_buffer_pools_old.keys()
        if not default_pool_conf_list_old:
            log.log_error("Trying to get pool configuration list or migration control failed, skip migration")
            return False
    new_config_name = None
    pool_mapping = self.mlnx_default_buffer_parameters(new_version, "pool_mapped_from_old_version")
    pool_convert_map = self.mlnx_default_buffer_parameters(new_version, "pool_convert_map")
    log.log_info("got old configuration {}".format(configdb_buffer_pools))
    # Find the default configuration (if any) that matches what is in the DB.
    for old_config_name in default_pool_conf_list_old:
        old_config = default_buffer_pools_old[old_config_name]
        # Expand the condensed default into the full form before comparing.
        self.mlnx_migrate_extend_condensed_pool(old_config, old_config_name)
        log.log_info("Checking old pool configuration {} {}".format(old_config_name, old_config))
        if configdb_buffer_pools == old_config:
            new_config_name = self.mlnx_migrate_map_old_pool_to_new(pool_mapping, pool_convert_map, old_config_name)
            if not new_config_name:
                # No rename registered: keep the old name in the new version.
                new_config_name = old_config_name
            log.log_info("Old buffer pool configuration {} will be migrate to new one {}".format(old_config_name, new_config_name))
            break
    if not new_config_name:
        log.log_notice("The configuration doesn't match any default configuration, migration for pool isn't required")
        return True
    default_buffer_pools_new = self.mlnx_default_buffer_parameters(new_version, "buffer_pools")
    self.mlnx_merge_inherited_info(new_version, default_buffer_pools_new)
    new_buffer_pool_conf = default_buffer_pools_new.get(new_config_name)
    if not new_buffer_pool_conf:
        log.log_error("Can't find the buffer pool configuration for {} in {}".format(new_config_name, new_version))
        return False
    self.mlnx_migrate_extend_condensed_pool(new_buffer_pool_conf, new_config_name)
    # Migrate old buffer conf to latest: queue removal of every existing
    # pool (value None) followed by insertion of the new defaults.
    for pool in configdb_buffer_pools:
        self.pending_update_items.append(('BUFFER_POOL', pool, None))
    for pool in new_buffer_pool_conf:
        self.pending_update_items.append(('BUFFER_POOL', pool, new_buffer_pool_conf.get(pool)))
    self.is_buffer_config_default = True
    return True
def mlnx_migrate_buffer_profile(self, old_version, new_version):
    """
    This is to migrate BUFFER_PROFILE configuration.

    Runs only when the preceding pool migration found a default
    configuration (self.is_buffer_config_default).  Lossless (headroom)
    profiles are matched against the platform-specific default sets and,
    on a full match, replaced with the new version's defaults; non-lossless
    profiles are migrated afterwards when they all match the old defaults.
    Returns True on success or deliberate skip.
    """
    if not self.is_buffer_config_default:
        return True
    else:
        # Reset; only set back to True once profiles are confirmed default.
        self.is_buffer_config_default = False
    # get profile
    default_buffer_profiles_old = self.mlnx_default_buffer_parameters(old_version, "buffer_profiles")
    default_buffer_profiles_new = self.mlnx_default_buffer_parameters(new_version, "buffer_profiles")
    configdb_buffer_profiles = self.configDB.get_table('BUFFER_PROFILE')
    # we need to transform lossless pg profiles to new settings
    # to achieve that, we just need to remove this kind of profiles, buffermgrd will generate them automatically
    default_headroom_sets_old = self.mlnx_default_buffer_parameters(old_version, "headrooms")
    default_headroom_sets_new = self.mlnx_default_buffer_parameters(new_version, "headrooms")
    default_headrooms_old = None
    default_headrooms_new = None
    if default_headroom_sets_old and default_headroom_sets_new:
        # Pick the headroom set for this platform generation; the MSN3800
        # has its own dedicated set.
        if self.platform == 'x86_64-mlnx_msn3800-r0':
            default_headrooms_old = default_headroom_sets_old.get("spc2_3800_headroom")
            default_headrooms_new = default_headroom_sets_new.get("spc2_3800_headroom")
        elif self.platform in self.spc2_platforms:
            default_headrooms_old = default_headroom_sets_old.get("spc2_headroom")
            default_headrooms_new = default_headroom_sets_new.get("spc2_headroom")
        elif self.platform in self.spc1_platforms:
            default_headrooms_old = default_headroom_sets_old.get("spc1_headroom")
            default_headrooms_new = default_headroom_sets_new.get("spc1_headroom")
        elif self.platform in self.spc3_platforms:
            default_headrooms_old = default_headroom_sets_old.get("spc3_headroom")
            default_headrooms_new = default_headroom_sets_new.get("spc3_headroom")
    if default_headrooms_old and default_headrooms_new:
        # match the old lossless profiles?
        # NOTE(review): 'matched' and 'new_headroom_set_name' are bound
        # inside this loop; if default_headrooms_old were an empty dict the
        # references below would raise NameError — confirm it is never empty.
        for headroom_set_name, lossless_profiles in default_headrooms_old.items():
            lossless_profiles = self.mlnx_migrate_get_headroom_profiles(lossless_profiles)
            matched = True
            for name, profile in configdb_buffer_profiles.items():
                if name in lossless_profiles.keys():
                    default_profile = self.mlnx_migrate_extend_headroom_profile(lossless_profiles.get(name))
                    if profile != default_profile:
                        log.log_info("Skip headroom profile set {} due to {} mismatched: {} vs {}".format(
                            headroom_set_name, name, default_profile, profile))
                        matched = False
                        break
            if matched:
                # Resolve the name of the matching set in the new version;
                # the mapping may be direct or SKU-based.
                mapping = default_headroom_sets_new.get("mapping")
                if not mapping:
                    new_headroom_set_name = headroom_set_name
                    log.log_info("Migrate profile set {} ".format(headroom_set_name))
                else:
                    new_headroom_set_name = mapping.get(headroom_set_name)
                    if type(new_headroom_set_name) is tuple:
                        log.log_info("Use headroom profiles map {}".format(mapping))
                        maptype, sku_mapping = new_headroom_set_name
                        if maptype == "skumap":
                            new_headroom_set_name = sku_mapping.get(self.sku)
                    if not new_headroom_set_name:
                        new_headroom_set_name = headroom_set_name
                    log.log_info("{} has been mapped to {} according to sku".format(headroom_set_name, new_headroom_set_name))
                break
        if not matched:
            log.log_notice("Headroom profiles don't match any of the default value, skip migrating")
            return True
        default_headrooms_new = default_headrooms_new.get(new_headroom_set_name)
        if type(default_headrooms_new) is dict:
            # Queue replacement of each lossless profile with its new default.
            for name, profile in configdb_buffer_profiles.items():
                if name in default_headrooms_new.keys():
                    default_profile = self.mlnx_migrate_extend_headroom_profile(default_headrooms_new.get(name))
                    self.pending_update_items.append(('BUFFER_PROFILE', name, default_profile))
                    log.log_info("Profile {} has been migrated to {}".format(name, default_profile))
        self.is_buffer_config_default = True
    if not default_buffer_profiles_new:
        # Not providing new profile configure in new version means they do need to be changed
        log.log_notice("No buffer profile in {}, don't need to migrate non-lossless profiles".format(new_version))
        return True
    # Non-lossless profiles: migrate only when every old default profile is
    # present in CONFIG_DB with its default value.
    profile_matched = True
    for _, profiles in default_buffer_profiles_old.items():
        for name, profile in profiles.items():
            if name in configdb_buffer_profiles.keys() and profile == configdb_buffer_profiles[name]:
                continue
            # return if any default profile isn't in cofiguration
            profile_matched = False
            break
    if not profile_matched:
        log.log_notice("Profiles doesn't match default value".format(name))
        return True
    for name, profile in default_buffer_profiles_new["default"].items():
        log.log_info("Successfully migrate profile {}".format(name))
        self.pending_update_items.append(('BUFFER_PROFILE', name, profile))
    return True
def mlnx_append_item_on_pending_configuration_list(self, item):
    """
    Queue a (table, key, value) item to be applied to CONFIG_DB later.

    Items are only written out once the whole migration decides the
    configuration is default; a value of None marks the key for removal.
    """
    self.pending_update_items += [item]
def mlnx_abandon_pending_buffer_configuration(self):
"""
We found the buffer configuration on the device doesn't match the default one, so no migration performed
Clear | |
<reponame>karlam123/DBImport<filename>bin/Schedule/Airflow.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import io
import sys
import logging
import subprocess
import shutil
import jaydebeapi
import re
from ConfigReader import configuration
import pendulum
from datetime import date, datetime, time, timedelta
import pandas as pd
from common import constants as constant
from common.Exceptions import *
from Schedule import airflowSchema
from DBImportConfig import configSchema
from DBImportConfig import common_config
import sqlalchemy as sa
# from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy_utils import create_view
from sqlalchemy_views import CreateView, DropView
from sqlalchemy.sql import text, alias, select
from sqlalchemy.orm import aliased, sessionmaker, Query
class initialize(object):
def __init__(self):
    """
    Read DBImport/Airflow settings and open SQLAlchemy connections to both
    the DBImport configuration database and the Airflow database.

    Exits the process (code 1) when either connection cannot be
    established, after cleaning up temporary files.
    """
    logging.debug("Executing Airflow.__init__()")
    self.mysql_conn = None
    self.mysql_cursor = None
    self.debugLogLevel = False
    if logging.root.level == 10:        # DEBUG
        self.debugLogLevel = True
    self.common_config = common_config.config()
    # Airflow-related settings kept in the DBImport configuration store.
    self.dbimportCommandPath = self.common_config.getConfigValue("airflow_dbimport_commandpath")
    self.defaultSudoUser = self.common_config.getConfigValue("airflow_sudo_user")
    self.DAGdirectory = self.common_config.getConfigValue("airflow_dag_directory")
    self.DAGstagingDirectory = self.common_config.getConfigValue("airflow_dag_staging_directory")
    self.DAGfileGroup = self.common_config.getConfigValue("airflow_dag_file_group")
    self.DAGfilePermission = self.common_config.getConfigValue("airflow_dag_file_permission")
    self.TaskQueueForDummy = self.common_config.getConfigValue("airflow_dummy_task_queue")
    self.airflowMajorVersion = self.common_config.getConfigValue("airflow_major_version")
    self.defaultTimeZone = self.common_config.getConfigValue("timezone")
    # State used while a DAG file is being generated.
    self.DAGfile = None
    self.DAGfilename = None
    self.DAGfilenameInAirflow = None
    self.writeDAG = None
    self.sensorStartTask = None
    self.sensorStopTask = None
    self.preStartTask = None
    self.preStopTask = None
    self.mainStartTask = None
    self.mainStopTask = None
    self.postStartTask = None
    self.postStopTask = None
    # Fetch configuration about MySQL database and how to connect to it
    self.configHostname = configuration.get("Database", "mysql_hostname")
    self.configPort = configuration.get("Database", "mysql_port")
    self.configDatabase = configuration.get("Database", "mysql_database")
    self.configUsername = configuration.get("Database", "mysql_username")
    self.configPassword = configuration.get("Database", "mysql_password")
    # Establish a SQLAlchemy connection to the DBImport database
    self.connectStr = "mysql+pymysql://%s:%s@%s:%s/%s"%(
        self.configUsername,
        self.configPassword,
        self.configHostname,
        self.configPort,
        self.configDatabase)
    try:
        self.configDB = sa.create_engine(self.connectStr, echo = self.debugLogLevel)
        self.configDB.connect()
        self.configDBSession = sessionmaker(bind=self.configDB)
    except sa.exc.OperationalError as err:
        logging.error("%s"%err)
        self.common_config.remove_temporary_files()
        sys.exit(1)
    # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
    # not swallowed and reported as "Unexpected error".
    except Exception:
        print("Unexpected error: ")
        print(sys.exc_info())
        self.common_config.remove_temporary_files()
        sys.exit(1)
    # Establish a SQLAlchemy connection to the Airflow database
    airflowConnectStr = configuration.get("Airflow", "airflow_alchemy_conn")
    try:
        self.airflowDB = sa.create_engine(airflowConnectStr, echo = self.debugLogLevel)
        self.airflowDB.connect()
        self.airflowDBSession = sessionmaker(bind=self.airflowDB)
    except sa.exc.OperationalError as err:
        logging.error("%s"%err)
        self.common_config.remove_temporary_files()
        sys.exit(1)
    # Narrowed from a bare 'except:' for the same reason as above.
    except Exception:
        print("Unexpected error: ")
        print(sys.exc_info())
        self.common_config.remove_temporary_files()
        sys.exit(1)
    logging.debug("Executing Airflow.__init__() - Finished")
def checkExecution(self):
    """ Checks the 'airflow_disable' settings and exit with 0 or 1 depending on that """
    airflowExecutionDisabled = self.common_config.getConfigValue("airflow_disable")
    # Exit code 0 means enabled, 1 means disabled, so callers can gate on it.
    enabled = (airflowExecutionDisabled == False)
    if enabled:
        print("Airflow execution is enabled")
    else:
        print("Airflow execution is disabled")
    self.common_config.remove_temporary_files()
    sys.exit(0 if enabled else 1)
def getDBImportCommandPath(self, sudoUser=""):
    """
    Return the DBImport command path with ${SUDO_USER} expanded.

    Falls back to the configured default sudo user when *sudoUser* is
    None or an empty string.
    """
    effectiveUser = self.defaultSudoUser if sudoUser in (None, "") else sudoUser
    return self.dbimportCommandPath.replace("${SUDO_USER}", effectiveUser)
def generateDAG(self, name=None, writeDAG=False, autoDAGonly=False, DAGFolder=None):
    """
    Generate Airflow DAG files from the DAG definitions stored in the
    DBImport configuration database.

    name        -- only (re)generate the DAG with this exact dag_name;
                   None generates all DAGs
    writeDAG    -- stored on self; consumed by the per-type generators
    autoDAGonly -- restrict to DAGs with auto_regenerate_dag == 1
    DAGFolder   -- stored on self; target folder for the generated files

    Exits the process (code 1) when *name* is given but matches nothing.
    """
    self.writeDAG = writeDAG
    self.DAGFolder = DAGFolder
    session = self.configDBSession()
    airflowCustomDags = aliased(configSchema.airflowCustomDags)
    airflowExportDags = aliased(configSchema.airflowExportDags)
    airflowImportDags = aliased(configSchema.airflowImportDags)
    airflowEtlDags = aliased(configSchema.airflowEtlDags)
    # Load each DAG-definition table into a DataFrame; fillna('') so that
    # NULL columns compare as empty strings later on.
    exportDAG = pd.DataFrame(session.query(
        airflowExportDags.dag_name,
        airflowExportDags.schedule_interval,
        airflowExportDags.filter_dbalias,
        airflowExportDags.filter_target_schema,
        airflowExportDags.filter_target_table,
        airflowExportDags.retries,
        airflowExportDags.auto_regenerate_dag,
        airflowExportDags.sudo_user,
        airflowExportDags.timezone
        )
        .select_from(airflowExportDags)
        .all()).fillna('')
    importDAG = pd.DataFrame(session.query(
        airflowImportDags.dag_name,
        airflowImportDags.schedule_interval,
        airflowImportDags.filter_hive,
        airflowImportDags.retries,
        airflowImportDags.retries_stage1,
        airflowImportDags.retries_stage2,
        airflowImportDags.pool_stage1,
        airflowImportDags.pool_stage2,
        airflowImportDags.run_import_and_etl_separate,
        airflowImportDags.finish_all_stage1_first,
        airflowImportDags.auto_regenerate_dag,
        airflowImportDags.sudo_user,
        airflowImportDags.metadata_import,
        airflowImportDags.timezone
        )
        .select_from(airflowImportDags)
        .all()).fillna('')
    # NOTE(review): etlDAG is queried here but never filtered by name /
    # autoDAGonly nor iterated below — presumably handled elsewhere or
    # unfinished; confirm before removing.
    etlDAG = pd.DataFrame(session.query(
        airflowEtlDags.dag_name,
        airflowEtlDags.schedule_interval,
        airflowEtlDags.filter_job,
        airflowEtlDags.filter_task,
        airflowEtlDags.filter_source_db,
        airflowEtlDags.filter_target_db,
        airflowEtlDags.retries,
        airflowEtlDags.auto_regenerate_dag,
        airflowEtlDags.sudo_user,
        airflowEtlDags.timezone
        )
        .select_from(airflowEtlDags)
        .all()).fillna('')
    customDAG = pd.DataFrame(session.query(
        airflowCustomDags.dag_name,
        airflowCustomDags.schedule_interval,
        airflowCustomDags.retries,
        airflowCustomDags.auto_regenerate_dag,
        airflowCustomDags.sudo_user,
        airflowCustomDags.timezone
        )
        .select_from(airflowCustomDags)
        .all()).fillna('')
    # The .empty checks guard the .loc filters: a DataFrame built from an
    # empty query result has no columns, so filtering it would raise.
    if name != None:
        if importDAG.empty == False:
            importDAG = importDAG.loc[importDAG['dag_name'] == name]
        if exportDAG.empty == False:
            exportDAG = exportDAG.loc[exportDAG['dag_name'] == name]
        if customDAG.empty == False:
            customDAG = customDAG.loc[customDAG['dag_name'] == name]
    if autoDAGonly == True:
        if importDAG.empty == False:
            importDAG = importDAG.loc[importDAG['auto_regenerate_dag'] == 1]
        if exportDAG.empty == False:
            exportDAG = exportDAG.loc[exportDAG['auto_regenerate_dag'] == 1]
        if customDAG.empty == False:
            customDAG = customDAG.loc[customDAG['auto_regenerate_dag'] == 1]
    dagFound = False
    if name == None or len(importDAG) > 0:
        dagFound = True
        for index, row in importDAG.iterrows():
            self.generateImportDAG(DAG=row)
    if name == None or len(exportDAG) > 0:
        dagFound = True
        for index, row in exportDAG.iterrows():
            self.generateExportDAG(DAG=row)
    if name == None or len(customDAG) > 0:
        dagFound = True
        for index, row in customDAG.iterrows():
            self.generateCustomDAG(DAG=row)
    if dagFound == False and name != None:
        logging.error("Can't find DAG with that name")
        self.common_config.remove_temporary_files()
        sys.exit(1)
    session.close()
def getAirflowHostPoolName(self):
    """
    Build the Airflow pool name for the current JDBC server.

    The hostname is lower-cased and stripped of any '/<path>' or
    '\\<instance>' suffix; the result is capped at Airflow's 50-character
    pool-name limit.
    """
    host = self.common_config.jdbc_hostname.lower()
    for separator in ("/", "\\"):
        host = host.split(separator)[0]
    return ("DBImport_server_%s" % host)[0:50]
def generateExportDAG(self, DAG):
    """ Generates an Export DAG.

    DAG is a pandas Series (one row from airflow_export_dags).  Writes a
    DAG file containing one BashOperator per export table (plus an optional
    clearStage task for full exports) and wires them between the main
    start/stop dummy tasks.  Exits the process on an empty filter_dbalias.
    """
    usedPools = []
    tableFilters = []
    defaultPool = DAG['dag_name']
    sudoUser = DAG['sudo_user']
    usedPools.append(defaultPool)
    cronSchedule = self.convertTimeToCron(DAG["schedule_interval"])
    self.createDAGfileWithHeader(dagName = DAG['dag_name'], cronSchedule = cronSchedule, defaultPool = defaultPool, sudoUser = sudoUser, dagTimeZone = DAG['timezone'])
    session = self.configDBSession()
    exportTables = aliased(configSchema.exportTables)
    exportTablesQuery = Query([exportTables.target_schema, exportTables.target_table, exportTables.dbalias, exportTables.airflow_priority, exportTables.export_type, exportTables.sqoop_last_mappers])
    exportTablesQuery = exportTablesQuery.filter(exportTables.include_in_airflow == 1)
    # '*' wildcards in the filters become SQL LIKE '%' wildcards.
    filterDBAlias = DAG['filter_dbalias'].strip().replace(r'*', '%')
    filterTargetSchema = DAG['filter_target_schema'].strip().replace(r'*', '%')
    filterTargetTable = DAG['filter_target_table'].strip().replace(r'*', '%')
    if filterDBAlias == '':
        logging.error("'filter_dbalias' in airflow_export_dags cant be empty")
        self.DAGfile.close()
        self.common_config.remove_temporary_files()
        sys.exit(1)
    exportTablesQuery = exportTablesQuery.filter(exportTables.dbalias.like(filterDBAlias))
    if filterTargetSchema != '': exportTablesQuery = exportTablesQuery.filter(exportTables.target_schema.like(filterTargetSchema))
    if filterTargetTable != '': exportTablesQuery = exportTablesQuery.filter(exportTables.target_table.like(filterTargetTable))
    tables = pd.DataFrame(exportTablesQuery.with_session(session).all()).fillna('')
    # Default to 5 retries when the column is NULL/empty.
    if DAG['retries'] == None or DAG['retries'] == '':
        retries = 5
    else:
        retries = int(DAG['retries'])
    # in 'tables' we now have all the tables that will be part of the DAG
    previousConnectionAlias = ""
    for index, row in tables.iterrows():
        if row['dbalias'] != previousConnectionAlias:
            # We save the previousConnectionAlias just to avoid making lookups for every dbalias even if they are all the same
            try:
                self.common_config.lookupConnectionAlias(connection_alias=row['dbalias'], decryptCredentials=False)
                previousConnectionAlias = row['dbalias']
            except invalidConfiguration as errMsg:
                # Unknown alias: skip this table, keep generating the rest.
                logging.warning("The connection alias '%s' cant be found in the configuration database"%(row['dbalias']))
                previousConnectionAlias = None
                continue
        # exportPool = "DBImport_server_%s"%(self.common_config.jdbc_hostname.lower())
        exportPool = self.getAirflowHostPoolName()
        # usedPools is later used to check if the pools that we just are available in Airflow
        if exportPool not in usedPools:
            usedPools.append(exportPool)
        # Airflow task ids cannot contain '/' or '.'.
        taskID = row['target_table'].replace(r'/', '_').replace(r'.', '_')
        dbexportCMD = "%sbin/export"%(self.getDBImportCommandPath(sudoUser = sudoUser))
        dbexportClearStageCMD = "%sbin/manage --clearExportStage"%(self.getDBImportCommandPath(sudoUser = sudoUser))
        airflowPriority = 1     # Default Airflow Priority
        # Explicit airflow_priority wins; otherwise fall back to the
        # last sqoop mapper count as a size-based priority.
        if row['airflow_priority'] != None and row['airflow_priority'] != '':
            airflowPriority = int(row['airflow_priority'])
        elif row['sqoop_last_mappers'] != None and row['sqoop_last_mappers'] != '':
            airflowPriority = int(row['sqoop_last_mappers'])
        # Full exports need their staging cleared before each run.
        clearStageRequired = False
        if row['export_type'] == "full":
            clearStageRequired = True
        if clearStageRequired == True:
            self.DAGfile.write("%s_clearStage = BashOperator(\n"%(taskID))
            self.DAGfile.write("    task_id='%s_clearStage',\n"%(taskID))
            self.DAGfile.write("    bash_command='%s -a %s -S %s -T %s ',\n"%(dbexportClearStageCMD, row['dbalias'], row['target_schema'], row['target_table']))
            self.DAGfile.write("    pool='%s',\n"%(exportPool))
            self.DAGfile.write("    priority_weight=%s,\n"%(airflowPriority))
            self.DAGfile.write("    weight_rule='absolute',\n")
            self.DAGfile.write("    retries=%s,\n"%(retries))
            self.DAGfile.write("    dag=dag)\n")
            self.DAGfile.write("\n")
        self.DAGfile.write("%s = BashOperator(\n"%(taskID))
        self.DAGfile.write("    task_id='%s',\n"%(taskID))
        self.DAGfile.write("    bash_command='%s -a %s -S %s -T %s ',\n"%(dbexportCMD, row['dbalias'], row['target_schema'], row['target_table']))
        self.DAGfile.write("    pool='%s',\n"%(exportPool))
        self.DAGfile.write("    priority_weight=%s,\n"%(airflowPriority))
        self.DAGfile.write("    weight_rule='absolute',\n")
        self.DAGfile.write("    retries=%s,\n"%(retries))
        self.DAGfile.write("    dag=dag)\n")
        self.DAGfile.write("\n")
        # Wire the task(s) between the main start/stop dummy tasks.
        if clearStageRequired == True:
            self.DAGfile.write("%s.set_downstream(%s_clearStage)\n"%(self.mainStartTask, taskID))
            self.DAGfile.write("%s_clearStage.set_downstream(%s)\n"%(taskID, taskID))
            self.DAGfile.write("%s.set_downstream(%s)\n"%(taskID, self.mainStopTask))
        else:
            self.DAGfile.write("%s.set_downstream(%s)\n"%(self.mainStartTask, taskID))
            self.DAGfile.write("%s.set_downstream(%s)\n"%(taskID, self.mainStopTask))
        self.DAGfile.write("\n")
    self.addTasksToDAGfile(dagName = DAG['dag_name'], mainDagSchedule=DAG["schedule_interval"], defaultRetries=retries, defaultSudoUser=sudoUser)
    self.addSensorsToDAGfile(dagName = DAG['dag_name'], mainDagSchedule=DAG["schedule_interval"])
    self.createAirflowPools(pools=usedPools)
    self.closeDAGfile()
    session.close()
def generateImportDAG(self, DAG):
""" Generates a Import DAG """
importPhaseFinishFirst = False
if DAG['finish_all_stage1_first'] == 1:
importPhaseFinishFirst = True
runImportAndEtlSeparate = False
if DAG['run_import_and_etl_separate'] == 1:
runImportAndEtlSeparate = True
usedPools = []
tableFilters = []
defaultPool = DAG['dag_name']
sudoUser = DAG['sudo_user']
# metaDataImport = DAG['metadata_import']
usedPools.append(defaultPool)
if DAG['metadata_import'] == 1:
metaDataImportOption = "-m"
else:
metaDataImportOption = ""
cronSchedule = self.convertTimeToCron(DAG["schedule_interval"])
self.createDAGfileWithHeader(dagName = DAG['dag_name'], cronSchedule = cronSchedule, importPhaseFinishFirst = importPhaseFinishFirst, defaultPool = defaultPool, sudoUser = sudoUser, dagTimeZone = DAG['timezone'])
session = self.configDBSession()
importTables = aliased(configSchema.importTables)
importTablesQuery = Query([importTables.hive_db, importTables.hive_table, importTables.dbalias, importTables.airflow_priority, importTables.import_type, importTables.import_phase_type, importTables.etl_phase_type, importTables.sqoop_last_mappers, importTables.copy_slave])
importTablesQuery = importTablesQuery.filter(importTables.include_in_airflow == 1)
for hiveTarget in DAG['filter_hive'].split(';'):
hiveDB = hiveTarget.split(".")[0].strip().replace(r'*', '%')
hiveTable = hiveTarget.split(".")[1].strip().replace(r'*', '%')
if hiveDB == None or hiveTable == None or hiveDB == "" or hiveTable == "":
logging.error("Syntax for filter_hive column is <HIVE_DB>.<HIVE_TABLE>;<HIVE_DB>.<HIVE_TABLE>;.....")
self.DAGfile.close()
self.common_config.remove_temporary_files()
sys.exit(1)
tableFilters.append((importTables.hive_db.like(hiveDB)) & (importTables.hive_table.like(hiveTable)))
importTablesQuery = importTablesQuery.filter(sa.or_(*tableFilters))
tables = pd.DataFrame(importTablesQuery.with_session(session).all()).fillna('')
retries=int(DAG['retries'])
try:
retriesImportPhase = int(DAG['retries_stage1'])
except ValueError:
retriesImportPhase = retries
try:
retriesEtlPhase = int(DAG['retries_stage2'])
except ValueError:
retriesEtlPhase = retries
# in 'tables' we now have all the tables that will be part of the DAG
previousConnectionAlias = ""
for index, row in tables.iterrows():
try:
if row['dbalias'] != previousConnectionAlias:
# We save the previousConnectionAlias just to avoid making lookups for every dbalias even if they are all the same
self.common_config.lookupConnectionAlias(connection_alias=row['dbalias'], decryptCredentials=False)
previousConnectionAlias = row['dbalias']
except:
continue
# importPhasePool = "DBImport_server_%s"%(self.common_config.jdbc_hostname.lower())
importPhasePool = self.getAirflowHostPoolName()
etlPhasePool = DAG['dag_name'][0:50]
if row['copy_slave'] == 1:
importPhaseAsSensor = True
else:
importPhaseAsSensor = False
# if row['hive_db'] == "user_boszkk" and row['hive_table'] == "tbl_full_mysql":
# importPhaseAsSensor = True
if DAG['pool_stage1'] != '':
importPhasePool = DAG['pool_stage1']
if DAG['pool_stage2'] != '':
etlPhasePool = DAG['pool_stage2']
# usedPools is later used to check if the pools that we just are available in Airflow
if importPhasePool not in usedPools:
usedPools.append(importPhasePool)
if etlPhasePool not in usedPools:
usedPools.append(etlPhasePool)
dbimportCMD = "%sbin/import"%(self.getDBImportCommandPath(sudoUser = sudoUser))
dbimportClearStageCMD = "%sbin/manage --clearImportStage"%(self.getDBImportCommandPath(sudoUser = sudoUser))
taskID = row['hive_table'].replace(r'/', '_').replace(r'.', '_')
airflowPriority = 1 # Default Airflow Priority
if row['airflow_priority'] != None and row['airflow_priority'] != '':
airflowPriority = int(row['airflow_priority'])
elif row['sqoop_last_mappers'] != None and row['sqoop_last_mappers'] != '':
airflowPriority = int(row['sqoop_last_mappers'])
clearStageRequired = False
if row['import_type'] in ("full_direct", "full", "oracle_flashback_merge", "full_history", "full_merge_direct_history", "full_merge_direct", "full_append"):
clearStageRequired = True
if row['import_phase_type'] in ("full", "oracle_flashback", "mssql_change_tracking"):
clearStageRequired = True
if clearStageRequired == True:
self.DAGfile.write("%s_clearStage = BashOperator(\n"%(taskID))
self.DAGfile.write(" task_id='%s_clearStage',\n"%(taskID))
self.DAGfile.write(" bash_command='%s -h %s -t %s ',\n"%(dbimportClearStageCMD, row['hive_db'], row['hive_table']))
self.DAGfile.write(" pool='%s',\n"%(importPhasePool))
self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
self.DAGfile.write(" weight_rule='absolute',\n")
self.DAGfile.write(" retries=%s,\n"%(retries))
self.DAGfile.write(" dag=dag)\n")
self.DAGfile.write("\n")
if DAG['finish_all_stage1_first'] == 1 or runImportAndEtlSeparate == True:
if importPhaseAsSensor == True:
# Running Import phase as a sensor
self.DAGfile.write("%s_sensor = SqlSensor(\n"%(taskID))
self.DAGfile.write(" task_id='%s_sensor',\n"%(taskID))
self.DAGfile.write(" conn_id='DBImport',\n")
self.DAGfile.write(" sql=\"\"\"select count(*) from import_tables where hive_db = '%s' and hive_table = '%s' and "%(row['hive_db'], row['hive_table']))
self.DAGfile.write("copy_finished >= '{{ next_execution_date.strftime('%Y-%m-%d %H:%M:%S.%f') }}'\"\"\",\n")
self.DAGfile.write(" pool='%s',\n"%(importPhasePool))
# if DAG['finish_all_stage1_first'] == 1:
# # If all stage1 is to be completed first, then we need to have prio on the stage1 task aswell as
# # the prio from stage 2 will all be summed up in 'stage1_complete' dummy task
# self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
# else:
# self.DAGfile.write(" priority_weight=0,\n")
self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
self.DAGfile.write(" weight_rule='absolute',\n")
self.DAGfile.write(" timeout=18000,\n")
self.DAGfile.write(" poke_interval=300,\n")
self.DAGfile.write(" mode='reschedule',\n")
self.DAGfile.write(" dag=dag)\n")
self.DAGfile.write("\n")
self.DAGfile.write("%s_import = BashOperator(\n"%(taskID))
self.DAGfile.write(" task_id='%s_import',\n"%(taskID))
self.DAGfile.write(" bash_command='%s -h %s -t %s -I -C ',\n"%(dbimportCMD, row['hive_db'], row['hive_table']))
self.DAGfile.write(" pool='%s',\n"%(importPhasePool))
# if DAG['finish_all_stage1_first'] == 1:
# # If all stage1 is to be completed first, then we need to have prio on the stage1 task aswell as
# # the prio from stage 2 will all be summed up in 'stage1_complete' dummy task
# self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
# else:
# self.DAGfile.write(" priority_weight=0,\n")
self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
self.DAGfile.write(" weight_rule='absolute',\n")
self.DAGfile.write(" retries=%s,\n"%(retriesImportPhase))
self.DAGfile.write(" dag=dag)\n")
self.DAGfile.write("\n")
self.DAGfile.write("%s_etl = BashOperator(\n"%(taskID))
self.DAGfile.write(" task_id='%s_etl',\n"%(taskID))
self.DAGfile.write(" bash_command='%s -h %s -t %s -E ',\n"%(dbimportCMD, row['hive_db'], row['hive_table']))
self.DAGfile.write(" pool='%s',\n"%(etlPhasePool))
self.DAGfile.write(" priority_weight=%s,\n"%(airflowPriority))
self.DAGfile.write(" weight_rule='absolute',\n")
self.DAGfile.write(" retries=%s,\n"%(retriesEtlPhase))
self.DAGfile.write(" dag=dag)\n")
self.DAGfile.write("\n")
if clearStageRequired == True and DAG['finish_all_stage1_first'] == 1:
self.DAGfile.write("%s.set_downstream(%s_clearStage)\n"%(self.mainStartTask, taskID))
if importPhaseAsSensor == True:
self.DAGfile.write("%s_clearStage.set_downstream(%s_sensor)\n"%(taskID, taskID))
self.DAGfile.write("%s_sensor.set_downstream(%s_import)\n"%(taskID, taskID))
else:
self.DAGfile.write("%s_clearStage.set_downstream(%s_import)\n"%(taskID, taskID))
self.DAGfile.write("%s_import.set_downstream(Import_Phase_Finished)\n"%(taskID))
self.DAGfile.write("Import_Phase_Finished.set_downstream(%s_etl)\n"%(taskID))
self.DAGfile.write("%s_etl.set_downstream(%s)\n"%(taskID, self.mainStopTask))
elif clearStageRequired == True and DAG['finish_all_stage1_first'] == 0: # This means that runImportAndEtlSeparate == True
self.DAGfile.write("%s.set_downstream(%s_clearStage)\n"%(self.mainStartTask, taskID))
if importPhaseAsSensor == True:
self.DAGfile.write("%s_clearStage.set_downstream(%s_sensor)\n"%(taskID, taskID))
self.DAGfile.write("%s_sensor.set_downstream(%s_import)\n"%(taskID, taskID))
else:
self.DAGfile.write("%s_clearStage.set_downstream(%s_import)\n"%(taskID, taskID))
self.DAGfile.write("%s_import.set_downstream(%s_etl)\n"%(taskID, taskID))
self.DAGfile.write("%s_etl.set_downstream(%s)\n"%(taskID, self.mainStopTask))
elif clearStageRequired == False and DAG['finish_all_stage1_first'] == | |
import os, sys, time, copy, glob
from collections import deque
import gym
from gym import spaces
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ppo.a2c_ppo_acktr import algo
from ppo.a2c_ppo_acktr.arguments import get_args
from ppo.a2c_ppo_acktr.envs import make_vec_envs
from ppo.a2c_ppo_acktr.model import Policy
from ppo.a2c_ppo_acktr.storage import RolloutStorage
from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
from ppo.a2c_ppo_acktr.visualize import visdom_plot
def _reset_monitor_dir(log_dir):
    """Create `log_dir`, or clear stale ``*.monitor.csv`` files if it exists.

    The monitor files are per-process episode logs from a previous run;
    removing them keeps reward curves from mixing old and new episodes.
    """
    try:
        os.makedirs(log_dir)
    except OSError:
        # Directory already exists: best-effort removal of leftover logs.
        files = glob.glob(os.path.join(log_dir, '*.monitor.csv'))
        try:
            for f in files:
                os.remove(f)
        except PermissionError:
            # A locked file should not abort training setup.
            pass


args = get_args()
assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
    assert args.algo in ['a2c', 'ppo'], \
        'Recurrent policy is not implemented for ACKTR'
if args.num_rollouts > 0:
    # Rollouts are collected num_processes at a time, so they must tile evenly.
    assert args.num_rollouts % args.num_processes == 0, \
        'num_rollouts must be divisible by num_processes'

# Total number of policy updates performed over the whole training run.
num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes

torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)

if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
    # Trade cudnn autotuning for reproducible kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

_reset_monitor_dir(args.log_dir)
eval_log_dir = args.log_dir + "_eval"
_reset_monitor_dir(eval_log_dir)
def main():
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.vis:
from visdom import Visdom
viz = Visdom(port=args.port)
win = None
envs = make_vec_envs(args.env_name, args.seed, 1,
args.gamma, args.log_dir, args.add_timestep, device, False)
# Determine if this is a dual robot (multi agent) environment.
obs = envs.reset()
action = torch.tensor([envs.action_space.sample()])
_, _, _, info = envs.step(action)
dual_robots = 'dual_robots' in info[0]
if dual_robots:
obs_robot_len = info[0]['obs_robot_len'] // 2
action_robot_len = info[0]['action_robot_len'] // 2
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
if len(obs_robot1[0]) != obs_robot_len or len(obs_robot2[0]) != obs_robot_len:
print('robot 1 obs shape:', len(obs_robot1[0]), 'obs space robot shape:', (obs_robot_len,))
print('robot 2 obs shape:', len(obs_robot2[0]), 'obs space robot shape:', (obs_robot_len,))
exit()
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, args.add_timestep, device, False)
if dual_robots:
# Reset environment
obs = envs.reset()
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
action_space_robot1 = spaces.Box(low=np.array([-1.0]*action_robot_len), high=np.array([1.0]*action_robot_len), dtype=np.float32)
action_space_robot2 = spaces.Box(low=np.array([-1.0]*action_robot_len), high=np.array([1.0]*action_robot_len), dtype=np.float32)
if args.load_policy is not None:
if dual_robots:
actor_critic_robot1, actor_critic_robot2, ob_rms = torch.load(args.load_policy)
else:
actor_critic, ob_rms = torch.load(args.load_policy)
vec_norm = get_vec_normalize(envs)
if vec_norm is not None:
vec_norm.ob_rms = ob_rms
else:
if dual_robots:
actor_critic_robot1 = Policy([obs_robot_len], action_space_robot1,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic_robot2 = Policy([obs_robot_len], action_space_robot2,
base_kwargs={'recurrent': args.recurrent_policy})
else:
actor_critic = Policy(envs.observation_space.shape, envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy, 'hidden_size': args.hidden_size})
if dual_robots:
actor_critic_robot1.to(device)
actor_critic_robot2.to(device)
else:
actor_critic.to(device)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, lr=args.lr,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
if dual_robots:
agent_robot1 = algo.PPO(actor_critic_robot1, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
agent_robot2 = algo.PPO(actor_critic_robot2, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
else:
agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, acktr=True)
if dual_robots:
rollouts_robot1 = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
[obs_robot_len], action_space_robot1,
actor_critic_robot1.recurrent_hidden_state_size)
rollouts_robot2 = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
[obs_robot_len], action_space_robot2,
actor_critic_robot2.recurrent_hidden_state_size)
if args.num_rollouts > 0:
rollouts_robot1.obs[0].copy_(torch.cat([obs_robot1 for _ in range(args.num_rollouts // args.num_processes)] + [obs_robot1[:(args.num_rollouts % args.num_processes)]], dim=0))
rollouts_robot2.obs[0].copy_(torch.cat([obs_robot2 for _ in range(args.num_rollouts // args.num_processes)] + [obs_robot2[:(args.num_rollouts % args.num_processes)]], dim=0))
else:
rollouts_robot1.obs[0].copy_(obs_robot1)
rollouts_robot2.obs[0].copy_(obs_robot2)
rollouts_robot1.to(device)
rollouts_robot2.to(device)
else:
rollouts = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
if args.num_rollouts > 0:
rollouts.obs[0].copy_(torch.cat([obs for _ in range(args.num_rollouts // args.num_processes)] + [obs[:(args.num_rollouts % args.num_processes)]], dim=0))
else:
rollouts.obs[0].copy_(obs)
rollouts.to(device)
deque_len = args.num_rollouts if args.num_rollouts > 0 else (args.num_processes if args.num_processes > 10 else 10)
if dual_robots:
episode_rewards_robot1 = deque(maxlen=deque_len)
episode_rewards_robot2 = deque(maxlen=deque_len)
else:
episode_rewards = deque(maxlen=deque_len)
start = time.time()
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
if args.algo == "acktr":
# use optimizer's learning rate since it's hard-coded in kfac.py
update_linear_schedule(agent.optimizer, j, num_updates, agent.optimizer.lr)
else:
if dual_robots:
update_linear_schedule(agent_robot1.optimizer, j, num_updates, args.lr)
update_linear_schedule(agent_robot2.optimizer, j, num_updates, args.lr)
else:
update_linear_schedule(agent.optimizer, j, num_updates, args.lr)
if args.algo == 'ppo' and args.use_linear_clip_decay:
if dual_robots:
agent_robot1.clip_param = args.clip_param * (1 - j / float(num_updates))
agent_robot2.clip_param = args.clip_param * (1 - j / float(num_updates))
else:
agent.clip_param = args.clip_param * (1 - j / float(num_updates))
reward_list_robot1 = [[] for _ in range(args.num_processes)]
reward_list_robot2 = [[] for _ in range(args.num_processes)]
for step in range(args.num_steps):
# Sample actions
# obs = self.apply_attack(obs, args.phi, args.epsilon)
with torch.no_grad():
if dual_robots:
value_robot1, action_robot1, action_log_prob_robot1, recurrent_hidden_states_robot1 = actor_critic_robot1.act(
rollouts_robot1.obs[step, :args.num_processes],
rollouts_robot1.recurrent_hidden_states[step, :args.num_processes],
rollouts_robot1.masks[step, :args.num_processes])
value_robot2, action_robot2, action_log_prob_robot2, recurrent_hidden_states_robot2 = actor_critic_robot2.act(
rollouts_robot2.obs[step, :args.num_processes],
rollouts_robot2.recurrent_hidden_states[step, :args.num_processes],
rollouts_robot2.masks[step, :args.num_processes])
else:
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step, :args.num_processes],
rollouts.recurrent_hidden_states[step, :args.num_processes],
rollouts.masks[step, :args.num_processes])
# Obser reward and next obs
if dual_robots:
action = torch.cat((action_robot1, action_robot2), dim=-1)
obs, reward, done, infos = envs.step(action)
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
for i, info in enumerate(infos):
reward_list_robot1[i].append(info['reward_robot1'])
reward_list_robot2[i].append(info['reward_robot2'])
reward_robot1 = torch.tensor([[info['reward_robot1']] for info in infos])
reward_robot2 = torch.tensor([[info['reward_robot2']] for info in infos])
else:
obs, reward, done, infos = envs.step(action)
for i, info in enumerate(infos):
if 'episode' in info.keys():
if dual_robots:
episode_rewards_robot1.append(np.sum(reward_list_robot1[i]))
episode_rewards_robot2.append(np.sum(reward_list_robot2[i]))
else:
episode_rewards.append(info['episode']['r'])
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0]
for done_ in done])
if dual_robots:
rollouts_robot1.insert(obs_robot1, recurrent_hidden_states_robot1, action_robot1, action_log_prob_robot1, value_robot1, reward_robot1, masks)
rollouts_robot2.insert(obs_robot2, recurrent_hidden_states_robot2, action_robot2, action_log_prob_robot2, value_robot2, reward_robot2, masks)
else:
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks)
if args.num_rollouts > 0 and (j % (args.num_rollouts // args.num_processes) != 0):
# Only update the policies when we have performed num_rollouts simulations
continue
with torch.no_grad():
if dual_robots:
next_value_robot1 = actor_critic_robot1.get_value(rollouts_robot1.obs[-1],
rollouts_robot1.recurrent_hidden_states[-1],
rollouts_robot1.masks[-1]).detach()
next_value_robot2 = actor_critic_robot2.get_value(rollouts_robot2.obs[-1],
rollouts_robot2.recurrent_hidden_states[-1],
rollouts_robot2.masks[-1]).detach()
else:
next_value = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
if dual_robots:
rollouts_robot1.compute_returns(next_value_robot1, args.use_gae, args.gamma, args.tau)
rollouts_robot2.compute_returns(next_value_robot2, args.use_gae, args.gamma, args.tau)
value_loss_robot1, action_loss_robot1, dist_entropy_robot1 = agent_robot1.update(rollouts_robot1)
value_loss_robot2, action_loss_robot2, dist_entropy_robot2 = agent_robot2.update(rollouts_robot2)
rollouts_robot1.after_update()
rollouts_robot2.after_update()
else:
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
if dual_robots:
save_model_robot1 = actor_critic_robot1
save_model_robot2 = actor_critic_robot2
if args.cuda:
save_model_robot1 = copy.deepcopy(actor_critic_robot1).cpu()
save_model_robot2 = copy.deepcopy(actor_critic_robot2).cpu()
save_model = [save_model_robot1, save_model_robot2,
getattr(get_vec_normalize(envs), 'ob_rms', None)]
else:
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model,
getattr(get_vec_normalize(envs), 'ob_rms', None)]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and (len(episode_rewards_robot1) > 1 if dual_robots else len(episode_rewards) > 1):
end = time.time()
if dual_robots:
print("Robot1 updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}".
format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards_robot1),
np.mean(episode_rewards_robot1),
np.median(episode_rewards_robot1),
np.min(episode_rewards_robot1),
np.max(episode_rewards_robot1), dist_entropy_robot1,
value_loss_robot1, action_loss_robot1))
print("Robot2 updates {}, Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
format(j, len(episode_rewards_robot2),
np.mean(episode_rewards_robot2),
np.median(episode_rewards_robot2),
np.min(episode_rewards_robot2),
np.max(episode_rewards_robot2), dist_entropy_robot2,
value_loss_robot2, action_loss_robot2))
else:
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards), dist_entropy,
value_loss, action_loss))
sys.stdout.flush()
if (args.eval_interval is not None
and len(episode_rewards) > 1
and j % args.eval_interval == 0):
eval_envs = make_vec_envs(
args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
vec_norm = get_vec_normalize(eval_envs)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = get_vec_normalize(envs).ob_rms
if dual_robots:
eval_episode_rewards_robot1 = []
eval_episode_rewards_robot2 = []
else:
eval_episode_rewards = []
obs = eval_envs.reset()
if dual_robots:
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
eval_recurrent_hidden_states_robot1 = torch.zeros(args.num_processes,
actor_critic_robot1.recurrent_hidden_state_size, device=device)
eval_recurrent_hidden_states_robot2 = torch.zeros(args.num_processes,
actor_critic_robot2.recurrent_hidden_state_size, device=device)
else:
eval_recurrent_hidden_states = torch.zeros(args.num_processes,
actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
eval_reward_list_robot1 = [[] for _ in range(args.num_processes)]
eval_reward_list_robot2 = [[] for _ in range(args.num_processes)]
while (len(eval_episode_rewards_robot1) < 10 if dual_robots else len(eval_episode_rewards) < 10):
with torch.no_grad():
if dual_robots:
_, action_robot1, _, eval_recurrent_hidden_states_robot1 = actor_critic_robot1.act(
obs_robot1, eval_recurrent_hidden_states_robot1, eval_masks, deterministic=True)
_, action_robot2, _, eval_recurrent_hidden_states_robot2 = actor_critic_robot2.act(
obs_robot2, eval_recurrent_hidden_states_robot2, eval_masks, deterministic=True)
else:
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)
# Obser reward and next obs
if dual_robots:
action = torch.cat((action_robot1, action_robot2), dim=-1)
obs, reward, done, infos = eval_envs.step(action)
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
for i, info in enumerate(infos):
eval_reward_list_robot1[i].append(info['reward_robot1'])
eval_reward_list_robot2[i].append(info['reward_robot2'])
else:
obs, reward, done, infos = eval_envs.step(action)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]
for done_ in done])
reset_rewards | |
mark_mean: array_like
array of the mean value of `data_arr`.
Only returned if `return_mean_std == True`
mark_std: array_like
array of the St. Dev. value of `data_arr`.
Only returned if `return_mean_std == True`
"""
## Creating dictionary for saving `sigma`s
sigma_dict = {}
for ii in range(len(perc_arr)):
sigma_dict[ii] = []
## Using Percentiles to estimate errors
if type_sigma=='perc':
for ii, perc_ii in enumerate(perc_arr):
mark_lower = num.nanpercentile(data_arr, 50.-(perc_ii/2.),axis=1)
mark_upper = num.nanpercentile(data_arr, 50.+(perc_ii/2.),axis=1)
# Saving to dictionary
sigma_dict[ii] = num.column_stack((mark_lower, mark_upper)).T
## Using standard deviations to estimate errors
if type_sigma=='std':
mean_val = num.nanmean(data_arr, axis=1)
std_val = num.nanstd( data_arr, axis=1)
for ii in range(len(perc_arr)):
mark_lower = mean_val - ((ii+1) * std_val)
mark_upper = mean_val + ((ii+1) * std_val)
# Saving to dictionary
sigma_dict[ii] = num.column_stack((mark_lower, mark_upper)).T
##
## Estimating mean and St. Dev. of `data_arr`
mark_mean = num.nanmean(data_arr, axis=1)
mark_std = num.nanstd (data_arr, axis=1)
if return_mean_std:
return sigma_dict, mark_mean, mark_std
else:
return sigma_dict
## --------- Analysis functions ------------##
def frac_prop_calc(df_bin_org, prop, param_dict, catl_keys_dict):
    """
    Computes the quenched fractions of satellites in a given mass bin,
    for both the observed catalogue and `itern_tot` shuffled realizations.

    Parameters
    ----------
    df_bin_org: pandas DataFrame
        Dataframe for the selected group/halo mass bin
    prop: string
        galaxy property being evaluated
    param_dict: python dictionary
        dictionary with input parameters and values
    catl_keys_dict: python dictionary
        dictionary containing keys for the galaxy properties in catalogue

    Returns
    ----------
    sat_quenched_frac: float
        quenched-fraction statistic (``param_dict['frac_stat']``) for the
        observed (unshuffled) centrals/satellites in this mass bin
    sat_quenched_frac_sh: numpy array, shape (``param_dict['itern_tot']``,)
        same statistic for each of the shuffled realizations
    """
    ## Constants - galaxy-type flags used in the catalogue
    Cens  = int(1)
    Sats  = int(0)
    itern = param_dict['itern_tot']
    ## Catalogue keys for group ID, group mass, and galaxy type
    gm_key      = catl_keys_dict['gm_key']
    id_key      = catl_keys_dict['id_key']
    galtype_key = catl_keys_dict['galtype_key']
    ##
    ## Selecting only the columns needed for the fraction statistics
    df_bin_mod = df_bin_org.copy()[[galtype_key, id_key, prop]]
    ##
    ## Normalizing `prop` by `prop_lim`, so "quenched" becomes `prop` > 1
    df_bin_mod.loc[:, prop] /= param_dict['prop_lim'][prop]
    ##
    ## Splitting into centrals and satellites
    cens_pd_org = df_bin_mod.loc[(df_bin_mod[galtype_key] == Cens)].copy().reset_index()
    sats_pd_org = df_bin_mod.loc[(df_bin_mod[galtype_key] == Sats)].copy().reset_index()
    ##
    ## Quenched satellite fraction of the observed catalogue
    sat_quenched_frac = frac_stat_calc(cens_pd_org,
                                       sats_pd_org,
                                       prop,
                                       catl_keys_dict,
                                       param_dict,
                                       frac_stat=param_dict['frac_stat'])
    ##
    ## Quenched fractions for the shuffled realizations
    sat_quenched_frac_sh = num.zeros((itern,))
    # ProgressBar properties
    if param_dict['prog_bar']:
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        # NOTE(review): maxval is 10 * itern while `update` is only called
        # with ii < itern, so the bar never reaches 100% -- confirm intended.
        pbar_mock = ProgressBar(widgets=widgets, maxval=10 * itern).start()
    ## Iterating `itern` times and calculating quenched fractions
    for ii in range(itern):
        sat_quenched_frac_sh[ii] = frac_stat_calc(cens_pd_org,
                                                  sats_pd_org,
                                                  prop,
                                                  catl_keys_dict,
                                                  param_dict,
                                                  shuffle=True,
                                                  frac_stat=param_dict['frac_stat'])
        if param_dict['prog_bar']:
            pbar_mock.update(ii)
    if param_dict['prog_bar']:
        pbar_mock.finish()
    # `.T` is a no-op on a 1-D array; kept for backwards compatibility.
    return sat_quenched_frac, sat_quenched_frac_sh.T
def _quenched_fraction(sats_df, prop):
    """Fraction of quenched (`prop` > 1) satellites in `sats_df`; NaN if empty."""
    n_tot = len(sats_df)
    if n_tot == 0:
        return num.nan
    return len(sats_df.loc[sats_df[prop] > 1]) / float(n_tot)


def frac_stat_calc(cens_pd_org, sats_pd_org, prop, catl_keys_dict, param_dict,
    frac_stat='diff', shuffle=False):
    """
    Computes quenched fractions of satellites for a given galaxy property
    `prop` in a given mass bin.

    Parameters
    ----------
    cens_pd_org: pandas DataFrame
        dataframe with only central galaxies in the given group mass bin.
        Centrals belong to groups with galaxies >= `param_dict['ngals_min']`
    sats_pd_org: pandas DataFrame
        dataframe with only satellite galaxies in the given group mass bin.
        Satellites belong to groups with galaxies >= `param_dict['ngals_min']`
    prop: string
        galaxy property being analyzed. Assumed normalized so that `prop` > 1
        means 'passive/quenched' and `prop` <= 1 means 'active'.
    catl_keys_dict: python dictionary
        dictionary containing keys for the galaxy properties in catalogue
    param_dict: python dictionary
        dictionary with input parameters and values. Only
        `param_dict['shuffle_marks']` is read, and only when `shuffle=True`.
    frac_stat: string, optional (default = 'diff')
        statistics to use to evaluate the conformity signal
        Options:
            - 'diff' : Takes the difference between P(sat=q|cen=q) and
                        P(sat=q|cen=act)
            - 'ratio': Takes the ratio between P(sat=q|cen=q) and
                        P(sat=q|cen=act)
    shuffle: boolean, optional (default = False)
        option for shuffling the galaxies' properties among themselves, i.e.
        centrals among centrals, and satellites among satellites.

    Returns
    -------
    frac_res: float
        'ratio' or 'difference' between P(sat=q|cen=q) and P(sat=q|cen=act).
        NaN when a central population has no satellites.

    Raises
    ------
    ValueError
        If `frac_stat` is neither 'diff' nor 'ratio'.
    """
    ## Keys for group/halo ID, mass, and galaxy type.
    ## `gm_key` and `galtype_key` are read (validating the dict) but not
    ## otherwise used here.
    gm_key      = catl_keys_dict['gm_key']
    id_key      = catl_keys_dict['id_key']
    galtype_key = catl_keys_dict['galtype_key']
    ## Work on copies so shuffling never mutates the caller's dataframes
    cens_pd = cens_pd_org.copy()
    sats_pd = sats_pd_org.copy()
    ## Optionally shuffle `prop` within centrals and/or satellites
    if shuffle:
        marks = param_dict['shuffle_marks']
        # 'cen_sh' shuffles centrals, 'sat_sh' shuffles satellites,
        # 'censat_sh' shuffles both populations (independently).
        if marks in ('cen_sh', 'censat_sh'):
            cens_prop_sh = cens_pd[prop].copy().values
            num.random.shuffle(cens_prop_sh)
            cens_pd.loc[:, prop] = cens_prop_sh
        if marks in ('sat_sh', 'censat_sh'):
            sats_prop_sh = sats_pd[prop].copy().values
            num.random.shuffle(sats_prop_sh)
            sats_pd.loc[:, prop] = sats_prop_sh
    ##
    ## Active (`prop` <= 1) and passive (`prop` > 1) centrals
    cens_act = cens_pd.loc[cens_pd[prop] <= 1]
    cens_pas = cens_pd.loc[cens_pd[prop] > 1]
    ##
    ## Satellites residing in groups/halos with `act` and `pas` centrals
    sats_c_act = sats_pd.loc[sats_pd[id_key].isin(cens_act[id_key].values)]
    sats_c_pas = sats_pd.loc[sats_pd[id_key].isin(cens_pas[id_key].values)]
    ##
    ## Quenched fraction of satellites around each type of central
    frac_sat_pas_cen_act = _quenched_fraction(sats_c_act, prop)
    frac_sat_pas_cen_pas = _quenched_fraction(sats_c_pas, prop)
    ##
    ## Evaluating `frac_stat`
    if frac_stat == 'diff':
        frac_res = frac_sat_pas_cen_pas - frac_sat_pas_cen_act
    elif frac_stat == 'ratio':
        # NOTE(review): raises ZeroDivisionError when the quenched fraction
        # around active centrals is exactly zero -- unchanged from the
        # original behavior.
        frac_res = frac_sat_pas_cen_pas / frac_sat_pas_cen_act
    else:
        # Original code left `frac_res` unbound here (UnboundLocalError);
        # fail fast with a descriptive error instead.
        raise ValueError(
            "`frac_stat` must be 'diff' or 'ratio', got '{0}'".format(frac_stat))
    return frac_res
def gm_fractions_calc(catl_pd, catl_name, param_dict, proj_dict):
"""
Computes the 'quenched' satellite fractions for galaxy groups.
Splits the sample into group mass bins, and determines the quenched
fraction of satellite for a given galaxy property.
Parameters
----------
catl_pd: pandas DataFrame
DataFrame with information on catalogue
catl_name: string
name of the `catl_pd`
param_dict: python dictionary
dictionary with `project` variables
proj_dict: python dictionary
Dictionary with current and new paths to project directories
"""
Prog_msg = param_dict['Prog_msg']
### Catalogue Variables
# `Group mass`, `groupid`, and `galtype` keys
gm_key, id_key, galtype_key = cmcu.catl_keys(catl_kind=param_dict['catl_kind'],
return_type='list',
perf_opt=param_dict['perf_opt'])
catl_keys_dict = cmcu.catl_keys( catl_kind=param_dict['catl_kind'],
return_type='dict',
perf_opt=param_dict['perf_opt'])
gm_key = catl_keys_dict['gm_key']
id_key = catl_keys_dict['id_key']
galtype_key = catl_keys_dict['galtype_key']
# ssfr and mstar keys
ssfr_key, mstar_key = cmcu.catl_keys_prop(catl_kind=param_dict['catl_kind'],
catl_info='members')
# Galaxy Properties
if param_dict['catl_kind']=='data':
pd_keys = ['logssfr', 'g_r', 'sersic']
elif param_dict['catl_kind']=='mocks':
# pd_keys = ['logssfr']
pd_keys = ['logssfr', 'g_r', 'sersic']
# Limits for each galaxy property
prop_lim = {'logssfr':-11,
'sersic':3,
'g_r':0.75}
param_dict['prop_lim'] = prop_lim
# Cleaning catalogue with groups of N > `ngals_min`
catl_pd_clean = cmcu.sdss_catl_clean_nmin(catl_pd, param_dict['catl_kind'],
nmin=param_dict['ngals_min'])
### Mass limits
GM_min = catl_pd_clean[gm_key].min()
GM_max = catl_pd_clean[gm_key].max()
GM_arr = cstats.Bins_array_create([GM_min,GM_max], param_dict['Mg_bin'])
GM_bins = [[GM_arr[ii],GM_arr[ii+1]] for ii in range(GM_arr.shape[0]-1)]
GM_bins = num.asarray(GM_bins)
GM_keys = ['{0:.2f}_{1:.2f}'.format(GM_arr[xx],GM_arr[xx+1])\
for xx in range(len(GM_arr)-1)]
### Pickle file
p_file = [ proj_dict['pickdir'] , param_dict['fig_idx'] ,
catl_name , param_dict['sample'] ,
param_dict['corr_type'] , param_dict['prop_log'] ,
param_dict['shuffle_marks'], param_dict['ngals_min'],
param_dict['perf_str'] ]
p_fname = '{0}/{1}_{2}_Mr{3}_{4}_{5}_{6}_{7}_{8}.p'
p_fname = p_fname.format(*p_file)
##
## Checking if file exists
if (os.path.isfile(p_fname)) and (param_dict['remove_files']):
os.remove(p_fname)
print('{0} `p_fname` ({1}) removed! Calculating MCF statistics!'.format(
Prog_msg, p_fname))
## Dictionary for storing results for each GM bin and galaxy property
frac_gm_dict = dict(zip(GM_keys, [[] for x in range(len(GM_keys))]))
stat_vals = [num.zeros((len(GM_keys))), copy.deepcopy(frac_gm_dict)]
GM_prop_dict = dict(zip(pd_keys,[list(copy.deepcopy(stat_vals)) \
for x in range(len(pd_keys))]))
## Looping ovr each GM bin (if file does not exist for the given catalogue)
if not (os.path.isfile(p_fname)):
## Looping over mass bins
for ii, GM_ii in enumerate(GM_bins):
# GM Key label
GM_key = GM_keys[ii]
# GM string
GMbin_min, GMbin_max = GM_ii
if param_dict['perf_opt']:
print('\n{0} Halo Mass range: {1}'.format(Prog_msg, GM_keys))
else:
print('\n{0} Group Mass range: {1}'.format(Prog_msg, GM_key))
df_bin_org = catl_pd_clean.loc[ (catl_pd_clean[gm_key] >= GMbin_min) &\
(catl_pd_clean[gm_key] < GMbin_max)].copy()
df_bin_org.reset_index(inplace=True, drop=True)
##
## Looping over galaxy properties
for jj, prop in enumerate(pd_keys):
print('{0} >> Galaxy Prop: {1}'.format(Prog_msg, prop))
( sat_quenched_frac,
sat_quenched_frac_sh) = frac_prop_calc( df_bin_org ,
prop ,
param_dict ,
catl_keys_dict)
##
## Saving results to dictionary
GM_prop_dict[prop][0][ii] = sat_quenched_frac
GM_prop_dict[prop][1][GM_key] = sat_quenched_frac_sh
##
## Saving `GM_prop_dict` to Pickle file
print('{0} Saving data to Pickle: \n\t{1}\n'.format(Prog_msg, p_fname))
p_data = [param_dict, GM_prop_dict, GM_arr, GM_bins, GM_keys]
pickle.dump(p_data, open(p_fname,'wb'))
##
## Showing path to file
print('{0} Data saved to Pickle: | |
"august" : {
"nodetype" : "namednumber",
"number" : "8"
},
"september" : {
"nodetype" : "namednumber",
"number" : "9"
},
"october" : {
"nodetype" : "namednumber",
"number" : "10"
},
"november" : {
"nodetype" : "namednumber",
"number" : "11"
},
"december" : {
"nodetype" : "namednumber",
"number" : "12"
},
},
},
"access" : "readwrite",
"description" :
"""Daylight saving time service end month.""",
}, # scalar
"daylightSavingTimeEndDateHour" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.8.10.9",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""Daylight saving time service end time.""",
}, # scalar
"sysMgmt" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.9",
}, # node
"sysMgmtConfigSave" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.9.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"config" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""If setting value is given, the variable write index will be set and running-config will be written to the assigned configuration file.
If not, running-config will be written to the booting one.""",
}, # scalar
"sysMgmtBootupConfig" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.9.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"config" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""The setting value (read index) will be written into non-volatile memory.
While rebooting, the variable write index is equal to read index initially.
You can change the value of write index by CLI / MIB.""",
}, # scalar
"sysMgmtReboot" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.9.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"nothing" : {
"nodetype" : "namednumber",
"number" : "0"
},
"reboot" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""Reboot switch from SNMP. 1:Reboot, 0:Nothing""",
}, # scalar
"sysMgmtDefaultConfig" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.9.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"nothing" : {
"nodetype" : "namednumber",
"number" : "0"
},
"reset_to_default" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""Erase running config and reset to default.""",
}, # scalar
"sysMgmtLastActionStatus" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.9.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"none" : {
"nodetype" : "namednumber",
"number" : "0"
},
"success" : {
"nodetype" : "namednumber",
"number" : "1"
},
"fail" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readonly",
"description" :
"""Display status of last mgmt action.""",
}, # scalar
"sysMgmtCPUUsage" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.9.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""Show device CPU load in %, it's the snapshot of CPU load when
getting the values.""",
}, # scalar
"sysMgmtCounterReset" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.9.9",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"enable" : {
"nodetype" : "namednumber",
"number" : "1"
},
"disable" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""Reset all port counters.""",
}, # scalar
"layer2Setup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.192.168.127.126.10",
}, # node
"vlanTypeSetup" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.10.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"dot1Q" : {
"nodetype" : "namednumber",
"number" : "1"
},
"port_based" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"igmpSnoopingStateSetup" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"tagVlanPortIsolationState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""This setting will also show the result in the portIsolationState""",
}, # scalar
"stpState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"tagVlanIngressCheckState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"igmpFilteringStateSetup" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"unknownMulticastFrameForwarding" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.7",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"flooding" : {
"nodetype" : "namednumber",
"number" : "1"
},
"drop" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"multicastGrpHostTimeOut" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"igmpsnp8021pPriority" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.10",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""Set the 802.1p priority of control messages for igmp-snooping(0~8, 8-No Change)""",
}, # scalar
"igmpsnpVlanMode" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.10.11",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"auto" : {
"nodetype" : "namednumber",
"number" : "1"
},
"fixed" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"stpMode" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.12",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"rstp" : {
"nodetype" : "namednumber",
"number" : "1"
},
"mstp" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"igmpsnpVlanTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.10.13",
"status" : "current",
"description" :
"""""",
}, # table
"igmpsnpVlanEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.13.1",
"create" : "true",
"status" : "current",
"linkage" : [
"igmpsnpVid",
],
"description" :
"""An entry in IgmpsnpVlanTable.""",
}, # row
"igmpsnpVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.13.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"igmpsnpVlanName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.10.13.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"igmpsnpVlanRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.13.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"igmpsnpQuerierMode" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.10.14",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
| |
<filename>src/operation.py<gh_stars>1-10
# -*- coding:utf-8 -*-
"""
* Copyright@2016 Jingtum Inc. or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from config import Config
from account import FinGate
from logger import logger
from server import APIServer
from serialize import JingtumBaseDecoder
from account import path_convert, Amount, Memo
class JingtumOperException(Exception):
    """Raised when a Jingtum operation is mis-configured or given invalid input."""
class Operation(object):
    """Base class for all Jingtum REST operations.

    Holds the source wallet credentials, a client resource id, and the
    API helper used to submit the request that subclasses build in
    their ``oper()`` method.
    """

    def __init__(self, wallet):
        super(Operation, self).__init__()
        self.src_address = wallet.address
        self.src_secret = wallet.secret
        self.is_sync = False
        self.client_resource_id = self.getNextUUID()
        self.api_helper = APIServer()
        # Imported lazily: g_test_evn is a module-level flag toggled at runtime.
        from server import g_test_evn
        if g_test_evn:
            self.api_helper.setTest(True)
        self.validateAddress(self.src_address)

    def getNextUUID(self):
        """Return a fresh client resource id from FinGate."""
        return FinGate.getNextUUID()

    def validateAddress(self, address):
        """Raise JingtumOperException when the address checksum is invalid."""
        decoded = JingtumBaseDecoder.decode_base(address, 25)
        if not JingtumBaseDecoder.verify_checksum(decoded):
            raise JingtumOperException("Invalid address: %s" % str(address))

    def submit(self, callback=None):
        """Send the request built by ``oper()``.

        Without a callback the request is posted synchronously and the
        helper's response is returned; with a callback it is posted
        asynchronously and None is returned.
        """
        from server import g_test_evn
        if g_test_evn:
            self.api_helper.setTest(True)
        if callback is not None:
            self.api_helper.postasyn(*self.oper(), callback=callback)
            return None
        return self.api_helper.post(*self.oper(), callback=callback)

    def setValidate(self, is_sync):
        """Request ledger-validated (synchronous) submission when True."""
        self.is_sync = is_sync

    def setClientId(self, client_resource_id):
        """Override the auto-generated client resource id."""
        self.client_resource_id = client_resource_id
class PaymentOperation(Operation):
    """Build and submit a payment from the source wallet to a destination.

    Callers must supply an amount (setAmount) and a destination address
    (setDestAddress) before oper()/submit().
    """

    def __init__(self, wallet):
        super(PaymentOperation, self).__init__(wallet)
        self.amt = {}
        self.dest_address = ""
        self.path_convert = path_convert
        self.path = None
        self.memos = []

    def para_required(func):
        # Decorator: refuse to build the request until all mandatory
        # fields have been supplied by the caller.
        def _func(*args, **args2):
            if len(args[0].amt) == 0:
                raise JingtumOperException("addAmount first before oper.")
            elif args[0].dest_address == "":
                raise JingtumOperException("addDestAddress first before oper.")
            elif args[0].src_secret == "":
                raise JingtumOperException("addSrcSecret first before oper.")
            else:
                return func(*args, **args2)
        return _func

    def setAmount(self, amount):
        """Set the destination amount (dict of value/currency/counterparty)."""
        self.amt = amount

    def setDestAddress(self, dest_address):
        """Set the receiving account address."""
        self.dest_address = dest_address

    def setChoice(self, key):
        """Select a pre-defined payment path by key, when one exists."""
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # operator is equivalent and also works on Python 2.
        if key in self.path_convert:
            self.path = self.path_convert[key]

    def setMemo(self, value):
        """Attach a memo to the payment (may be called repeatedly)."""
        self.memos.append(value)

    @para_required
    def oper(self):
        """Build (url, parameters) for the payments request."""
        _payment = {}
        _payment["destination_amount"] = self.amt
        _payment["source_account"] = self.src_address
        _payment["destination_account"] = self.dest_address
        if len(self.memos) > 0:
            _payment["memos"] = self.memos
        if self.path is not None:
            _payment["paths"] = self.path
        _para = {}
        _para["secret"] = self.src_secret
        _para["payment"] = _payment
        _para["client_resource_id"] = self.client_resource_id
        if self.is_sync:
            url = 'accounts/{address}/payments?validated=true'
        else:
            url = 'accounts/{address}/payments'
        url = url.format(address=self.src_address)
        return url, _para
class OrderOperation(Operation):
    """Place a buy or sell order on a base/counter currency pair."""

    SELL = "sell"
    BUY = "buy"

    def __init__(self, wallet):
        super(OrderOperation, self).__init__(wallet)
        self.order_type = "buy"
        self.base_currency, self.base_issuer = None, None
        self.counter_currency, self.counter_issuer = None, None
        self.amount = 0
        self.price = 0

    def para_required(func):
        # Decorator: refuse to build the request until all mandatory
        # fields have been supplied by the caller.
        def _func(*args, **args2):
            if args[0].counter_currency is None or args[0].counter_issuer is None:
                raise JingtumOperException("setPair first before oper.")
            elif args[0].base_currency is None or args[0].base_issuer is None:
                raise JingtumOperException("setPair first before oper.")
            elif args[0].amount == 0:
                raise JingtumOperException("setAmount first before oper.")
            elif args[0].price == 0:
                raise JingtumOperException("setPrice first before oper.")
            elif args[0].src_secret == "":
                raise JingtumOperException("addSrcSecret first before oper.")
            else:
                return func(*args, **args2)
        return _func

    def setPair(self, pair):
        """Parse a "base/counter" pair; each side is "CUR" or "CUR:issuer"."""
        try:
            base, counter = pair.split("/")
            if base.find(":") > 0:
                self.base_currency, self.base_issuer = base.split(":")
            else:
                self.base_currency, self.base_issuer = base, ""
            if counter.find(":") > 0:
                self.counter_currency, self.counter_issuer = counter.split(":")
            else:
                self.counter_currency, self.counter_issuer = counter, ""
        # BUG FIX: `except Exception, e` is Python-2-only syntax and a
        # SyntaxError on Python 3; the binding was unused so it is dropped.
        except Exception:
            raise JingtumOperException("setPair para Invalid.")

    def setType(self, order_type):
        """Set order side: OrderOperation.BUY or OrderOperation.SELL."""
        self.order_type = order_type

    def setAmount(self, amount):
        """Set the amount of base currency to trade."""
        self.amount = amount

    def setPrice(self, price):
        """Set the price in counter currency per unit of base currency."""
        self.price = price

    @para_required
    def oper(self):
        """Build (url, parameters) for the orders request.

        For a sell, we give base currency (taker_gets) and receive
        counter currency (taker_pays); for a buy, the reverse.
        """
        _order = {}
        takergets, takerpays = {}, {}
        if self.order_type == "sell":
            takergets["value"] = str(self.amount)
            takergets["currency"] = str(self.base_currency)
            takergets["counterparty"] = str(self.base_issuer)
            takerpays["value"] = str(self.amount * self.price)
            takerpays["currency"] = str(self.counter_currency)
            takerpays["counterparty"] = str(self.counter_issuer)
        else:
            takerpays["value"] = str(self.amount)
            takerpays["currency"] = str(self.base_currency)
            takerpays["counterparty"] = str(self.base_issuer)
            takergets["value"] = str(self.amount * self.price)
            takergets["currency"] = str(self.counter_currency)
            takergets["counterparty"] = str(self.counter_issuer)
        _order["type"] = self.order_type
        _order["taker_pays"] = takerpays
        _order["taker_gets"] = takergets
        _para = {}
        _para["secret"] = self.src_secret
        _para["order"] = _order
        if self.is_sync:
            url = 'accounts/{address}/orders?validated=true'
        else:
            url = 'accounts/{address}/orders'
        url = url.format(address=self.src_address)
        return url, _para
class CancelOrderOperation(Operation):
    """Cancel a previously submitted order identified by its sequence number."""

    def __init__(self, wallet):
        super(CancelOrderOperation, self).__init__(wallet)
        self.order_num = 0

    def para_required(func):
        # Decorator: refuse to build the request until the mandatory
        # fields have been supplied by the caller.
        def _checked(*args, **kwargs):
            oper_obj = args[0]
            if oper_obj.order_num == 0:
                raise JingtumOperException("setOrderNum first before oper.")
            if oper_obj.src_secret == "":
                raise JingtumOperException("addSrcSecret first before oper.")
            return func(*args, **kwargs)
        return _checked

    def setSequence(self, order_num):
        """Record the sequence number of the order to cancel."""
        self.order_num = order_num

    @para_required
    def oper(self):
        """Build (url, parameters, method) for the DELETE orders request."""
        payload = {"secret": self.src_secret}
        if self.is_sync:
            template = 'accounts/{address}/orders/{order}?validated=true'
        else:
            template = 'accounts/{address}/orders/{order}'
        full_url = template.format(address=self.src_address,
                                   order=self.order_num)
        return full_url, payload, "DELETE"
class SettingsOperation(Operation):
    """Update account settings (flags, keys, transfer rate, etc.)."""

    def __init__(self, wallet):
        super(SettingsOperation, self).__init__(wallet)
        self.settings = {}

    def setDomain(self, domain):
        """Set the account's domain string."""
        self.settings["domain"] = domain

    def setTransferRate(self, rate):
        """Set the fee rate charged when third parties transfer issued currency."""
        self.settings["transfer_rate"] = rate

    def setPasswordSpent(self, b=False):
        self.settings["password_spent"] = b

    def setRequireDestinationTag(self, b=False):
        self.settings["require_destination_tag"] = b

    def setRequireAuthorization(self, b=False):
        self.settings["require_authorization"] = b

    def setDisallowSwt(self, b=False):
        self.settings["disallow_swt"] = b

    def setEmailHash(self, hash_id):
        self.settings["email_hash"] = hash_id

    def setWalletLocator(self, wallet_locator):
        self.settings["wallet_locator"] = wallet_locator

    def setWalletSize(self, wallet_size):
        self.settings["wallet_size"] = wallet_size

    def setMessageKey(self, message_key):
        self.settings["message_key"] = message_key

    def setRegularKey(self, regular_key):
        self.settings["regular_key"] = regular_key

    def setDisableMaster(self, b=False):
        self.settings["disable_master"] = b

    def oper(self):
        """Build (url, parameters) for the settings request."""
        payload = {
            "secret": self.src_secret,
            "settings": self.settings,
        }
        if self.is_sync:
            template = 'accounts/{address}/settings?validated=true'
        else:
            template = 'accounts/{address}/settings'
        return template.format(address=self.src_address), payload
class RelationsOperation(Operation):
    """Create or update a relation with a counterparty account."""

    def __init__(self, wallet):
        super(RelationsOperation, self).__init__(wallet)
        # BUG FIX: was None, which made the `len(args[0].amt)` check in
        # para_required raise TypeError instead of the intended
        # JingtumOperException when setAmount had not been called.  An
        # empty dict matches RemoveRelationsOperation and keeps the
        # len() == 0 semantics.
        self.amt = {}
        self.counterparty = ""
        self.relation_type = ""

    def para_required(func):
        # Decorator: refuse to build the request until all mandatory
        # fields have been supplied by the caller.
        def _func(*args, **args2):
            if len(args[0].amt) == 0:
                raise JingtumOperException("addAmount first before oper.")
            elif args[0].relation_type == "":
                raise JingtumOperException("setRelationType first before oper.")
            elif args[0].counterparty == "":
                raise JingtumOperException("setCounterparty first before oper.")
            elif args[0].src_secret == "":
                raise JingtumOperException("addSrcSecret first before oper.")
            else:
                return func(*args, **args2)
        return _func

    def setAmount(self, amt):
        """Store the amount, renaming its "value" key to "limit" as the API expects."""
        amt.update(limit=amt.pop("value"))
        self.amt = amt

    def setCounterparty(self, counterparty):
        """Set the counterparty account address."""
        self.counterparty = counterparty

    def setType(self, relation_type):
        """Set the relation type (e.g. trust/authorize/freeze)."""
        self.relation_type = relation_type

    @para_required
    def oper(self):
        """Build (url, parameters) for the relations request."""
        _para = {}
        _para["secret"] = self.src_secret
        _para["type"] = self.relation_type
        _para["counterparty"] = self.counterparty
        _para["amount"] = self.amt
        if self.is_sync:
            url = 'accounts/{address}/relations?validated=true'
        else:
            url = 'accounts/{address}/relations'
        url = url.format(address=self.src_address)
        return url, _para
class RemoveRelationsOperation(Operation):
    """Delete an existing relation with a counterparty account."""

    def __init__(self, wallet):
        super(RemoveRelationsOperation, self).__init__(wallet)
        self.amt = {}
        self.counterparty = ""
        self.relation_type = ""

    def para_required(func):
        # Decorator: refuse to build the request until all mandatory
        # fields have been supplied by the caller.
        def _checked(*args, **kwargs):
            oper_obj = args[0]
            if len(oper_obj.amt) == 0:
                raise JingtumOperException("addAmount first before oper.")
            if oper_obj.relation_type == "":
                raise JingtumOperException("setRelationType first before oper.")
            if oper_obj.counterparty == "":
                raise JingtumOperException("setCounterparty first before oper.")
            if oper_obj.src_secret == "":
                raise JingtumOperException("addSrcSecret first before oper.")
            return func(*args, **kwargs)
        return _checked

    def setAmount(self, amt):
        """Store the amount, renaming its "value" key to "limit" as the API expects."""
        amt.update(limit=amt.pop("value"))
        self.amt = amt

    def setCounterparty(self, counterparty):
        """Set the counterparty account address."""
        self.counterparty = counterparty

    def setType(self, relation_type):
        """Set the relation type (e.g. trust/authorize/freeze)."""
        self.relation_type = relation_type

    @para_required
    def oper(self):
        """Build (url, parameters, method) for the DELETE relations request."""
        payload = {
            "secret": self.src_secret,
            "type": self.relation_type,
            "counterparty": self.counterparty,
            "amount": self.amt,
        }
        full_url = 'accounts/{address}/relations'.format(
            address=self.src_address)
        return full_url, payload, "DELETE"
class AddTrustLine(Operation):
def __init__(self, wallet):
super(AddTrustLine, self).__init__(wallet)
self.counterparty = ""
self.currency = ""
self.frozen = False
def para_required(func):
def _func(*args, **args2):
if len(args[0].counterparty) == 0:
#logger.error("setCounterparty first:" + func.__name__)
raise JingtumOperException("setCounterparty first before oper.")
elif args[0].currency == "":
#logger.error("setCurrency first:" + func.__name__)
raise JingtumOperException("setCurrency first before oper.")
elif args[0].src_secret == "":
#logger.error("addDestAddress first:" + func.__name__)
raise JingtumOperException("addSrcSecret first before oper.")
else:
back = func(*args, **args2)
return back
return _func
def setCounterparty(self, counterparty):
self.counterparty = counterparty
def setLimit(self, limit):
self.trust_limit = limit
def setCurrency(self, currency):
self.currency = currency
def setTrustlineFrozen(self, frozen):
self.frozen = frozen
@para_required
def oper(self):
_trust = {}
_trust["limit"] = self.trust_limit
_trust["currency"] = self.currency
_trust["counterparty"] = self.counterparty
_trust["account_trustline_frozen"] = self.frozen
_para = {}
_para["secret"] | |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from zzz_perception_msgs/TrackingBox.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import zzz_perception_msgs.msg
import geometry_msgs.msg
class TrackingBox(genpy.Message):
_md5sum = "8c42450ea25865d31ecef09127b75764"
_type = "zzz_perception_msgs/TrackingBox"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# Defines a box-shaped 3D tracking result.
# Unique id to determine the identification of tracked object.
# This field could be a increasing number from zero or random hash number
uint64 uid
# Defines the (existence) confidence of the object [-1 ~ 1].
# The confidence can be negative to demonstrate that this object is invalid.
# This confidence should not be fed into decision system. It should be used for validation purpose.
float32 confidence
# Describe several classification result for the object
# This field is required to be sorted in descending order of scores
ObjectClass[] classes
# This field contains the behavior identification based on light signal or hand signal
ObjectSignals signal
# Current 3D bounding box.
BoundingBox bbox
# Estimated 3D velocity and accelaration
geometry_msgs/TwistWithCovariance twist
geometry_msgs/AccelWithCovariance accel
# This field is for store auxiliary text or data
string comments
================================================================================
MSG: zzz_perception_msgs/ObjectClass
# The size of (in meters) the bounding box surrounding the object's center pose.
# The unique numeric classification ID of object detected
uint32 classid
# The probability or confidence value of the detected object. By convention, this value should lie in the range 0~1.
float32 score
# Other information about the class (e.g. class name). Only for debug
string comments
##############################################################
### Here is a hierarchical table of all included types ###
##############################################################
# Hierarchy is encoded in a 32-bit integer. Each 8 bit stand for a level, and leftmost 8 bit is the top level
uint32 UNKNOWN = 0 # 0x0000
uint32 UNKNOWN_DYNAMIC = 16 # 0x0010
uint32 UNKNOWN_STATIC = 32 # 0x0020
uint32 VEHICLE = 1 # 0x0001
uint32 VEHICLE_PASSENGER = 17 # 0x0011, normal passenger_vehicles
uint32 VEHICEL_VAN = 33 # 0x0021
uint32 VEHICLE_TRUCK = 49 # 0x0031
uint32 VEHICLE_BUS = 65 # 0x0041
uint32 VEHICLE_SCHOOLBUS = 321 # 0x0141
uint32 VEHICLE_SCHOOLBUS_STOP = 4417 # 0x1141
uint32 VEHICLE_EMERGENCY = 81 # 0x0051, emergency vehicles, including
uint32 VEHICLE_EMERGENCY_POLICE = 337 # 0x0151
uint32 VEHICLE_EMERGENCY_POLICE_FLASH = 4433 # 0x1151
uint32 VEHICLE_EMERGENCY_FIRE = 593 # 0x0251
uint32 VEHICLE_EMERGENCY_FIRE_FLASH = 4689 # 0x1251
uint32 VEHICLE_EMERGENCY_CIVIL = 849 # 0x0351, including utility vehicle and tow trucks
uint32 VEHICLE_EMERGENCY_CIVIL_FLASH = 4945 # 0x1351
uint32 HUMAN = 2 # 0x0002
uint32 HUMAN_PEDESTRIAN = 18 # 0x0012
uint32 HUMAN_ROADWORKER = 34 # 0x0022
uint32 CYCLIST = 3 # 0x0003
uint32 CYCLIST_BICYCLE = 19 # 0x0013
uint32 CYCLIST_MOTORCYCLE = 35 # 0x0023
uint32 CYCLIST_TRICYCLE = 51 # 0x0033
uint32 ANIMAL = 4 # 0x0004
uint32 ANIMAL_DOGLIKE = 20 # 0x0014, includes dog, cat, wolf, etc.
uint32 ANIMAL_DEERLIKE = 36 # 0x0024, includes deer, etc.
uint32 ANIMAL_COWLIKE = 52 # 0x0034, includes cow, horse, pig, etc.
uint32 ROAD_OBJECT = 5 # 0x0005, objects in road area
uint32 ROAD_TRAFFIC_CONE = 21 # 0x0015, traffic cone
uint32 ROAD_TRAFFIC_BLOCKER = 37 # 0x0025, traffic blocker, e.g. "Road Closed" sign
uint32 ROADSIDE_OBJECT = 6 # 0x0006, objects in road side
uint32 ROADSIDE_TRAFFIC_LIGHT = 22 # 0x0016
uint32 ROADSIDE_TRAFFIC_SIGN = 38 # 0x0026
uint32 ROADSIDE_TREE = 54 # 0x0036, including all roadside vegetation
uint32 LEVEL_MASK_0 = 15 # 0x000f
uint32 LEVEL_MASK_1 = 255 # 0x00ff
uint32 LEVEL_MASK_2 = 4095 # 0x0fff
uint32 LEVEL_MASK_3 = 65535 # 0xffff
================================================================================
MSG: zzz_perception_msgs/ObjectSignals
# This message is used to represent detected vehicle light signals or human hand signals
# Signal flags. Multiple signal emission can exists in the same time.
uint16 flags
uint16 UNKNOWN = 0 # 0x00
uint16 NONE = 16 # 0x10
# This field is related to https://en.wikipedia.org/wiki/Automotive_lighting
uint16 VEHICLE_SIGNAL = 1 # 0x01
uint16 VEHICLE_SIGNAL_LEFT_TURN = 17 # 0x11
uint16 VEHICLE_SIGNAL_RIGHT_TURN = 33 # 0x21
uint16 VEHICLE_SIGNAL_HAZARD = 49 # 0x31
uint16 VEHICLE_SIGNAL_BRAKE = 65 # 0x41
uint16 VEHICLE_SIGNAL_REVERSE = 81 # 0x51
# This field is related to https://en.wikipedia.org/wiki/Traffic_light#Single_aspects
uint16 TRAFFIC_LIGHT = 2 # 0x02
uint16 TRAFFIC_LIGHT_RED = 18 # 0x12
uint16 TRAFFIC_LIGHT_YELLOW = 34 # 0x22
uint16 TRAFFIC_LIGHT_GREEN = 50 # 0x32
uint16 TRAFFIC_LIGHT_GREEN_LEFT_TURN = 66 # 0x42
uint16 TRAFFIC_LIGHT_GREEN_RIGHT_TURN = 66 # 0x42
# Confidence of the signal detection
float32 score
================================================================================
MSG: zzz_perception_msgs/BoundingBox
# A 3D bounding box that can be positioned and rotated about its center (6 DOF). Dimensions of this box are in meters
# The position and orientation of the rigid body center
geometry_msgs/PoseWithCovariance pose
# The size of (in meters) the bounding box surrounding the object's center pose.
DimensionWithCovariance dimension
================================================================================
MSG: geometry_msgs/PoseWithCovariance
# This represents a pose in free space with uncertainty.
Pose pose
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: zzz_perception_msgs/DimensionWithCovariance
# Describing the size object in 3D space (in meters) with uncertainty
float64 length_x # width
float64 length_y # height
float64 length_z # length
# Row-major representation of the 3x3 covariance matrix
# In order, the parameters are: (length_x, length_y, length_z)
float64[9] covariance
================================================================================
MSG: geometry_msgs/TwistWithCovariance
# This expresses velocity in free space with uncertainty.
Twist twist
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Twist
# This expresses velocity in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/AccelWithCovariance
# This expresses acceleration in free space with uncertainty.
Accel accel
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Accel
# This expresses acceleration in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
"""
__slots__ = ['uid','confidence','classes','signal','bbox','twist','accel','comments']
_slot_types = ['uint64','float32','zzz_perception_msgs/ObjectClass[]','zzz_perception_msgs/ObjectSignals','zzz_perception_msgs/BoundingBox','geometry_msgs/TwistWithCovariance','geometry_msgs/AccelWithCovariance','string']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       uid,confidence,classes,signal,bbox,twist,accel,comments

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    # NOTE: this file is autogenerated by genpy (see module header);
    # regenerate from TrackingBox.msg rather than editing by hand.
    if args or kwds:
      # Let genpy.Message assign the supplied positional/keyword values,
      # then fill in defaults for any fields it left as None.
      super(TrackingBox, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.uid is None:
        self.uid = 0
      if self.confidence is None:
        self.confidence = 0.
      if self.classes is None:
        self.classes = []
      if self.signal is None:
        self.signal = zzz_perception_msgs.msg.ObjectSignals()
      if self.bbox is None:
        self.bbox = zzz_perception_msgs.msg.BoundingBox()
      if self.twist is None:
        self.twist = geometry_msgs.msg.TwistWithCovariance()
      if self.accel is None:
        self.accel = geometry_msgs.msg.AccelWithCovariance()
      if self.comments is None:
        self.comments = ''
    else:
      # No arguments given: initialize every field to its type's default.
      self.uid = 0
      self.confidence = 0.
      self.classes = []
      self.signal = zzz_perception_msgs.msg.ObjectSignals()
      self.bbox = zzz_perception_msgs.msg.BoundingBox()
      self.twist = geometry_msgs.msg.TwistWithCovariance()
      self.accel = geometry_msgs.msg.AccelWithCovariance()
      self.comments = ''
  def _get_types(self):
    """
    internal API method

    Return the ROS type strings for this message's slots, in
    ``__slots__`` order; used by the genpy (de)serialization machinery.
    """
    return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Qf().pack(_x.uid, _x.confidence))
length = len(self.classes)
buff.write(_struct_I.pack(length))
for val1 in self.classes:
_x = val1
buff.write(_get_struct_If().pack(_x.classid, _x.score))
_x = val1.comments
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_Hf7d().pack(_x.signal.flags, _x.signal.score, _x.bbox.pose.pose.position.x, _x.bbox.pose.pose.position.y, _x.bbox.pose.pose.position.z, _x.bbox.pose.pose.orientation.x, _x.bbox.pose.pose.orientation.y, _x.bbox.pose.pose.orientation.z, _x.bbox.pose.pose.orientation.w))
buff.write(_get_struct_36d().pack(*self.bbox.pose.covariance))
_x = self
buff.write(_get_struct_3d().pack(_x.bbox.dimension.length_x, _x.bbox.dimension.length_y, _x.bbox.dimension.length_z))
buff.write(_get_struct_9d().pack(*self.bbox.dimension.covariance))
_x = self
buff.write(_get_struct_6d().pack(_x.twist.twist.linear.x, _x.twist.twist.linear.y, _x.twist.twist.linear.z, _x.twist.twist.angular.x, _x.twist.twist.angular.y, _x.twist.twist.angular.z))
buff.write(_get_struct_36d().pack(*self.twist.covariance))
_x = self
buff.write(_get_struct_6d().pack(_x.accel.accel.linear.x, _x.accel.accel.linear.y, _x.accel.accel.linear.z, _x.accel.accel.angular.x, _x.accel.accel.angular.y, _x.accel.accel.angular.z))
buff.write(_get_struct_36d().pack(*self.accel.covariance))
_x = self.comments
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module defines the two default GLM passes of fmristat
The results of both passes of the GLM get pushed around by generators, which
know how to get out the (probably 3D) data for each slice, or parcel (for the
AR) case, estimate in 2D, then store the data back again in its original shape.
The containers here, in the execute methods, know how to reshape the data on the
way into the estimation (to 2D), then back again, to 3D, or 4D.
It's relatively easy to do this when just iterating over simple slices, but it
gets a bit more complicated when taking arbitrary shaped samples from the image,
as we do for estimating the AR coefficients, where we take all the voxels with
similar AR coefficients at once.
"""
from __future__ import absolute_import
import copy
import os.path as path
import numpy as np
import numpy.linalg as npl
from nipy.algorithms.statistics.models.regression import (
OLSModel, ARModel, ar_bias_corrector, ar_bias_correct)
from nipy.algorithms.statistics.formula import make_recarray
# nipy core imports
from nipy.core.api import Image, parcels, matrix_generator, AffineTransform
# nipy IO imports
from nipy.io.api import save_image
# fmri imports
from ..api import FmriImageList, axis0_generator
from . import outputters
class ModelOutputImage(object):
    """
    Image whose voxel values are filled in while a model is fit, then
    written to disk by calling the :meth:`save` method.

    The __getitem__ and __setitem__ calls are delegated to a private
    Image.  A ValueError is raised when trying to get/set data after
    the data has been saved to disk.

    Parameters
    ----------
    filename : str
        path to which :meth:`save` writes the image
    coordmap : AffineTransform
        coordinate map for the output image
    shape : tuple
        shape of the output data array
    clobber : bool, optional
        if True, :meth:`save` may overwrite an existing file
    """
    def __init__(self, filename, coordmap, shape, clobber=False):
        self.filename = filename
        self._im_data = np.zeros(shape)
        self._im = Image(self._im_data, coordmap)
        self.clobber = clobber
        # Set once save() has flushed the data; read/write is then refused.
        self._flushed = False

    def save(self):
        """
        Save current Image data to disk and mark the instance read-only.

        Raises
        ------
        ValueError
            if the target file exists and ``clobber`` is False.
        """
        if not self.clobber and path.exists(self.filename):
            raise ValueError('trying to clobber existing file')
        save_image(self._im, self.filename)
        self._flushed = True
        del self._im

    def __getitem__(self, item):
        if self._flushed:
            raise ValueError('trying to read value from a '
                             'saved ModelOutputImage')
        return self._im_data[item]

    def __setitem__(self, item, value):
        if self._flushed:
            # BUG FIX: the original implicit string concatenation was
            # missing the space between 'saved' and 'ModelOutputImage'.
            raise ValueError('trying to set value on saved '
                             'ModelOutputImage')
        self._im_data[item] = value
def model_generator(formula, data, volume_start_times, iterable=None,
                    slicetimes=None, model_type=OLSModel,
                    model_params=lambda x: ()):
    """
    Generator for the models for a pass of fmristat analysis.

    Parameters
    ----------
    formula : Formula
        object with a ``design`` method returning the design matrix for
        the given volume start times
    data : array-like
        data with time as the first axis, sliced by ``axis0_generator``
    volume_start_times : array
        start time of each volume; converted to a 't' recarray
    iterable : None or iterable, optional
        slicers passed to ``axis0_generator``
    slicetimes : None, optional
        unused in this function; kept for interface compatibility
    model_type : class, optional
        model class constructed from the design, default ``OLSModel``
    model_params : callable, optional
        maps an indexer to extra positional arguments for `model_type`

    Yields
    ------
    ``(indexer, indexed_data, model)`` triples, one per 2D data slice.
    """
    volume_start_times = make_recarray(volume_start_times.astype(float), 't')
    # The design depends only on the volume start times, not on the data
    # slice, so compute it once instead of once per iteration.
    design = formula.design(volume_start_times, return_float=True)
    # Generator for slices of the data with time as first axis
    axis0_gen = axis0_generator(data, slicers=iterable)
    # Iterate over 2D slices of the data
    for indexer, indexed_data in matrix_generator(axis0_gen):
        model_args = model_params(indexer)  # model may depend on indexer
        # Make the model from the design
        rmodel = model_type(design, *model_args)
        yield indexer, indexed_data, rmodel
def results_generator(model_iterable):
    """
    Generator for results from an iterator that returns
    (index, data, model) tuples.

    See model_generator.
    """
    for indexer, indexed_data, model in model_iterable:
        # Fit each model to its data slice, keeping the indexer paired
        # with the fit results.
        yield indexer, model.fit(indexed_data)
class OLS(object):
    """
    First pass through fmri_image.

    Parameters
    ----------
    fmri_image : `FmriImageList` or 4D image
        object returning 4D data from np.asarray, with first
        (``object[0]``) axis being the independent variable of the model;
        object[0] returns an object with attribute ``shape``.
    formula :  :class:`nipy.algorithms.statistics.formula.Formula`
    outputs : sequence, optional
        output objects to fill in :meth:`execute`; default None means a
        fresh empty list per instance (avoids the shared mutable default
        argument pitfall of ``outputs=[]``).
    volume_start_times : None or sequence, optional
        start times of the volumes; None (default) reads them from
        ``fmri_image.volume_start_times``.
    """
    def __init__(self, fmri_image, formula, outputs=None,
                 volume_start_times=None):
        self.fmri_image = fmri_image
        try:
            self.data = fmri_image.get_data()
        except AttributeError:
            self.data = fmri_image.get_list_data(axis=0)
        self.formula = formula
        # None sentinel instead of a mutable default argument: a default
        # list would be shared across every OLS instance.
        self.outputs = [] if outputs is None else outputs
        if volume_start_times is None:
            self.volume_start_times = self.fmri_image.volume_start_times
        else:
            self.volume_start_times = volume_start_times

    def execute(self):
        """ Run the OLS pass over the data, filling ``self.outputs`` """
        m = model_generator(self.formula, self.data,
                            self.volume_start_times,
                            model_type=OLSModel)
        r = results_generator(m)

        def reshape(i, x):
            """ Reshape 2D results `x` to image shape; fix up indexer `i` """
            if len(x.shape) == 2:
                if type(i) is int:  # integer (slice) indexing
                    x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:]
                if type(i) not in (list, tuple):
                    i = (i,)
                else:
                    i = tuple(i)
                # Prepend a full slice for the leading output axis
                i = (slice(None, None, None),) + tuple(i)
            else:
                if type(i) is int:
                    x.shape = self.fmri_image[0].shape[1:]
            return i, x

        # generate_output consumes `r` for its side effect of filling
        # the outputs; its return value was never used.
        generate_output(self.outputs, r, reshape=reshape)
def estimateAR(resid, design, order=1):
    """
    Estimate AR parameters using bias correction from fMRIstat.

    Parameters
    ----------
    resid : array-like
        residuals from an OLS fit of `design` to the data
    design : ndarray
        design matrix used in the fit that produced `resid`
    order : int, optional
        order of the AR model to estimate; default 1

    Returns
    -------
    output : array
        bias-corrected AR coefficient estimates from ``ar_bias_correct``
        (the original docstring's shape note was truncated here;
        presumably one row of coefficients per AR order -- confirm
        against ``ar_bias_correct``)
    """
    # Correction matrix depends only on the design, computed via its
    # pseudo-inverse
    invM = ar_bias_corrector(design, npl.pinv(design), order)
    return ar_bias_correct(resid, order, invM)
class AR1(object):
    """
    Second pass through fmri_image.

    Parameters
    ----------
    fmri_image : `FmriImageList`
        object returning 4D array from ``np.asarray``, having attribute
        ``volume_start_times`` (if `volume_start_times` is None), and
        such that ``object[0]`` returns something with attributes ``shape``
    formula :  :class:`nipy.algorithms.statistics.formula.Formula`
    rho : ``Image``
        image of AR(1) coefficients.  Returning data from
        ``rho.get_data()``, and having attribute ``coordmap``
    outputs : sequence, optional
        output objects to fill in :meth:`execute`; default None means a
        fresh empty list per instance (avoids a shared mutable default).
    volume_start_times : None or sequence, optional
        start times of the volumes; None (default) reads them from
        ``fmri_image.volume_start_times``.
    """
    def __init__(self, fmri_image, formula, rho, outputs=None,
                 volume_start_times=None):
        self.fmri_image = fmri_image
        try:
            self.data = fmri_image.get_data()
        except AttributeError:
            self.data = fmri_image.get_list_data(axis=0)
        self.formula = formula
        # None sentinel instead of mutable default argument
        self.outputs = [] if outputs is None else outputs
        # Cleanup rho values, truncate them to a scale of 0.01
        g = copy.copy(rho.coordmap)
        rho = rho.get_data()
        m = np.isnan(rho)
        # Use builtin int: the np.int alias was deprecated in NumPy 1.20
        # and removed in 1.24, so .astype(np.int) raises AttributeError
        # on modern NumPy.
        r = (np.clip(rho, -1, 1) * 100).astype(int) / 100.
        # Mark NaN voxels with inf so they are excluded from parcels
        r[m] = np.inf
        self.rho = Image(r, g)
        if volume_start_times is None:
            self.volume_start_times = self.fmri_image.volume_start_times
        else:
            self.volume_start_times = volume_start_times

    def execute(self):
        """ Run the AR(1) pass over parcels of similar rho """
        iterable = parcels(self.rho, exclude=[np.inf])

        def model_params(i):
            # AR coefficient for this parcel is the mean rho over it
            return (self.rho.get_data()[i].mean(),)

        # Generates indexer, data, model
        m = model_generator(self.formula, self.data,
                            self.volume_start_times,
                            iterable=iterable,
                            model_type=ARModel,
                            model_params=model_params)
        # Generates indexer, data, 2D results
        r = results_generator(m)

        def reshape(i, x):
            """
            To write output, arrays have to be reshaped --
            this function does the appropriate reshaping for the two
            passes of fMRIstat.

            These passes are:
            i) 'slices through the z-axis'
            ii) 'parcels of approximately constant AR1 coefficient'
            """
            if len(x.shape) == 2:  # 2D input matrix
                if type(i) is int:  # integer indexing
                    # reshape to ND (where N is probably 4)
                    x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:]
                # Convert lists to tuples, put anything else into a tuple
                if type(i) not in (list, tuple):
                    i = (i,)
                else:
                    i = tuple(i)
                # Add : to indexing
                i = (slice(None, None, None),) + tuple(i)
            else:  # not 2D
                if type(i) is int:  # integer indexing
                    x.shape = self.fmri_image[0].shape[1:]
            return i, x

        # Put results pulled from results generator r, into outputs
        generate_output(self.outputs, r, reshape=reshape)
def output_T(outbase, contrast, fmri_image, effect=True, sd=True, t=True,
             clobber=False):
    """ Return t contrast regression outputs list for `contrast`

    Parameters
    ----------
    outbase : string
        Base filename that will be used to construct a set of files
        for the TContrast.  For example, outbase='output.nii' will
        result in the following files (assuming defaults for all other
        params): output_effect.nii, output_sd.nii, output_t.nii
    contrast : array
        F contrast matrix
    fmri_image : ``FmriImageList`` or ``Image``
        object such that ``object[0]`` has attributes ``shape`` and
        ``coordmap``
    effect : {True, False}, optional
        whether to write an effect image
    sd : {True, False}, optional
        whether to write a standard deviation image
    t : {True, False}, optional
        whether to write a t image
    clobber : {False, True}, optional
        whether to overwrite images that exist.

    Returns
    -------
    reglist : ``RegressionOutputList`` instance
        Regression output list with selected outputs, where selection is by
        inputs `effect`, `sd` and `t`

    Notes
    -----
    Note that this routine uses the corresponding ``output_T`` routine in
    :mod:`outputters`, but indirectly via the TOutput object.
    """
    def build_filename(label):
        # Insert `label` before the first extension.  Previously, a name
        # without any '.' made find() return -1 and silently mangled the
        # filename; now we just append the label in that case.
        index = outbase.find('.')
        if index < 0:
            return outbase + '_' + label
        return ''.join([outbase[:index], '_', label, outbase[index:]])

    if effect:
        effectim = ModelOutputImage(build_filename('effect'),
                                    fmri_image[0].coordmap,
                                    fmri_image[0].shape, clobber=clobber)
    else:
        effectim = None
    if sd:
        sdim = ModelOutputImage(build_filename('sd'),
                                fmri_image[0].coordmap, fmri_image[0].shape,
                                clobber=clobber)
    else:
        sdim = None
    if t:
        tim = ModelOutputImage(build_filename('t'),
                               fmri_image[0].coordmap, fmri_image[0].shape,
                               clobber=clobber)
    else:
        tim = None
    return outputters.TOutput(contrast, effect=effectim, sd=sdim, t=tim)
def output_F(outfile, contrast, fmri_image, clobber=False):
    ''' output F statistic images

    Parameters
    ----------
    outfile : str
        filename for F contrast image
    contrast : array
        F contrast matrix
    fmri_image : ``FmriImageList`` or ``Image``
        object such that ``object[0]`` has attributes ``shape`` and
        ``coordmap``
    clobber : bool
        if True, overwrites previous output; if False, raises error

    Returns
    -------
    f_reg_out : ``RegressionOutput`` instance
        Object that can a) be called with a results instance as argument,
        returning an array, and b) accept the output array for storing, via
        ``obj[slice_spec] = arr`` type slicing.
    '''
    out_im = ModelOutputImage(outfile, fmri_image[0].coordmap,
                              fmri_image[0].shape, clobber=clobber)

    def f_stat(results):
        # Compute the F statistic for `contrast` from fitted `results`
        return outputters.output_F(results, contrast)

    return outputters.RegressionOutput(out_im, f_stat)
def output_AR1(outfile, fmri_image, clobber=False):
"""
Create an output file of the AR1 parameter from the OLS pass of
fmristat.
Parameters
----------
outfile :
fmri_image : ``FmriImageList`` or 4D image
object such that ``object[0]`` has | |
":cur_option", reg0),
(str_store_string, s3, s0),
(str_store_string, s2, "str_s2_s3"),
(try_end),
(try_end),
(create_text_overlay, reg0, s2, tf_scrollable),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 230),
(position_set_y, pos1, 425),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 540),
(position_set_y, pos1, 150),
(overlay_set_area_size, reg0, pos1),
(presentation_set_duration, 999999),
(try_end),
]),
(ti_on_presentation_run,
[
(str_store_welcome_message, s0),
(try_begin),
(neq, "$g_multiplayer_show_server_rules", 1),
(this_or_next|str_is_empty, s0),
(eq, "$g_multiplayer_welcome_message_shown", 1),
(presentation_set_duration, 0),
(neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
# (neg|is_presentation_active, "prsnt_multiplayer_team_select"),
(multiplayer_get_my_player, ":my_player_no"),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, multi_team_spectator),
(player_set_team_no, ":my_player_no", multi_team_spectator),
(start_presentation, "prsnt_multiplayer_escape_menu"),
(else_try),
(store_mission_timer_a, ":mission_timer"),
(gt, ":mission_timer", 1),
(this_or_next|key_clicked, key_escape),
(this_or_next|key_clicked, key_space),
(this_or_next|key_clicked, key_enter),
(this_or_next|key_clicked, key_left_mouse_button),
(this_or_next|key_clicked, key_right_mouse_button),
(this_or_next|key_clicked, key_xbox_ltrigger),
(key_clicked, key_xbox_rtrigger),
(assign, "$g_multiplayer_welcome_message_shown", 1),
(presentation_set_duration, 0),
(neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
# (neg|is_presentation_active, "prsnt_multiplayer_team_select"),
(try_begin),
(eq, "$g_multiplayer_show_server_rules", 1),
(assign, "$g_multiplayer_show_server_rules", 0),
(start_presentation, "prsnt_multiplayer_escape_menu"),
(else_try),
(multiplayer_get_my_player, ":my_player_no"),
(player_set_troop_id, ":my_player_no", -1),
(multiplayer_send_int_to_server, multiplayer_event_change_team_no, multi_team_spectator),
(player_set_team_no, ":my_player_no", multi_team_spectator),
(start_presentation, "prsnt_multiplayer_escape_menu"),
(try_end),
(try_end),
]),
]),
("multiplayer_item_select", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(multiplayer_get_my_player, ":my_player_no"),
(assign, "$g_presentation_obj_item_select_1", -1),
(assign, "$g_presentation_obj_item_select_2", -1),
(assign, "$g_presentation_obj_item_select_3", -1),
(assign, "$g_presentation_obj_item_select_4", -1),
(assign, "$g_presentation_obj_item_select_5", -1),
(assign, "$g_presentation_obj_item_select_6", -1),
(assign, "$g_presentation_obj_item_select_7", -1),
(assign, "$g_presentation_obj_item_select_8", -1),
(assign, "$g_presentation_obj_item_select_9", -1),
(assign, "$g_presentation_obj_item_select_10", -1),
(assign, "$g_presentation_obj_item_select_11", -1),
(assign, "$g_presentation_obj_item_select_12", -1),
(assign, "$g_presentation_obj_item_select_13", -1),
(assign, "$g_presentation_obj_item_select_14", -1),
(assign, "$g_presentation_obj_item_select_15", -1),
(assign, "$g_presentation_obj_item_select_16", -1),
(try_begin),
(neq, "$g_current_opened_item_details", -1),
(close_item_details),
(assign, "$g_current_opened_item_details", -1),
(try_end),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 0),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_1", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 526),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_1", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_1", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_1", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 475),
(overlay_set_position, "$g_presentation_obj_item_select_1", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 1),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_2", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 426),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_2", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_2", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_2", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 375),
(overlay_set_position, "$g_presentation_obj_item_select_2", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 2),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_3", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 326),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_3", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_3", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_3", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 275),
(overlay_set_position, "$g_presentation_obj_item_select_3", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 3),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_4", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 226),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_4", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_4", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_4", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 175),
(overlay_set_position, "$g_presentation_obj_item_select_4", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 4),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_5", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 576),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_5", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_5", "mesh_mp_inventory_slot_helmet", "mesh_mp_inventory_slot_helmet"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_5", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 525),
(overlay_set_position, "$g_presentation_obj_item_select_5", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 5),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_6", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 476),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_6", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_6", "mesh_mp_inventory_slot_armor", "mesh_mp_inventory_slot_armor"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_6", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 425),
(overlay_set_position, "$g_presentation_obj_item_select_6", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 6),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_7", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 376),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_7", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_7", "mesh_mp_inventory_slot_boot", "mesh_mp_inventory_slot_boot"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_7", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 325),
(overlay_set_position, "$g_presentation_obj_item_select_7", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 7),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_8", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 276),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_8", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_8", "mesh_mp_inventory_slot_glove", "mesh_mp_inventory_slot_glove"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_8", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 225),
(overlay_set_position, "$g_presentation_obj_item_select_8", pos1),
(store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 8),
(player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(try_begin),
(ge, ":selected_item_id", 0),
(eq, "$g_horses_are_avaliable", 1),
(create_image_button_overlay, "$g_presentation_obj_item_select_9", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 176),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_9", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_9", "mesh_mp_inventory_slot_horse", "mesh_mp_inventory_slot_horse"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_9", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_item_select_9", pos1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 0),
(position_set_y, pos1, 14),
(overlay_set_position, reg0, pos1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 894),
(position_set_y, pos1, 65),
(overlay_set_position, reg0, pos1),
(create_in_game_button_overlay, "$g_presentation_obj_item_select_10", "str_reset_to_default", 0),
(overlay_set_color, "$g_presentation_obj_item_select_10", 0xFFFFFF),
(position_set_x, pos1, 605),
(position_set_y, pos1, 25),
(overlay_set_position, "$g_presentation_obj_item_select_10", pos1),
(create_in_game_button_overlay, "$g_presentation_obj_item_select_11", "str_done", 0),
(overlay_set_color, "$g_presentation_obj_item_select_11", 0xFFFFFF),
(position_set_x, pos1, 395),
(position_set_y, pos1, 25),
(overlay_set_position, "$g_presentation_obj_item_select_11", pos1),
(assign, ":cur_y", 725),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_team_no, ":my_team_no", ":my_player_no"),
(assign, ":has_bots", 0),
(try_begin),
(eq, ":my_team_no", 0),
(try_begin),
(gt, "$g_multiplayer_num_bots_team_1", 0),
(assign, ":has_bots", 1),
(try_end),
(else_try),
(try_begin),
(gt, "$g_multiplayer_num_bots_team_2", 0),
(assign, ":has_bots", 1),
(try_end),
(try_end),
(team_get_faction, ":my_faction_no", ":my_team_no"),
(try_begin),
(eq, ":has_bots", 1),
(assign, ":num_lines", 0),
(try_begin),
(eq, ":has_bots", 1),
(try_for_range, ":ai_troop_no", multiplayer_ai_troops_begin, multiplayer_ai_troops_end),
(store_troop_faction, ":ai_troop_faction", ":ai_troop_no"),
(eq, ":ai_troop_faction", ":my_faction_no"),
(val_add, ":num_lines", 1),
(try_end),
(try_end),
(store_mul, ":board_height", ":num_lines", 20),
(val_add, ":board_height", 40),
(create_mesh_overlay, reg0, "mesh_mp_ui_command_border_r"),
(position_set_x, pos1, 280),
(position_set_y, pos1, 680),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 2500),
(position_set_y, pos1, 2500),
(overlay_set_size, reg0, pos1),
(create_mesh_overlay, reg0, "mesh_mp_ui_command_border_l"),
(position_set_x, pos1, 650),
(position_set_y, pos1, 680),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 2500),
(position_set_y, pos1, 2500),
(overlay_set_size, reg0, pos1),
(create_mesh_overlay, reg0, "mesh_mp_ui_command_panel"),
(position_set_x, pos1, 350),
(store_sub, ":board_pos_y", 750, ":board_height"),
(position_set_y, pos1, ":board_pos_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 3000),
(position_set_y, pos1, 3000),
(overlay_set_size, reg0, pos1),
(create_text_overlay, reg0, "str_command", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 370),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", 20),
(assign, ":cur_ai_troop_index", 0),
(try_for_range, ":ai_troop_no", multiplayer_ai_troops_begin, multiplayer_ai_troops_end),
(store_troop_faction, ":ai_troop_faction", ":ai_troop_no"),
(eq, ":ai_troop_faction", ":my_faction_no"),
(create_check_box_overlay, reg0, "mesh_checkbox_off", "mesh_checkbox_on"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 377),
(store_add, ":special_cur_y", ":cur_y", 2),
(position_set_y, pos1, ":special_cur_y"),
(overlay_set_position, reg0, pos1),
(try_begin),
(eq, ":cur_ai_troop_index", 0),
(overlay_set_val, reg0, "$g_multiplayer_bot_type_1_wanted"),
(assign, "$g_presentation_obj_item_select_13", reg0),
(else_try),
(eq, ":cur_ai_troop_index", 1),
(overlay_set_val, reg0, "$g_multiplayer_bot_type_2_wanted"),
(assign, "$g_presentation_obj_item_select_14", reg0),
(else_try),
(eq, ":cur_ai_troop_index", 2),
(overlay_set_val, reg0, "$g_multiplayer_bot_type_3_wanted"),
(assign, "$g_presentation_obj_item_select_15", reg0),
(else_try),
(overlay_set_val, reg0, "$g_multiplayer_bot_type_4_wanted"),
(assign, "$g_presentation_obj_item_select_16", reg0),
(try_end),
(str_store_troop_name, s0, ":ai_troop_no"),
(create_text_overlay, reg0, "str_s0", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 397),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", 20),
(val_add, ":cur_ai_troop_index", 1),
(try_end),
(val_sub, ":cur_y", 20),
(try_end),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_gold, ":player_gold", ":my_player_no"),
(call_script, "script_multiplayer_calculate_cur_selected_items_cost", ":my_player_no", 1),
(create_text_overlay, "$g_presentation_obj_item_select_12", "str_total_item_cost_reg0", tf_left_align|tf_single_line|tf_with_outline),
(try_begin),
(ge, ":player_gold", reg0),
(overlay_set_color, "$g_presentation_obj_item_select_12", 0xFFFFFF),
(else_try),
(overlay_set_color, "$g_presentation_obj_item_select_12", 0xFF0000),
(try_end),
(position_set_x, pos1, 680),
(position_set_y, pos1, 652),
(overlay_set_position, "$g_presentation_obj_item_select_12", pos1),
(store_add, "$g_presentation_obj_item_select_next", "$g_presentation_obj_item_select_12", 1),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_mouse_enter_leave,
[(store_trigger_param_1, ":object"),
(store_trigger_param_2, ":enter_leave"),
(try_begin),
(eq, "$g_close_equipment_selection", 0),
(try_begin),
(eq, ":enter_leave", 0),
(assign, ":item_no", -1),
(try_begin),
(ge, ":object", "$g_presentation_obj_item_select_next"),
(store_sub, ":tested_object", ":object", "$g_presentation_obj_item_select_next"),
(store_mod, ":mod_value", ":tested_object", 2),
(store_sub, ":mod_value", 1, ":mod_value"),
(val_div, ":tested_object", 2),
(store_add, ":cur_slot", multi_data_item_button_indices_begin, ":tested_object"),
(troop_get_slot, ":item_no", "trp_multiplayer_data", ":cur_slot"),
(assign, ":target_obj", ":object"),
(val_add, ":target_obj", ":mod_value"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_1"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 1),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_1"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_2"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 2),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_2"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_3"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 3),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_3"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_4"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 4),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_4"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_5"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 5),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_5"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_6"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 6),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
(assign, ":target_obj", "$g_inside_obj_6"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_7"),
(store_add, ":player_slot_index", slot_player_selected_item_indices_begin, 7),
(val_sub, ":player_slot_index", 1),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_slot, ":item_no", ":my_player_no", ":player_slot_index"),
| |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from ..rpc_error import RPCError
class BadRequest(RPCError):
    """Bad Request"""
    CODE = 400  # ``int``: RPC Error Code
    NAME = __doc__
class AboutTooLong(BadRequest):
    """The provided about/bio text is too long"""
    ID = "ABOUT_TOO_LONG"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AccessTokenExpired(BadRequest):
    """The bot token has expired"""
    ID = "ACCESS_TOKEN_EXPIRED"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AccessTokenInvalid(BadRequest):
    """The bot access token is invalid"""
    ID = "ACCESS_TOKEN_INVALID"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AdminsTooMuch(BadRequest):
    """The chat has too many administrators"""
    ID = "ADMINS_TOO_MUCH"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AdminRankEmojiNotAllowed(BadRequest):
    """Emoji are not allowed in custom administrator titles"""
    ID = "ADMIN_RANK_EMOJI_NOT_ALLOWED"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AdminRankInvalid(BadRequest):
    """The custom administrator title is invalid or is longer than 16 characters"""
    ID = "ADMIN_RANK_INVALID"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AlbumPhotosTooMany(BadRequest):
    """Too many photos were included in the album"""
    ID = "ALBUM_PHOTOS_TOO_MANY"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class ApiIdInvalid(BadRequest):
    """The api_id/api_hash combination is invalid"""
    ID = "API_ID_INVALID"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class ApiIdPublishedFlood(BadRequest):
    """You are using an API key that is limited on the server side because it was published somewhere"""
    ID = "API_ID_PUBLISHED_FLOOD"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class ArticleTitleEmpty(BadRequest):
    """The article title is empty"""
    ID = "ARTICLE_TITLE_EMPTY"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AudioTitleEmpty(BadRequest):
    """The title attribute of the audio is empty"""
    ID = "AUDIO_TITLE_EMPTY"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AuthBytesInvalid(BadRequest):
    """The authorization bytes are invalid"""
    ID = "AUTH_BYTES_INVALID"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AuthTokenAlreadyAccepted(BadRequest):
    """The authorization token was already used"""
    ID = "AUTH_TOKEN_ALREADY_ACCEPTED"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AuthTokenExpired(BadRequest):
    """The provided authorization token has expired and the updated QR-code must be re-scanned"""
    ID = "AUTH_TOKEN_EXPIRED"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AuthTokenInvalid(BadRequest):
    """An invalid authorization token was provided"""
    ID = "AUTH_TOKEN_INVALID"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class AutoarchiveNotAvailable(BadRequest):
    """This feature is not yet enabled for your account due to it not receiving too many private messages from strangers"""
    ID = "AUTOARCHIVE_NOT_AVAILABLE"  # ``str``: RPC Error ID
    MESSAGE = __doc__
class BankCardNumberInvalid(BadRequest):
"""The credit card number is invalid"""
ID = "BANK_CARD_NUMBER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BannedRightsInvalid(BadRequest):
"""You provided a set of restrictions that is invalid"""
ID = "BANNED_RIGHTS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BasePortLocInvalid(BadRequest):
"""The base port location is invalid"""
ID = "BASE_PORT_LOC_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotsTooMuch(BadRequest):
"""The chat has too many bots"""
ID = "BOTS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotChannelsNa(BadRequest):
"""Bots can't edit admin privileges"""
ID = "BOT_CHANNELS_NA"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotCommandDescriptionInvalid(BadRequest):
"""The command description was empty, too long or had invalid characters"""
ID = "BOT_COMMAND_DESCRIPTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotDomainInvalid(BadRequest):
"""The domain used for the auth button does not match the one configured in @BotFather"""
ID = "BOT_DOMAIN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotGamesDisabled(BadRequest):
"""Bot games cannot be used in this type of chat"""
ID = "BOT_GAMES_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotGroupsBlocked(BadRequest):
"""This bot can't be added to groups"""
ID = "BOT_GROUPS_BLOCKED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotInlineDisabled(BadRequest):
"""The inline feature of the bot is disabled"""
ID = "BOT_INLINE_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotInvalid(BadRequest):
"""This is not a valid bot"""
ID = "BOT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotMethodInvalid(BadRequest):
"""The method can't be used by bots"""
ID = "BOT_METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotMissing(BadRequest):
"""This method can only be run by a bot"""
ID = "BOT_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotPaymentsDisabled(BadRequest):
"""This method can only be run by a bot"""
ID = "BOT_PAYMENTS_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotPollsDisabled(BadRequest):
"""Sending polls by bots has been disabled"""
ID = "BOT_POLLS_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotResponseTimeout(BadRequest):
"""The bot did not answer to the callback query in time"""
ID = "BOT_RESPONSE_TIMEOUT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotScoreNotModified(BadRequest):
"""The bot score was not modified"""
ID = "BOT_SCORE_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastIdInvalid(BadRequest):
"""The channel is invalid"""
ID = "BROADCAST_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastPublicVotersForbidden(BadRequest):
"""Polls with public voters cannot be sent in channels"""
ID = "BROADCAST_PUBLIC_VOTERS_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastRequired(BadRequest):
"""The request can only be used with a channel"""
ID = "BROADCAST_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonDataInvalid(BadRequest):
"""The button callback data contains invalid data or exceeds 64 bytes"""
ID = "BUTTON_DATA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonTypeInvalid(BadRequest):
"""The type of one of the buttons you provided is invalid"""
ID = "BUTTON_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonUrlInvalid(BadRequest):
"""The button url is invalid"""
ID = "BUTTON_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallAlreadyAccepted(BadRequest):
"""The call is already accepted"""
ID = "CALL_ALREADY_ACCEPTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallAlreadyDeclined(BadRequest):
"""The call is already declined"""
ID = "CALL_ALREADY_DECLINED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallPeerInvalid(BadRequest):
"""The provided call peer object is invalid"""
ID = "CALL_PEER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallProtocolFlagsInvalid(BadRequest):
"""Call protocol flags invalid"""
ID = "CALL_PROTOCOL_FLAGS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CdnMethodInvalid(BadRequest):
"""The method can't be used on CDN DCs"""
ID = "CDN_METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelsAdminPublicTooMuch(BadRequest):
"""You are an administrator of too many public channels"""
ID = "CHANNELS_ADMIN_PUBLIC_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelsTooMuch(BadRequest):
"""You have joined too many channels or supergroups, leave some and try again"""
ID = "CHANNELS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelBanned(BadRequest):
"""The channel is banned"""
ID = "CHANNEL_BANNED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelInvalid(BadRequest):
"""The channel parameter is invalid"""
ID = "CHANNEL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelPrivate(BadRequest):
"""The channel/supergroup is not accessible"""
ID = "CHANNEL_PRIVATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelTooLarge(BadRequest):
"""The channel is too large to be deleted; this error is issued when trying to delete channels with more than 1000 members (subject to change)"""
ID = "CHANNEL_TOO_LARGE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAboutNotModified(BadRequest):
"""The chat about text was not modified because you tried to edit it using the same content"""
ID = "CHAT_ABOUT_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAboutTooLong(BadRequest):
"""The chat about text is too long"""
ID = "CHAT_ABOUT_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAdminRequired(BadRequest):
"""The method requires chat admin privileges"""
ID = "CHAT_ADMIN_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatIdEmpty(BadRequest):
"""The provided chat id is empty"""
ID = "CHAT_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatIdInvalid(BadRequest):
"""The chat id being used is invalid or not known yet. Make sure you see the chat before interacting with it"""
ID = "CHAT_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatInvalid(BadRequest):
"""The chat is invalid"""
ID = "CHAT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatInvitePermanent(BadRequest):
"""The chat invite link is primary"""
ID = "CHAT_INVITE_PERMANENT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatLinkExists(BadRequest):
"""The action failed because the supergroup is linked to a channel"""
ID = "CHAT_LINK_EXISTS"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatNotModified(BadRequest):
"""The chat settings (title, permissions, photo, etc..) were not modified because you | |
# (dataset artifact removed: "<gh_stars>10-100" — not part of the module)
from contextlib import contextmanager
from flask import current_app, url_for
from redis import StrictRedis
from woodwind import util
from woodwind.extensions import db
from woodwind.models import Feed, Entry
import sqlalchemy
import bs4
import datetime
import feedparser
import itertools
import json
import mf2py
import mf2util
import re
import requests
import rq
import sys
import time
import traceback
import urllib.parse
# normal update interval for polling feeds
UPDATE_INTERVAL = datetime.timedelta(hours=1)
# update interval when polling feeds that are push verified
UPDATE_INTERVAL_PUSH = datetime.timedelta(days=1)
# Extracts (screen name, status id) from tweet permalinks.
TWITTER_RE = re.compile(
    r'https?://(?:www\.|mobile\.)?twitter\.com/(\w+)/status(?:es)?/(\w+)')
# div/span tags and HTML comments stripped when comparing entry content
# (see is_content_equal) to ignore cosmetic markup churn.
TAG_RE = re.compile(r'</?(div|span)[^>]*?>')
COMMENT_RE = re.compile(r'<!--[^>]*?-->')
# Matches "♫ <url>" jam posts.
# NOTE(review): non-raw string, so '\s' and '\-' are invalid escape
# sequences (DeprecationWarning on modern Pythons) — consider a raw string.
JAM_RE = re.compile(
    '\s*\u266b (?:https?://)?[a-z0-9._\-]+\.[a-z]{2,9}(?:/\S*)?')
# HTML snippets appended for podcast/video enclosures found in XML feeds.
AUDIO_ENCLOSURE_TMPL = '<p><audio class="u-audio" src="{href}" controls '\
    'preload=none ><a href="{href}">audio</a></audio></p>'
VIDEO_ENCLOSURE_TMPL = '<p><video class="u-video" src="{href}" controls '\
    'preload=none ><a href="{href}">video</a></video></p>'
# Shared redis connection and the two rq work queues (high/low priority).
redis = StrictRedis()
q_high = rq.Queue('high', connection=redis)
q = rq.Queue('low', connection=redis)
# Lazily created Flask app; see flask_app() below.
_app = None
class Mf2Fetcher:
    """Memoizing wrapper around ``mf2py.parse``.

    Each instance keeps its own URL -> parsed-mf2 cache, so a single
    fetcher can be shared across one feed-update run to avoid re-fetching
    the same page (e.g. for several reply contexts).
    """

    def __init__(self):
        # url -> parsed microformats2 dict
        self.cache = {}

    def __call__(self, url):
        # Fetch and parse each URL at most once per instance.
        if url not in self.cache:
            self.cache[url] = mf2py.parse(url=url)
        return self.cache[url]
@contextmanager
def flask_app():
    """Yield the (lazily created) Flask application inside an app context.

    The application is constructed on first use and cached in the module
    global ``_app``.  Exceptions raised by the ``with`` body are logged and
    swallowed so a failing background job does not kill the worker.
    """
    global _app
    if _app is None:
        # Deferred import avoids a circular import at module load time.
        from woodwind import create_app
        _app = create_app()
    with _app.app_context():
        try:
            yield _app
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit,
            # KeyboardInterrupt and GeneratorExit; those must propagate so
            # the worker can be stopped/cleaned up normally.
            _app.logger.exception('Unhandled exception')
def tick():
    """Enqueue an update job for every feed that is due to be polled.

    Designed to be fired periodically (e.g. by a uWSGI timer every five
    minutes) so no separate scheduler process is needed.
    """
    def _poll_interval(feed):
        # Back off as consecutive failures accumulate.
        if feed.failure_count > 8:
            interval = datetime.timedelta(days=1)
        elif feed.failure_count > 4:
            interval = datetime.timedelta(hours=8)
        elif feed.failure_count > 2:
            interval = datetime.timedelta(hours=4)
        else:
            interval = UPDATE_INTERVAL
        # PuSH feeds don't need to poll very frequently
        if feed.push_verified:
            interval = max(interval, UPDATE_INTERVAL_PUSH)
        return interval

    def should_update(feed, now):
        if not feed.last_checked:
            return True
        if not feed.subscriptions:
            return False
        return now - feed.last_checked > _poll_interval(feed)

    with flask_app():
        now = datetime.datetime.utcnow()
        current_app.logger.info('Tick {}'.format(now))
        for feed in Feed.query.all():
            current_app.logger.debug(
                'Feed %s last checked %s', feed, feed.last_checked)
            if should_update(feed, now):
                q.enqueue(update_feed, feed.id)
def update_feed(feed_id, content=None,
                content_type=None, is_polling=True):
    """Fetch one feed and store any new or changed entries.

    Parameters
    ----------
    feed_id : int
        Primary key of the ``Feed`` row to update.
    content : str, optional
        Pre-fetched document body (e.g. delivered by a PuSH notification).
        Used only when its ``content_type`` matches the feed type;
        otherwise the feed URL is re-fetched.
    content_type : str, optional
        MIME type of ``content``; sanity-checked against the feed type.
    is_polling : bool, optional
        True when invoked by the polling scheduler; updates
        ``last_checked`` and (re)negotiates the PuSH subscription.
    """
    def is_expected_content_type(feed_type):
        # NOTE(review): implicitly returns None (falsy) for feed types
        # other than 'html'/'xml', so unknown types always force a
        # re-fetch even when content was supplied — confirm intended.
        if not content_type:
            return True
        if feed_type == 'html':
            return content_type == 'text/html'
        if feed_type == 'xml':
            return content_type in [
                'application/rss+xml',
                'application/atom+xml',
                'application/rdf+xml',
                'application/xml',
                'text/xml',
            ]

    with flask_app() as app:
        feed = Feed.query.get(feed_id)
        current_app.logger.info('Updating {}'.format(str(feed)[:32]))
        now = datetime.datetime.utcnow()
        new_entries = []
        updated_entries = []
        reply_pairs = []
        # one shared mf2 cache for this whole run
        fetch_mf2 = Mf2Fetcher()
        try:
            if content and is_expected_content_type(feed.type):
                current_app.logger.info('using provided content. size=%d',
                                        len(content))
            else:
                current_app.logger.info('fetching feed: %s', str(feed)[:32])
                try:
                    response = util.requests_get(feed.feed)
                except:
                    # network-level failure: record it and bail; the
                    # `finally` block below still runs the bookkeeping
                    feed.last_response = 'exception while retrieving: {}'.format(
                        sys.exc_info()[0])
                    feed.failure_count += 1
                    return
                if response.status_code // 100 != 2:
                    current_app.logger.warn(
                        'bad response from %s. %r: %r', feed.feed, response,
                        response.text)
                    feed.last_response = 'bad response while retrieving: {}: {}'.format(
                        response, response.text)
                    feed.failure_count += 1
                    return
                feed.failure_count = 0
                feed.last_response = 'success: {}'.format(response)
                if is_polling:
                    check_push_subscription(feed, response)
                content = get_response_content(response)
            # backfill if this is the first pull
            backfill = len(feed.entries) == 0
            if feed.type == 'xml':
                result = process_xml_feed_for_new_entries(
                    feed, content, backfill, now)
            elif feed.type == 'html':
                result = process_html_feed_for_new_entries(
                    feed, content, backfill, now, fetch_mf2)
            else:
                result = []
            # realize list, only look at the first 30 entries
            result = list(itertools.islice(result, 30))
            # bulk-load any previously stored versions of these entries,
            # keyed by uid (latest row wins via the id-desc ordering)
            old_entries = {}
            all_uids = [e.uid for e in result]
            if all_uids:
                for entry in (Entry.query
                              .filter(Entry.feed == feed,
                                      Entry.uid.in_(all_uids))
                              .order_by(Entry.id.desc())):
                    old_entries[entry.uid] = entry
            for entry in result:
                old = old_entries.get(entry.uid)
                current_app.logger.debug(
                    'entry for uid %s: %s', entry.uid,
                    'found' if old else 'not found')
                # have we seen this post before
                if not old:
                    current_app.logger.debug('this is a new post, saving a new entry')
                    # set a default value for published if none is provided
                    entry.published = entry.published or now
                    in_reply_tos = entry.get_property('in-reply-to', [])
                    db.session.add(entry)
                    feed.entries.append(entry)
                    new_entries.append(entry)
                    for irt in in_reply_tos:
                        reply_pairs.append((entry, irt))
                elif not is_content_equal(old, entry):
                    current_app.logger.debug('this post content has changed, updating entry')
                    entry.published = entry.published or old.published
                    in_reply_tos = entry.get_property('in-reply-to', [])
                    # we're updating an old entry, use the original
                    # retrieved time
                    entry.retrieved = old.retrieved
                    old.feed = None  # feed.entries.remove(old)
                    # punt on deleting for now, learn about cascade
                    # and stuff later
                    # session.delete(old)
                    db.session.add(entry)
                    feed.entries.append(entry)
                    updated_entries.append(entry)
                    for irt in in_reply_tos:
                        reply_pairs.append((entry, irt))
                else:
                    current_app.logger.debug(
                        'skipping previously seen post %s', old.permalink)
            fetch_reply_contexts(reply_pairs, now, fetch_mf2)
            db.session.commit()
        except:
            # roll back the partial transaction, then let the job fail loudly
            db.session.rollback()
            raise
        finally:
            # bookkeeping runs even on the early `return`s above
            if is_polling:
                feed.last_checked = now
            if new_entries or updated_entries:
                feed.last_updated = now
            db.session.commit()
        if new_entries:
            notify_feed_updated(app, feed_id, new_entries)
def check_push_subscription(feed, response):
    """Discover this feed's PuSH hub/topic and (re)subscribe when needed.

    Hub and topic URLs are taken from the HTTP ``Link`` headers first,
    falling back to ``<link rel="hub"/"self">`` elements in the document
    body.  A (re)subscription is sent when the lease is close to expiry,
    when hub or topic changed, or when the subscription was never verified.
    """
    def send_request(mode, hub, topic):
        # mode is 'subscribe' or 'unsubscribe'
        hub = urllib.parse.urljoin(feed.feed, hub)
        topic = urllib.parse.urljoin(feed.feed, topic)
        callback = url_for('push.notify', feed_id=feed.id, _external=True)
        current_app.logger.debug(
            'sending %s request for hub=%r, topic=%r, callback=%r',
            mode, hub, topic, callback)
        r = requests.post(hub, data={
            'hub.mode': mode,
            'hub.topic': topic,
            'hub.callback': callback,
            'hub.secret': feed.get_or_create_push_secret(),
            'hub.verify': 'sync',  # backcompat with 0.3
        })
        current_app.logger.debug('%s response %r', mode, r)

    expiry = feed.push_expiry
    old_hub = feed.push_hub
    old_topic = feed.push_topic
    # Prefer the HTTP Link headers.
    hub = response.links.get('hub', {}).get('url')
    topic = response.links.get('self', {}).get('url')
    current_app.logger.debug('link headers. links=%s, hub=%s, topic=%s',
                             response.links, hub, topic)
    if not hub or not topic:
        # try to find link rel elements
        if feed.type == 'html':
            # NOTE(review): no explicit parser passed to BeautifulSoup —
            # the parser (and thus edge-case behavior) depends on what is
            # installed; confirm acceptable.
            soup = bs4.BeautifulSoup(get_response_content(response))
            if not hub:
                hub_link = soup.find('link', rel='hub')
                hub = hub_link and hub_link.get('href')
            if not topic:
                self_link = soup.find('link', rel='self')
                topic = self_link and self_link.get('href')
        elif feed.type == 'xml':
            parsed = feedparser.parse(get_response_content(response))
            links = parsed.feed.get('links')
            if links:
                if not hub:
                    hub = next((link['href'] for link in links
                                if 'hub' in link['rel']), None)
                if not topic:
                    topic = next((link['href'] for link in links
                                  if 'self' in link['rel']), None)
    # Resubscribe when the lease is within one push interval of expiring,
    # when hub/topic changed, or when we were never verified.
    if ((expiry and expiry - datetime.datetime.utcnow()
         <= UPDATE_INTERVAL_PUSH)
            or hub != old_hub or topic != old_topic or not feed.push_verified):
        current_app.logger.debug('push subscription expired or hub/topic changed')
        feed.push_hub = hub
        feed.push_topic = topic
        feed.push_verified = False
        feed.push_expiry = None
        db.session.commit()
        # NOTE(review): unsubscribe requires BOTH hub and topic to have
        # changed, while resubscription triggers on either — confirm.
        if old_hub and old_topic and hub != old_hub and topic != old_topic:
            current_app.logger.debug('unsubscribing hub=%s, topic=%s', old_hub, old_topic)
            send_request('unsubscribe', old_hub, old_topic)
        if hub and topic:
            current_app.logger.debug('subscribing hub=%s, topic=%s', hub, topic)
            send_request('subscribe', hub, topic)
        db.session.commit()
def notify_feed_updated(app, feed_id, entries):
    """Render the new entries for each subscriber and publish them to redis.

    Parameters
    ----------
    app : Flask
        Application used to build a request context for template rendering.
    feed_id : int
        Primary key of the updated feed.
    entries : list
        Entry objects to render and push out.
    """
    from flask import render_template
    # FIX: the `flask.ext.*` namespace was removed in Flask 1.0; import the
    # extension package directly.
    import flask_login
    current_app.logger.debug('notifying feed updated: %s', feed_id)
    feed = Feed.query.get(feed_id)
    for s in feed.subscriptions:
        with app.test_request_context():
            # Log in as the subscriber so templates render with their view.
            flask_login.login_user(s.user, remember=True)
            rendered = []
            for e in entries:
                e.subscription = s
                rendered.append(render_template('_entry.jinja2', entry=e))
            message = json.dumps({
                'user': s.user.id,
                'feed': feed.id,
                'subscription': s.id,
                'entries': rendered,
            })
            # Excluded subscriptions skip the user-wide channel but still
            # get their per-subscription channel.
            topics = []
            if not s.exclude:
                topics.append('user:{}'.format(s.user.id))
            topics.append('subsc:{}'.format(s.id))
            for topic in topics:
                redis.publish('woodwind_notify:{}'.format(topic), message)
def is_content_equal(e1, e2):
    """Return True when a previously seen entry is unchanged.

    This is the criterion used to decide whether a stored entry must be
    scrubbed and replaced by a freshly parsed one: any difference in the
    compared fields counts as an update.
    """
    def _scrubbed(markup):
        # Strip div/span tags and HTML comments before comparing, to
        # ignore markup that changes cosmetically on every render (e.g.
        # Wordpress "crayon" syntax highlighting).
        if markup:
            markup = TAG_RE.sub('', markup)
            markup = COMMENT_RE.sub('', markup)
        return markup

    if _scrubbed(e1.content) != _scrubbed(e2.content):
        return False
    plain_fields = ('title', 'author_name', 'author_url', 'author_photo',
                    'properties', 'published', 'updated', 'deleted')
    return all(getattr(e1, name) == getattr(e2, name)
               for name in plain_fields)
def process_xml_feed_for_new_entries(feed, content, backfill, now):
    """Parse an RSS/Atom document and yield ``Entry`` objects, oldest first.

    Parameters
    ----------
    feed : Feed
        The feed being updated; supplies fallback author info and origin.
    content : str
        Raw XML document body.
    backfill : bool
        True on the very first pull; entry ``retrieved`` times are then
        backdated to their published times.
    now : datetime.datetime
        Timestamp used as the default ``retrieved``/``published`` value.
    """
    current_app.logger.debug('fetching xml feed: %s', str(feed)[:32])
    parsed = feedparser.parse(content, response_headers={
        'content-location': feed.feed,
    })
    feed_props = parsed.get('feed', {})
    default_author_url = feed_props.get('author_detail', {}).get('href')
    default_author_name = feed_props.get('author_detail', {}).get('name')
    default_author_photo = feed_props.get('logo')
    current_app.logger.debug('found %d entries', len(parsed.entries))
    # work from the bottom up (oldest first, usually)
    for p_entry in reversed(parsed.entries):
        current_app.logger.debug('processing entry %s', str(p_entry)[:32])
        permalink = p_entry.get('link')
        uid = p_entry.get('id') or permalink
        if not uid:
            continue
        # FIX: `updated` must default to None here; previously it was left
        # unbound (or stale from the prior iteration) when mktime raised.
        updated = None
        if 'updated_parsed' in p_entry and p_entry.updated_parsed:
            try:
                updated = datetime.datetime.fromtimestamp(
                    time.mktime(p_entry.updated_parsed))
            except (OverflowError, OSError, TypeError, ValueError):
                # FIX: '%v' is not a valid %-format directive and broke the
                # log message; also narrowed the previously bare `except:`.
                current_app.logger.debug(
                    'mktime failed with updated timestamp: %s',
                    p_entry.updated_parsed)
        if 'published_parsed' in p_entry and p_entry.published_parsed:
            try:
                published = datetime.datetime.fromtimestamp(
                    time.mktime(p_entry.published_parsed))
            except (OverflowError, OSError, TypeError, ValueError):
                current_app.logger.debug(
                    'mktime failed with published timestamp: %s',
                    p_entry.published_parsed)
                published = updated
        else:
            published = updated
        retrieved = now
        if backfill and published:
            retrieved = published
        title = p_entry.get('title')
        # Prefer full content; fall back to the summary.
        content = None
        content_list = p_entry.get('content')
        if content_list:
            content = content_list[0].value
        else:
            content = p_entry.get('summary')
        # Drop the title when the content starts with it (common for
        # note-style posts where the "title" is just a truncated body).
        if title and content:
            title_trimmed = title.rstrip('...').rstrip('…')
            if content.startswith(title_trimmed):
                title = None
        # Append playable markup for audio/video enclosures.
        for link in p_entry.get('links', []):
            link_type = link.get('type')
            if link_type in ['audio/mpeg', 'audio/mp3']:
                audio = AUDIO_ENCLOSURE_TMPL.format(href=link.get('href'))
                content = (content or '') + audio
            if link_type in ['video/x-m4v', 'video/x-mp4', 'video/mp4']:
                video = VIDEO_ENCLOSURE_TMPL.format(href=link.get('href'))
                content = (content or '') + video
        yield Entry(
            published=published,
            updated=updated,
            uid=uid,
            permalink=permalink,
            retrieved=retrieved,
            # FIX: was `p_entry.get('title')`, which silently discarded the
            # duplicate-title suppression computed above.
            title=title,
            content=content,
            content_cleaned=util.clean(content),
            author_name=p_entry.get('author_detail', {}).get('name') or
            default_author_name,
            author_url=p_entry.get('author_detail', {}).get('href') or
            default_author_url,
            author_photo=default_author_photo or
            fallback_photo(feed.origin))
def process_html_feed_for_new_entries(feed, content, backfill, now, fetch_mf2_func):
# strip noscript tags before parsing, since | |
depth of depressions. Tricky thing is that one might be
# devoured by another, so would need to be removed from the list.
def _identify_depressions_and_outlets(self, reroute_flow=True):
"""Find depression and lakes on a topographic surface.
Find and map the depressions/lakes in a topographic surface,
given a previously identified list of pits (if any) in the surface.
"""
self._pits_flooded = 0
self._unique_pits = np.zeros_like(self.pit_node_ids, dtype=bool)
#debug_count = 0
for pit_node in self.pit_node_ids:
if self.flood_status[pit_node] != _PIT:
from landlab import BAD_INDEX_VALUE
self.depression_outlets.append(BAD_INDEX_VALUE)
else:
self.find_depression_from_pit(pit_node, reroute_flow)
self._pits_flooded += 1
assert len(self.depression_outlets) == self._unique_pits.size
self.unique_lake_outlets = np.array(self.depression_outlets
)[self._unique_pits]
def map_depressions(self, pits='flow__sink_flag', reroute_flow=True):
    """Map depressions/lakes in a topographic surface.

    Parameters
    ----------
    pits : array or str or None, optional
        If a field name, the boolean field containing True where pits.
        If an array, either a boolean array of nodes of the pits, or an
        array of pit node IDs. It does not matter whether or not open
        boundary nodes are flagged as pits; they are never treated as such.
        Default is 'flow__sink_flag', the pit field output from
        'route_flow_dn'
    reroute_flow : bool, optional
        If True (default), and the component detects the output fields in
        the grid produced by the route_flow_dn component, this component
        will modify the existing flow fields to route the flow across the
        lake surface(s).
        Ensure you call this method *after* you have already routed flow
        in each loop of your model.

    Examples
    --------
    Test #1: 5x5 raster grid with a diagonal lake.

    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.components.flow_routing import (
    ...     DepressionFinderAndRouter)
    >>> rg = RasterModelGrid(5, 5)
    >>> z = rg.add_zeros('node', 'topographic__elevation')
    >>> z[:] = np.array([100., 100., 95., 100., 100.,
    ...                  100., 101., 92., 1., 100.,
    ...                  100., 101., 2., 101., 100.,
    ...                  100., 3., 101., 101., 100.,
    ...                  90., 95., 100., 100., 100.])
    >>> df = DepressionFinderAndRouter(rg)
    >>> df.map_depressions(pits=None, reroute_flow=False)
    >>> df.display_depression_map()  # doctest: +NORMALIZE_WHITESPACE
    . . . . .
    . . . ~ .
    . . ~ . .
    . ~ . . .
    o . . . .
    """
    # Refresh cached boundary-condition info if the grid's BCs changed.
    if self._bc_set_code != self.grid.bc_set_code:
        self.updated_boundary_conditions()
        self._bc_set_code = self.grid.bc_set_code
    self._lake_map.fill(LOCAL_BAD_INDEX_VALUE)
    self.depression_outlet_map.fill(LOCAL_BAD_INDEX_VALUE)
    self.depression_depth.fill(0.)
    self.depression_outlets = []  # reset these
    # Locate nodes with pits
    if isinstance(pits, str):  # FIX: was `type(pits) == str`
        try:
            pits = self._grid.at_node[pits]
            supplied_pits = np.where(pits)[0]
            # boundary nodes are never treated as pits
            self.pit_node_ids = as_id_array(
                np.setdiff1d(supplied_pits, self._grid.boundary_nodes))
            self.number_of_pits = self.pit_node_ids.size
            self.is_pit.fill(False)
            self.is_pit[self.pit_node_ids] = True
        except FieldError:
            # named field is absent: fall back to finding pits ourselves
            self._find_pits()
    elif pits is None:
        self._find_pits()
    else:  # hopefully an array or other sensible iterable
        if len(pits) == self._grid.number_of_nodes:
            # boolean mask over all nodes
            supplied_pits = np.where(pits)[0]
        else:  # it's an array of node ids
            supplied_pits = pits
        # remove any boundary nodes from the supplied pit list
        self.pit_node_ids = as_id_array(
            np.setdiff1d(supplied_pits, self._grid.boundary_nodes))
        self.number_of_pits = self.pit_node_ids.size
        self.is_pit.fill(False)
        self.is_pit[self.pit_node_ids] = True
    # Set up "lake code" array
    self.flood_status.fill(_UNFLOODED)
    self.flood_status[self.pit_node_ids] = _PIT
    self._identify_depressions_and_outlets(reroute_flow)
    # FIX: membership test directly on the mapping, not on `.keys()`.
    if reroute_flow and ('flow__receiver_node' in self._grid.at_node):
        self.receivers = self._grid.at_node['flow__receiver_node']
        self.sinks = self._grid.at_node['flow__sink_flag']
        self.grads = self._grid.at_node['topographic__steepest_slope']
        self._route_flow()
        self._reaccumulate_flow()
def _find_unresolved_neighbors(self, nbrs, receivers):
"""Make and return list of neighbors of node with unresolved flow dir.
Examples
--------
>>> import numpy as np
>>> from landlab.components import DepressionFinderAndRouter
>>> from landlab import RasterModelGrid
>>> rg = RasterModelGrid((7, 8))
>>> z = rg.add_zeros('node', 'topographic__elevation')
>>> df = DepressionFinderAndRouter(rg)
>>> rcvr = np.arange(56)
>>> rcvr[13] = -1
>>> rcvr[21] = -1
>>> rcvr[29] = -1
>>> rcvr[30] = -1
>>> nbrs = np.array([23, 30, 21, 14], dtype=int)
>>> df._find_unresolved_neighbors(nbrs, rcvr)
array([30, 21])
"""
#unresolved = np.where(receivers[nbrs] == -1)[0]
#ur_nbrs = nbrs[unresolved]
#ur_links = self._grid.links_at_node[unresolved]
#return (ur_nbrs, ur_links)
return nbrs[np.where(receivers[nbrs] == -1)[0]]
def _find_unresolved_neighbors_new(self, nbrs, nbr_links, receivers):
"""Make and return list of neighbors of node with unresolved flow dir.
Examples
--------
>>> import numpy as np
>>> from landlab.components import DepressionFinderAndRouter
>>> from landlab import RasterModelGrid
>>> rg = RasterModelGrid((7, 8))
>>> z = rg.add_zeros('node', 'topographic__elevation')
>>> df = DepressionFinderAndRouter(rg)
>>> rcvr = np.arange(56)
>>> rcvr[13] = -1
>>> rcvr[21] = -1
>>> rcvr[29] = -1
>>> rcvr[30] = -1
>>> nbrs = rg.neighbors_at_node[22]
>>> nbr_links = rg.links_at_node[22]
>>> df._find_unresolved_neighbors_new(nbrs, nbr_links, rcvr)
(array([30, 21]), array([43, 35]))
>>> nbrs = rg._diagonal_neighbors_at_node[22]
>>> nbr_links = rg._diagonal_links_at_node[22]
>>> df._find_unresolved_neighbors_new(nbrs, nbr_links, rcvr)
(array([29, 13]), array([136, 121]))
"""
unresolved = np.where(receivers[nbrs] == -1)[0]
ur_nbrs = nbrs[unresolved]
ur_links = nbr_links[unresolved]
return (ur_nbrs, ur_links)
def _route_flow_for_one_lake(self, outlet, lake_nodes):
    """Route flow across a single lake. Alternative to part of _route_flow.

    Performs a breadth-first sweep outward from the outlet: every lake
    node is first marked unresolved (-1), then successive "rings" of
    unresolved neighbors are pointed at the ring before them, so all lake
    flow ultimately drains to ``outlet``.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> import numpy as np
    >>> from landlab.components import DepressionFinderAndRouter
    >>> rg = RasterModelGrid((7, 8))
    >>> z = rg.add_zeros('node', 'topographic__elevation')
    >>> rcvr = rg.add_zeros('node', 'flow__receiver_node', dtype=int)
    >>> rcvr[:] = np.arange(rg.number_of_nodes)
    >>> lake_nodes = np.array([10, 12, 13, 19, 20, 21, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 37, 38, 44, 45, 46])
    >>> rcvr[9] = 1
    >>> rcvr[11] = 3
    >>> rcvr[14] = 6
    >>> rcvr[17] = 16
    >>> rcvr[18] = 17
    >>> rcvr[22] = 14  # this is the outlet
    >>> rcvr[41] = 40
    >>> rcvr[42] = 50
    >>> rcvr[43] = 51
    >>> df = DepressionFinderAndRouter(rg)
    >>> df.receivers = rcvr
    >>> df._route_flow_for_one_lake(22, lake_nodes)
    >>> df.receivers
    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  1, 19,  3, 13, 22,  6, 15, 16,
           16, 17, 20, 21, 22, 14, 23, 24, 26, 27, 28, 29, 22, 22, 31, 32, 34,
           35, 36, 29, 29, 30, 39, 40, 40, 50, 51, 36, 37, 38, 47, 48, 49, 50,
           51, 52, 53, 54, 55])
    """
    # Flag receiver nodes inside the lake as "unresolved"
    UNRESOLVED = -1
    self.receivers[lake_nodes] = UNRESOLVED
    # We work with two lists: the nodes currently being processed, and
    # the nodes that will be processed on the next iteration. We start with
    # the outlet node as the one being processed, and an empty list of
    # nodes to process next.
    nodes_being_processed = [outlet]
    nodes_to_proc_next = []
    # We must now iterate until we've taken care of all the nodes in the
    # lake. In each iteration, we:
    #  1 - find the unresolved neighbors of nodes being processed
    #  2 - point them toward the nodes being processed
    #  3 - place them on the nodes_to_proc_next list
    # We stop when there are no more nodes to process.
    # Note that the nested looping will be slow, but could be sped up
    # by translating to cython.
    counter = 0  # counts # of times thru loop as fail-safe
    done = False
    while not done:
        # Get unresolved "regular" neighbors of the current nodes
        for cn in nodes_being_processed:
            # Get active and unresolved neighbors of cn
            (nbrs, lnks) = self._find_unresolved_neighbors_new(
                self.grid.neighbors_at_node[cn],
                self.grid.links_at_node[cn], self.receivers)
            # They will now flow to cn
            if nbrs.size > 0:
                self.receivers[nbrs] = cn
                if 'flow__link_to_receiver_node' in self._grid.at_node:
                    self._grid.at_node['flow__link_to_receiver_node'][nbrs] = lnks
                    # Slope toward cn, floored at zero (lake surface is flat)
                    slopes = ((self._elev[nbrs] - self._elev[cn]) /
                              self._grid.length_of_link[lnks])
                    self._grid.at_node['topographic__steepest_slope'][nbrs] = np.maximum(slopes, 0.0)
                # Place them on the list of nodes to process next
                for n in nbrs:
                    nodes_to_proc_next.append(n)
        # If we're working with a raster that has diagonals, do the same
        # for the diagonal neighbors
        if self._D8:
            # Get unresolved "regular" neighbors of the current nodes
            for cn in nodes_being_processed:
                # Get active and unresolved diagonal neighbors of cn
                # nbrs = self._find_unresolved_neighbors(
                #     self._grid._get_diagonal_list(cn), self.receivers)
                (nbrs, diags) = self._find_unresolved_neighbors_new(
                    self._grid._diagonal_neighbors_at_node[cn],
                    self._grid._diagonal_links_at_node[cn],
                    self.receivers)
                # They will now flow to cn
                if nbrs.size > 0:
                    self.receivers[nbrs] = cn
                    if 'flow__link_to_receiver_node' in self._grid.at_node:
                        self._grid.at_node['flow__link_to_receiver_node'][nbrs] = diags
                        slopes = ((self._elev[nbrs] - self._elev[cn]) /
                                  self._diag_link_length)
                        self._grid.at_node['topographic__steepest_slope'][nbrs] = np.maximum(slopes, 0.0)
                    # Place them on the list of nodes to process next
                    for n in nbrs:
                        nodes_to_proc_next.append(n)
        # Move to the next set of nodes
        nodes_being_processed = nodes_to_proc_next
        nodes_to_proc_next = []
        if not nodes_being_processed:
            done = True
        # Just in case
        counter += 1
        assert (counter < self._grid.number_of_nodes), 'inf loop in lake'
def _route_flow(self):
"""Route flow across lake flats.
Route flow across lake flats, which have already been identified.
"""
# Process each lake.
for outlet_node, lake_code in zip(self.lake_outlets, self.lake_codes):
# Get the nodes in the lake
| |
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QPushButton, QLabel, QVBoxLayout, QWidget
from PyQt5 import QtWidgets, QtCore,QtGui
#from PyQt5 import uic
import sys
from PyQt5.uic import loadUi
import os # We need sys so that we can pass argv to QApplication
import pandas as pd
from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
import numpy as np
import random
import xlsxwriter
#methodes
import back_codes.lissage_expo_triple_vf as LissageholtWinters
from sklearn.metrics import mean_squared_error #calcul des erreurs
import back_codes.arima_function as Arima
import back_codes.lissage_simple_et_double_vf as lissage_simple_et_double
## Working directories
path = os.getcwd()
print("Current Directory", path)
parent = os.path.abspath(os.path.join(path, os.pardir))
# Center a widget on the screen (method 1: offset from the desktop rect center).
screen_center = lambda widget: QApplication.desktop().screen().rect().center() - widget.rect().center()
# Centering helper, method 2 (Qt-native, multi-monitor aware).
def centerWidgetOnScreen(widget):
    """Move `widget` so its frame is centered on the primary screen."""
    primary = QtWidgets.QApplication.primaryScreen()
    target = QtGui.QScreen.availableGeometry(primary).center()
    frame = widget.frameGeometry()
    frame.moveCenter(target)
    widget.move(frame.topLeft())
# evaluate forecasts
def erreur(données, predictions):
    """Print and return the RMSE between observed data and predictions.

    FIX: the original called `sqrt(...)` but `sqrt` was never imported
    anywhere in this module, so every call raised NameError.  The RMSE is
    now computed with numpy (already imported as `np`), which is
    numerically equivalent to sqrt(mean_squared_error(...)).
    """
    observed = np.asarray(données, dtype=float)
    predicted = np.asarray(predictions, dtype=float)
    rmse = float(np.sqrt(np.mean((observed - predicted) ** 2)))
    print('Test RMSE: %.3f' % rmse)
    return rmse
################################################################################################## DATA
# Series type labels (French UI strings): temporal vs causal series.
Type = {"T": "Temporelle",
        "C": "Causal"}
# Seasonal model names (additive / multiplicative).
mType = {"Ad": 'additive',
         "Mu": 'multiplicative'}
# Forecasting method display names, keyed by short code.
Methodes = {
    "LS": "Lissage simple",
    "LD": "Lissage double",
    "LT": "Lissage triple",
    "LST": "LSTM",
    "RL": "Regression lineaire",
    "AR": "ARIMA"
}
# Numeric index per method code.
# NOTE(review): contains an "RP" key that has no counterpart in `Methodes`
# above — confirm whether a method entry is missing or the key is stale.
Indices_méthodes = {
    "LS": 1,
    "LD": 2,
    "LT": 3,
    "RL": 4,
    "LST": 5,
    "RP": 6,
    "AR": 7
}
# Initial navigation / screen-tracking state.
ListScreen = []
ListeEcrans = []
nb_screen = 0
Dict = {}
# Sample series loaded eagerly at import time (first column only).
df = pd.read_csv('data\\sampledata.csv', usecols=[0], engine='python')
# Shared application state bundle.
Data = {
    "TypeSerie": Type["C"],
    "ListeScreensEmpillés": ListScreen,
    "ListeEcrans": ListeEcrans,
    "nbScreen": nb_screen,
    "DictionnairesDesIndex": Dict,
    "DataFrame": df,
    "mType": mType['Ad'],
    "Dict_Methodes_Choisies": [1, 2, 3, 7]
}
# NOTE(review): this rebinds the *name* ListScreen to a fresh list, while
# Data["ListeScreensEmpillés"] still references the original empty list
# created above — confirm this divergence is intended.
ListScreen = [1]  # list of visited screen numbers, in order!!!
################################################################################################## METHODS
def indexScreen(screenNumber):
    """Return the stacked-widget index registered for `screenNumber`.

    Raises KeyError if the screen was never registered in `Dict`.
    """
    return Dict[screenNumber]
def goToScreen(widget, screenNumber):
    """Switch the stacked *widget* to *screenNumber* and push it on the history.

    Screens above 8 (except 13) use the enlarged window geometry.
    """
    if screenNumber > 8 and screenNumber != 13:
        # Fix: Qt setFixedWidth/Height require int; 2.1*640 is a float and
        # raises TypeError under PyQt5, so cast explicitly.
        widget.setFixedWidth(int(2.1 * 640))
        widget.setFixedHeight(int(1.5 * 480))
    else:
        widget.setFixedWidth(640)
        widget.setFixedHeight(480)
    widget.setCurrentIndex(indexScreen(screenNumber))
    widget.move(screen_center(widget))
    centerWidgetOnScreen(widget)
    ListScreen.append(screenNumber)
    print(ListScreen)
    print("On est passé à l'écran: ", screenNumber)
    if screenNumber == 5:
        # Screen 5 lets the user edit the data, so open the Excel file.
        os.startfile("data\\excl.xlsx")
    print('''Data["TypeSerie"]''', Data["TypeSerie"])
def screenBackFrom(widget, screenNumber):
    """Pop *screenNumber* off the navigation history and show the previous screen."""
    NumberOfThescreenToLeft = ListScreen.pop()
    print(ListScreen)
    # Sanity check: the screen we leave must be the one on top of the stack.
    if NumberOfThescreenToLeft != screenNumber:
        print("a huge pb")
    toScreen = ListScreen[-1]
    if toScreen > 8:
        # Fix: Qt setFixedWidth/Height require int; cast the scaled floats.
        widget.setFixedWidth(int(2.1 * 640))
        widget.setFixedHeight(int(1.5 * 480))
    else:
        widget.setFixedWidth(640)
        widget.setFixedHeight(480)
    widget.move(screen_center(widget))
    centerWidgetOnScreen(widget)
    widget.setCurrentIndex(indexScreen(toScreen))
#print(Data)
#def connectToScreen(screenNumber,buton,widget): #numero de l'écran; trouver le bouton dans le design; mettre le stackedWidget
# buton.clicked.connect(lambda x: goToScreen(widget, screenNumber))
def connectToScreen(buton, widget, screenToGONumber):
    """Wire *buton* so that clicking it navigates *widget* to *screenToGONumber*."""
    def _on_click(checked=False):
        goToScreen(widget, screenToGONumber)
    buton.clicked.connect(_on_click)
def BackFromScreen(buton, widget, screenToLeftNumber):
    """Wire *buton* so that clicking it leaves screen *screenToLeftNumber*."""
    def _on_click(checked=False):
        screenBackFrom(widget, screenToLeftNumber)
    buton.clicked.connect(_on_click)
###########import into excel file
#exporter mles donnees dans une table excel
def to_excel(data):
    """Write *data* (a list of values) to data\\CreatedByCode.xlsx.

    Column A holds a month label (cycling every 12 rows), column B the value.
    Fix: the original first created a throwaway ``Workbook()`` with no
    filename that was never closed; only one workbook is needed.
    """
    months = ('January', 'February', 'March',
              'April', 'May', 'June', 'July',
              'August', 'September', 'October',
              'November', 'December')
    workbook = xlsxwriter.Workbook('data\\CreatedByCode.xlsx')
    ws = workbook.add_worksheet()
    ws.write("A1", "Months")
    ws.write("B1", "Values")
    for i, value in enumerate(data):
        # Rows start at 2 (row 1 holds the headers).
        ws.write("A{0}".format(i + 2), months[i % 12])
        ws.write("B{0}".format(i + 2), value)
    workbook.close()
#exporter mles résulats dans une table excel
def to_excel_data(data):
    """Write *data* (a list of values) to data\\excl.xlsx for the user to edit.

    Column A holds a month label (cycling every 12 rows), column B the value.
    Fix: the original first created a throwaway ``Workbook()`` with no
    filename that was never closed; only one workbook is needed.
    """
    months = ('January', 'February', 'March',
              'April', 'May', 'June', 'July',
              'August', 'September', 'October',
              'November', 'December')
    workbook = xlsxwriter.Workbook('data\\excl.xlsx')
    ws = workbook.add_worksheet()
    ws.write("A1", "Months")
    ws.write("B1", "Values")
    for i, value in enumerate(data):
        # Rows start at 2 (row 1 holds the headers).
        ws.write("A{0}".format(i + 2), months[i % 12])
        ws.write("B{0}".format(i + 2), value)
    workbook.close()
def MAD(données, previsions):
    """Sum of absolute deviations between the two series, truncated to int.

    Only the overlapping prefix of the two lists is compared.
    NOTE(review): despite the name this is a *sum*, not a mean, of absolute
    deviations; callers display the value as-is, so the behavior is kept.
    """
    # zip() stops at the shorter sequence, matching the original min(n) bound.
    return int(sum(abs(d - p) for d, p in zip(données, previsions)))
#il retourne le MSE et le RMSE
#il prend deux listes
def MSE(données, previsions):
    """Return ``(MSE, RMSE)`` between the two series, truncated to ints.

    Only the overlapping prefix of the two lists is compared.
    Fixes: the original returned the raw sum of squared errors as "MSE" and
    ``sqrt(SSE)/n`` as "RMSE"; both now use the standard definitions
    (MSE = SSE/n, RMSE = sqrt(MSE)).  Empty input no longer divides by zero.
    """
    n = min(len(données), len(previsions))
    if n == 0:
        return 0, 0
    sse = sum((d - p) ** 2 for d, p in zip(données, previsions))
    mse = sse / n
    return int(mse), int(np.sqrt(mse))
##################################################################################################SCREENS
class MainWindow(QDialog):
    """First screen of the application; its single button jumps to screen 3."""
    def __init__(self):
        super().__init__()
        loadUi("files\\page1.ui", self)
        connectToScreen(self.pushButton, widget, 3)
# class MainWindow(QDialog):
# def __init__(self):
# super(MainWindow,self).__init__()
# loadUi("files\\page1.ui",self)
# self.pushButton.clicked.connect(self.goToScreen3)
# def goToScreen3(self):
# screen3 = Screen3()
# widget.addWidget(screen3)
# widget.setCurrentIndex(widget.currentIndex()+1)
# widget.setCurrentIndex(indexScreen(3))
# class MainWindow(QDialog):
# def __init__(self):
# super(MainWindow,self).__init__()
# loadUi("files\\page1.ui",self)
# self.pushButton.clicked.connect(self.goToScreen3)
# def goToScreen3(self):
# screen3 = Screen3()
# widget.addWidget(screen3)
# widget.setCurrentIndex(widget.currentIndex()+1)
class Screen2(QDialog):
    """Second screen; its button continues to screen 3."""
    def __init__(self):
        super().__init__()
        loadUi("files\\page2.ui", self)
        self.setWindowTitle("screen2")
        connectToScreen(self.pushButton, widget, screenToGONumber=3)
class Screen3(QDialog):
    """Screen 3: the user chooses the series type, then opens the data on screen 5."""
    def __init__(self):
        super().__init__()
        loadUi("files\\page3.ui", self)
        self.setWindowTitle("screen3")
        BackFromScreen(self.retour, widget, screenToLeftNumber=3)
        self.openExcel.clicked.connect(lambda checked: self.goToScreen5(widget))
    def goToScreen5(self, widget):
        """Record the chosen series type (temporal vs. causal), then go to screen 5."""
        if self.ChoixserieTemporelle.isChecked():
            Data["TypeSerie"] = Type["T"]
        else:
            Data["TypeSerie"] = Type["C"]
        goToScreen(widget, screenNumber=5)
class Screen4(QDialog):
    """Screen 4: loads the sample data set and shows a demo plot."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("screen4")
        loadUi("files\\page4.ui", self)
        connectToScreen(self.valider, widget, screenToGONumber=5)
        BackFromScreen(self.retour, widget, screenToLeftNumber=4)
        self.AfficherDonnées()
    def AfficherDonnées(self):
        """Load the sample CSV into the shared Data dict and draw a demo curve."""
        Data["DataFrame"] = pd.read_csv('data\\sampledata.csv', usecols=[0], engine='python')
        print("Column headings:")
        print(Data["DataFrame"].columns)
        self.graphWidget = pg.PlotWidget()
        # Demo data only -- the real series is plotted on later screens.
        xs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ys = [30, 32, 34, 32, 33, 31, 29, 32, 35, 45]
        self.graphWidget.plot(xs, ys)
class ScreenGraph41(QtWidgets.QMainWindow):
    """Stand-alone window that displays a demo pyqtgraph curve."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.graphWidget = pg.PlotWidget()
        self.setCentralWidget(self.graphWidget)
        xs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ys = [30, 32, 34, 32, 33, 31, 29, 32, 35, 45]
        self.graphWidget.plot(xs, ys)
class Screen5(QDialog):
    """Screen 5: lets the user edit the data in Excel and preview a forecast."""
    def __init__(self):
        super(Screen5, self).__init__()
        loadUi("files\\page5.ui", self)
        self.setWindowTitle("screen5")
        self.enregistrer.clicked.connect(self.goToScreen)
        BackFromScreen(self.retour, widget, screenToLeftNumber=5)
        self.pushButton_generate_random_signal.setObjectName("pushButton_6")
        self.pushButton_generate_random_signal.clicked.connect(self.retrieve_Excel)
        # Matplotlib navigation toolbar embedded above the plot widget
        # ("MplWidget" is the promoted widget name from the .ui design).
        self.navi_toolbar = NavigationToolbar(self.MplWidget.canvas, self)
        self.navi_toolbar.setGeometry(QtCore.QRect(10, 10, 200, 100))
        self.myverticalLayout.addWidget(self.navi_toolbar)
    def goToScreen(self):
        """Route to screen 6 (temporal) or 7 (causal) depending on the chosen type."""
        if Data["TypeSerie"] == Type["T"]:
            screenToGONumber = 6
        elif Data["TypeSerie"] == Type["C"]:
            screenToGONumber = 7
        else:
            screenToGONumber = 1
        goToScreen(widget, screenToGONumber)
    def retrieve_Excel(self):
        """Reload the (possibly user-edited) data and refresh the forecast."""
        # Placeholder flag: the "file saved?" check is not implemented yet,
        # so the forecast is always refreshed.
        fichierEnregistré = True
        if fichierEnregistré:
            self.prevision()
    def prevision(self):
        """Load the sample series and plot the triple-smoothing forecast."""
        DataFrame = Data['DataFrame']
        # Fixes: close the file handle, and catch only the expected I/O error
        # instead of a bare except.
        try:
            sdata = open('sampledata.csv')
        except OSError:
            sdata = open('data\\sampledata.csv')
        with sdata:
            tsA = sdata.read().split('\n')
        tsA = list(map(int, tsA))
        self.M = METHODES_DE_PREDICTION(Data['mType'], DataFrame)
        self.AfficherLissageTriple(self.M)
    def AfficherLissageTriple(self, M):
        """Plot the series returned by the triple-smoothing method."""
        toPlot, tsA, logs = self.M.LissageTriple()
        self.MplWidget.canvas.axes.clear()
        self.MplWidget.canvas.axes.plot(tsA)
        # Fix: ('prevision') is just a string and matplotlib would then use
        # each *character* as a label; a one-element tuple is intended.
        self.MplWidget.canvas.axes.legend(('prevision',), loc='upper right')
        self.MplWidget.canvas.axes.set_title('courbe de la prevision')
        self.MplWidget.canvas.draw()
#afficher la tendance obtenue dans l'hypothèse chronologique
class Screen6(QDialog):
    """Screen 6 (temporal branch): confirmation step before the results screen."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("screen6")
        loadUi("files\\page6.ui", self)
        connectToScreen(self.continuer, widget, screenToGONumber=9)
        BackFromScreen(self.retour, widget, screenToLeftNumber=6)
class Screen7(QDialog):
    """Screen 7 (causal branch): continues to screen 8."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("screen7")
        loadUi("files\\page7.ui", self)
        connectToScreen(self.continuer, widget, screenToGONumber=8)
        BackFromScreen(self.retour_2, widget, screenToLeftNumber=7)
class Screen8(QDialog):
    """Screen 8: continues to the results screen (9)."""
    def __init__(self):
        super().__init__()
        # Forecast-method holder, filled in later by the prediction screens.
        self.M = 0
        loadUi("files\\page8.ui", self)
        connectToScreen(self.continuer, widget, screenToGONumber=9)
        BackFromScreen(self.retour, widget, screenToLeftNumber=8)
class Screen9(QDialog):
    """Screen 9: entry point to refine, compare or choose forecasting methods."""
    def __init__(self):
        super(Screen9, self).__init__()
        loadUi("files\\page9.ui", self)
        # All three actions currently lead to the comparison screen (13).
        connectToScreen(self.AffinerPrevision, widget, screenToGONumber=13)
        connectToScreen(self.comparerMethodes, widget, screenToGONumber=13)
        connectToScreen(self.ChoisirMethode, widget, screenToGONumber=13)
        BackFromScreen(self.retour, widget, screenToLeftNumber=9)
        self.pushButton_generate_random_signal_2.setObjectName("pushButton_6")
        self.pushButton_generate_random_signal_2.clicked.connect(self.prevision)
    def prevision(self):
        """Load the sample series and plot the triple-smoothing forecast."""
        DataFrame = Data['DataFrame']
        # Fixes: close the file handle, and catch only the expected I/O error
        # instead of a bare except.
        try:
            sdata = open('data\\sampledata.csv')
        except OSError:
            sdata = open('sampledata.csv')
        with sdata:
            tsA = sdata.read().split('\n')
        tsA = list(map(int, tsA))
        self.M = METHODES_DE_PREDICTION(Data['mType'], DataFrame)
        self.AfficherLissageTriple(self.M)
    def AfficherLissageTriple(self, M):
        """Plot forecast vs. reality on the embedded matplotlib canvas."""
        toPlot, tsA, logs = self.M.LissageTriple()
        self.MplWidget_2.canvas.axes.clear()
        self.MplWidget_2.canvas.axes.plot(toPlot)
        self.MplWidget_2.canvas.axes.plot(tsA)
        # Fix: ('prevision' 'realite') was implicit string concatenation,
        # i.e. ONE label 'previsionrealite'; a comma was intended.
        self.MplWidget_2.canvas.axes.legend(('prevision', 'realite'), loc='upper right')
        self.MplWidget_2.canvas.axes.set_title('courbe de la prevision')
        self.MplWidget_2.canvas.draw()
class Screen10(QDialog):
def __init__(self):
super(Screen10, self).__init__()
self.M = 0
loadUi("files\\page100.ui", self)
BackFromScreen(self.retour, widget, screenToLeftNumber=10)
# connectToScreen(self.parametres1, widget, screenToGONumber=13)
# connectToScreen(self.parametres1, widget, screenToGONumber=14)
# connectToScreen(self.parametres1, widget, screenToGONumber=15)
# connectToScreen(self.parametres1, widget, screenToGONumber=16)
self.afficher.setObjectName("pushButton_6")
self.afficher.clicked.connect(self.prevision)
self.ajuster1.clicked.connect(self.ajuster)
self.ajuster2.clicked.connect(self.ajuster)
self.ajuster3.clicked.connect(self.ajuster)
self.ajuster4.clicked.connect(self.ajuster)
self.progressBar.setValue(0)
def ajuster(self):
#vider les plot
self.courbe1.canvas.axes.clear()
self.courbe2.canvas.axes.clear()
self.courbe3.canvas.axes.clear()
self.courbe4.canvas.axes.clear()
self.progressBar.setValue(0)
self.afficher.setText("Rechargez les courbes")
screenBackFrom(widget,screenNumber=10)
    def exporter(self,i,Liste_previsions):
        # Export the given forecast list to the Excel output file.
        # ``i`` identifies the method's curve but is currently unused.
        to_excel(Liste_previsions)
###pour les courbes
##self.prevision()
#self.afficher.setObjectName("pushButton_6")
#self.afficher.clicked.connect(self.prevision)
# def prevision(self):
# DataFrame = Data['DataFrame']
# self.M = METHODES_DE_PREDICTION(Data['mType'],DataFrame)
# Array = DataFrame.values
# self.AfficherLissageTriple(Array)
# self.methode1(self.courbe1,Array) #lissage triple
# self.methode2(self.courbe2,Array) #arima
##self.methode3(self.courbe3,Array)
##self.methode4(self.courbe4,Array)
# def methode1(self,mplWidget,Array):
# toPlot = self.M.LissageTriple()
# toPlot = toPlot[0]
# temps = Array[:,0]
##les demandes seront en position 1 dans le dataframe
# demande = Array[:,1] #[30, 32, 34, 32, 33, 31, 29, 32, 35, 45]
# mplWidget.canvas.axes.clear()
# mplWidget.canvas.axes.plot(toPlot)
# mplWidget.canvas.axes.plot(temps, demande)
# mplWidget.canvas.axes.legend(('lissage triple prevision' 'realite'), loc='upper right')
# mplWidget.canvas.axes.set_title('courbe de la prevision')
# mplWidget.canvas.draw()
##calcul du mad
# print("---------------------------------------------")
# print("(len(demande)= ", len(demande))
# print("len(toPlot) = ", len(toPlot))
    def prevision(self):
        """Run the selected forecasting methods and fill the comparison plots,
        advancing the progress bar between methods."""
        DataFrame = Data['DataFrame']
        # NOTE(review): other call sites pass (mType, DataFrame); here the
        # DataFrame argument is omitted -- confirm METHODES_DE_PREDICTION's
        # signature allows this.
        self.M = METHODES_DE_PREDICTION(Data['mType'])
        # NOTE(review): Array is computed but never used here.
        Array = DataFrame.values
        self.progressBar.setValue(0);
        self.AfficherLissageTriple()
        self.progressBar.setValue(25);self.AfficherLissageSimple()
        self.progressBar.setValue(50);self.methode2(self.courbe2)
        self.progressBar.setValue(75);self.AfficherLissageDouble()
        self.progressBar.setValue(100)
def AfficherLissageTriple(self):
CoeffLissageSimplre = Data['Dict_Methodes_Choisies'][3]
alpha = CoeffLissageSimplre[0]
beta = CoeffLissageSimplre[1]
beta = CoeffLissageSimplre[2]
toPlot,tsA,logs= self.M.LissageTriple()
#des tests
print("---------------------------------")
print("predictions triple: ",toPlot) #toPlot et tsA sont des listes
print("len = ",len(toPlot))
print(len(tsA))#144
n = min(len(tsA),len(toPlot))
#les erreurs
données, previsions = tsA, toPlot
mad = MAD(données, previsions)
mse, rmse = MSE(données, previsions)
self.mad1.setText(str(mad))
self.mse1.setText(str(mse))
self.rmse1.setText(str(rmse))
#les courbes
self.courbe1.canvas.axes.clear()
self.courbe1.canvas.axes.plot(toPlot,color='g', label='series')
self.courbe1.canvas.axes.plot(tsA,color='r', label='result')
self.courbe1.canvas.axes.legend(('prevision' 'realite'), loc='upper right')
self.courbe1.canvas.axes.set_title('courbe de la prevision')
self.courbe1.canvas.draw()
self.exporter1.clicked.connect(lambda x: self.exporter(1,toPlot))
###calcul du mad
def AfficherLissageSimple(self):
CoeffLissageSimplre = Data['Dict_Methodes_Choisies'][1]
alpha = CoeffLissageSimplre[0]
toPlot,tsA,logs= self.M.LissageSimple(alpha)
#to_excel_data(tsA)
print("---------------------------------")
print("predictions simple: ",toPlot)
print("len = ",len(toPlot))
print(len(tsA))
#les erreurs
données, previsions = tsA, toPlot
mad = MAD(données, previsions)
mse, rmse = MSE(données, previsions)
self.mad3.setText(str(mad))
self.mse3.setText(str(mse))
self.rmse3.setText(str(rmse))
#les courbes
| |
self.country_list})\
.filter(sex__in=self.male_female_ids)\
.filter(victim_or_survivor='Y')\
.exclude(survivor_of__in=[0,9])\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
secondary_counts['Not a victim'] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
def ws_s07(self, ws):
"""
Cols: Family status; Sex
Rows: Country
:: Newspaper, television, radio
"""
secondary_counts = OrderedDict()
for code, answer in YESNO:
counts = Counter()
for media_type, model in tm_person_models.items():
country = model.sheet_name() + '__country'
rows = model.objects\
.values('sex', country)\
.filter(**{country + '__in': self.country_list})\
.filter(sex__in=self.male_female_ids)\
.filter(family_role=code)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
secondary_counts[answer] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
def ws_s08(self, ws):
"""
Cols: Quoted; Sex
Rows: Country
:: Newspaper only
"""
secondary_counts = OrderedDict()
model = person_models.get('Print')
for code, answer in YESNO:
counts = Counter()
country = model.sheet_name() + '__country'
rows = model.objects\
.values('sex', country)\
.filter(**{country + '__in': self.country_list})\
.filter(sex__in=self.male_female_ids)\
.filter(is_quoted=code)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
secondary_counts[answer] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
def ws_s09(self, ws):
"""
Cols: Photographed; Sex
Rows: Country
:: Newspaper only
"""
secondary_counts = OrderedDict()
model = person_models.get('Print')
for code, answer in IS_PHOTOGRAPH:
counts = Counter()
country = model.sheet_name() + '__country'
rows = model.objects\
.values('sex', country)\
.filter(**{country + '__in': self.country_list})\
.filter(sex__in=self.male_female_ids)\
.filter(is_photograph=code)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
secondary_counts[answer] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
    def ws_s10(self, ws):
        """
        Cols: Media; Journo Type; Sex
        Rows: Country
        :: Newspaper only

        One column band per medium; broadcast media are split into
        Presenter/Reporter role groups, others get a single Reporter group.
        Row headings are written only for the first band.
        """
        c = 1
        r = 8
        write_row_headings = True
        for media_type, model in tm_journalist_models.items():
            if media_type in broadcast_journalist_models:
                # Role ids: 1/3 are presenter-type roles, 2 is reporter.
                presenter_reporter = [('Presenter',[1, 3]), ('Reporter', [2])]
            else:
                # Newspaper journos don't have roles
                presenter_reporter = [('Reporter', [])]
            col = c + (1 if write_row_headings else 0)
            # Width of this medium's band: 2 cells (N + %) per sex per role group.
            merge_range = (len(presenter_reporter) * len(self.male_female) * 2) - 1
            ws.merge_range(r-4, col, r-4, col + merge_range, clean_title(media_type), self.col_heading)
            secondary_counts = OrderedDict()
            for journo_type, role_ids in presenter_reporter:
                counts = Counter()
                country = model.sheet_name() + '__country'
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    # Newspaper journos don't have roles
                    # NOTE(review): the role filter is applied when media_type
                    # IS in REPORTER_MEDIA, which seems at odds with the
                    # comment above -- confirm the intended membership.
                    rows = rows.filter(role__in=role_ids)
                for row in rows:
                    counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                secondary_counts[journo_type] = counts
            self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, c=c, r=r, write_row_headings=write_row_headings, row_perc=True, show_N=True)
            # Advance the column cursor past this band (plus heading column once).
            c += (len(presenter_reporter) * len(self.male_female) * 2) + (1 if write_row_headings else 0)
            write_row_headings = False
    def ws_s11(self, ws):
        """
        Cols: Major topics; Sex
        Rows: Country
        :: Newspaper, television, radio

        One secondary column group per major topic; counts journalists by
        (sex, recoded country), restricted to reporters for the media types
        that distinguish roles.
        """
        secondary_counts = OrderedDict()
        for major_topic, topic_ids in GROUP_TOPICS_MAP.items():
            counts = Counter()
            for media_type, model in tm_journalist_models.items():
                country = model.sheet_name() + '__country'
                topic = model.sheet_name() + '__topic'
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(**{topic + '__in': topic_ids})\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    rows = rows.filter(role=REPORTERS)
                counts.update({(r['sex'], self.recode_country(r[country])): r['n'] for r in rows})
            # Resolve the major topic id to its display name.
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
def ws_s12(self, ws):
"""
Cols: Major topics; Women Central
Rows: Country
:: Newspaper, television, radio
"""
counts = Counter()
for media_type, model in tm_sheet_models.items():
rows = model.objects\
.values('topic', 'country')\
.filter(country__in=self.country_list)\
.filter(about_women='Y')\
.annotate(n=Count('id'))
for row in rows:
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, self.recode_country(row['country'])): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, raw_values=True, write_col_totals=False)
    def ws_s13(self, ws):
        """
        Cols: Journalist Sex, Subject Sex
        Rows: Country
        :: Newspaper, television, radio

        Cross-tabulates the sex of the news subject against the sex of the
        journalist on the same sheet, restricted to reporters where roles
        exist.
        """
        secondary_counts = OrderedDict()
        for sex_id, sex in self.male_female:
            counts = Counter()
            for media_type, model in tm_person_models.items():
                sheet_name = model.sheet_name()
                # Name of the journalist relation on the sheet model, looked up
                # through the person's FK to its sheet.
                journo_name = model._meta.get_field(model.sheet_name()).remote_field.model.journalist_field_name()
                country = model.sheet_name() + '__country'
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(**{sheet_name + '__' + journo_name + '__sex':sex_id})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    rows = rows.filter(**{sheet_name + '__' + journo_name + '__role':REPORTERS})
                for row in rows:
                    counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
            secondary_counts[sex] = counts
        # Special key consumed by tabulate_secondary_cols for column titles.
        secondary_counts['col_title_def'] = [
            'Sex of reporter',
            'Sex of news subject']
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True)
def ws_s14(self, ws):
"""
Cols: Stereotypes
Rows: Country
:: Newspaper, television, radio
"""
counts = Counter()
for media_type, model in tm_sheet_models.items():
rows = model.objects\
.values('stereotypes', 'country')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['stereotypes'], self.recode_country(row['country'])): row['n']})
self.tabulate(ws, counts, AGREE_DISAGREE, self.countries, row_perc=True, show_N=True)
def ws_s15(self, ws):
"""
Cols: Gender inequality
Rows: Country
:: Newspaper, television, radio
"""
counts = Counter()
for media_type, model in tm_sheet_models.items():
rows = model.objects\
.values('inequality_women', 'country')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['inequality_women'], self.recode_country(row['country'])): row['n']})
self.tabulate(ws, counts, AGREE_DISAGREE, self.countries, row_perc=True, show_N=True)
def ws_s16(self, ws):
"""
Cols: Equality rights
Rows: Country
:: Newspaper, television, radio
"""
counts = Counter()
for media_type, model in tm_sheet_models.items():
rows = model.objects\
.values('equality_rights', 'country')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['equality_rights'], self.recode_country(row['country'])): row['n']})
self.tabulate(ws, counts, YESNO, self.countries, row_perc=True, show_N=True)
    def ws_s17(self, ws):
        """
        Cols: Sex of reporters and subjects
        Rows: Country
        :: Internet, Twitter

        For each digital medium: a 'Reporter' column group from the
        journalist model and a 'Subjects' group from the person model.
        """
        c = 1
        for media_type, model in dm_journalist_models.items():
            self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
            secondary_counts = OrderedDict()
            counts = Counter()
            country = model.sheet_name() + '__country'
            rows = model.objects\
                .values('sex', country)\
                .filter(**{country + '__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for row in rows:
                counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
            secondary_counts['Reporter'] = counts
            counts = Counter()
            # Switch to the matching person model for the 'Subjects' group.
            # NOTE(review): `country` still holds the field name computed from
            # the journalist model -- this assumes both models share the same
            # sheet relation name; confirm.
            model =dm_person_models[media_type]
            rows = model.objects\
                .values('sex', country)\
                .filter(**{country + '__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for row in rows:
                counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
            secondary_counts['Subjects'] = counts
            self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8)
            # Continue the next medium's band after the widest column written so far.
            c = ws.dim_colmax + 2
def ws_s18(self, ws):
"""
Cols: Sex of subjects
Rows: Country
:: Internet, Twitter
"""
c = 1
for media_type, model in dm_person_models.items():
self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
counts = Counter()
country = model.sheet_name() + '__country'
rows = model.objects\
.values('sex', country)\
.filter(**{country + '__in': self.country_list})\
.filter(sex__in=self.male_female_ids)\
.annotate(n=Count('id'))
for row in rows:
counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
self.tabulate(ws, counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=7)
c = ws.dim_colmax + 2
    def ws_s19(self, ws):
        """
        Cols: Major topics; Sex
        Rows: Country
        :: Internet, Twitter

        One column band per digital medium; within a band, one secondary
        column group per major topic.
        """
        c = 1
        for media_type, model in dm_person_models.items():
            self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
            secondary_counts = OrderedDict()
            for major_topic, topic_ids in GROUP_TOPICS_MAP.items():
                counts = Counter()
                country = model.sheet_name() + '__country'
                topic = model.sheet_name() + '__topic'
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(**{topic + '__in': topic_ids})\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], self.recode_country(r[country])): r['n'] for r in rows})
                # Resolve the major topic id to its display name.
                major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
                secondary_counts[major_topic_name] = counts
            self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8)
            # Continue the next medium's band after the widest column written so far.
            c = ws.dim_colmax + 2
    def ws_s20(self, ws):
        """
        Cols: Occupation; Sex
        Rows: Country
        :: Internet, Twitter

        Twitter is skipped entirely; one secondary column group per
        occupation for the remaining media.
        """
        c = 1
        for media_type, model in dm_person_models.items():
            if not media_type == 'Twitter':
                self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
                secondary_counts = OrderedDict()
                country = model.sheet_name() + '__country'
                for occupation_id, occupation in OCCUPATION:
                    counts = Counter()
                    rows = model.objects\
                        .values('sex', country)\
                        .filter(**{country + '__in': self.country_list})\
                        .filter(sex__in=self.male_female_ids)\
                        .filter(occupation=occupation_id)\
                        .annotate(n=Count('id'))
                    for row in rows:
                        counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                    secondary_counts[clean_title(occupation)] = counts
                self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8)
                # Continue the next medium's band after the widest column written so far.
                c = ws.dim_colmax + 2
    def ws_s21(self, ws):
        """
        Cols: Function; Sex
        Rows: Country
        :: Internet, Twitter

        Twitter is skipped entirely; one secondary column group per
        person function for the remaining media.
        """
        c = 1
        for media_type, model in dm_person_models.items():
            if not media_type == 'Twitter':
                self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
                secondary_counts = OrderedDict()
                country = model.sheet_name() + '__country'
                for function_id, function in FUNCTION:
                    counts = Counter()
                    rows = model.objects\
                        .values('sex', country)\
                        .filter(**{country + '__in': self.country_list})\
                        .filter(sex__in=self.male_female_ids)\
                        .filter(function=function_id)\
                        .annotate(n=Count('id'))
                    for row in rows:
                        counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                    secondary_counts[clean_title(function)] = counts
                self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8)
                # Continue the next medium's band after the widest column written so far.
                c = ws.dim_colmax + 2
    def ws_s22(self, ws):
        """
        Cols: Victims; Sex
        Rows: Country
        :: Internet, Twitter

        Twitter is skipped.  The 'Victim' group counts people marked as
        victims with a victim_of category; the 'Not a victim' group adds
        together explicit non-victims and people marked 'Y' whose
        survivor_of is set (same bucketing as ws_s06).
        """
        c = 1
        for media_type, model in dm_person_models.items():
            if not media_type == 'Twitter':
                self.write_primary_row_heading(ws, media_type, c=c+1, r=4)
                secondary_counts = OrderedDict()
                country = model.sheet_name() + '__country'
                counts = Counter()
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(victim_or_survivor='Y')\
                    .exclude(victim_of=0)\
                    .annotate(n=Count('id'))
                for row in rows:
                    counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                secondary_counts['Victim'] = counts
                counts = Counter()
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(victim_or_survivor='N')\
                    .annotate(n=Count('id'))
                for row in rows:
                    counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                # Survivors are folded into the 'Not a victim' bucket
                # (Counter.update adds, so the two querysets accumulate).
                rows = model.objects\
                    .values('sex', country)\
                    .filter(**{country + '__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(victim_or_survivor='Y')\
                    .exclude(survivor_of=0)\
                    .annotate(n=Count('id'))
                for row in rows:
                    counts.update({(row['sex'], self.recode_country(row[country])): row['n']})
                secondary_counts['Not a victim'] = counts
                self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8)
                # Continue the next medium's band after the widest column written so far.
                c = ws.dim_colmax + 2
def ws_s23(self, ws):
"""
Cols: Quoted; Sex
Rows: Country
:: Internet, Twitter
"""
c | |
N, 2)
Q_ij = uv_i.matvecmult(n_j - n_i) # (N, N, 2)
# Concatenate:
PQ_ij = P_ij.concat(Q_ij) # (N, N, 2+2)
# Covariances, with a scale-dependent weight:
PPt_PQt_ij = P_ij.tensorprod(PQ_ij) # (N, N, 2*(2+2))
PPt_PQt_ij = window_ij * PPt_PQt_ij # (N, N, 2*(2+2))
# Reduction - with batch support:
PPt_PQt_ij.ranges = ranges
PPt_PQt = PPt_PQt_ij.sum(1) # (N, 2*(2+2))
# Reshape to get the two covariance matrices:
PPt_PQt = PPt_PQt.view(N, 2, 2, 2)
PPt, PQt = PPt_PQt[:, :, 0, :], PPt_PQt[:, :, 1, :] # (N, 2, 2), (N, 2, 2)
# Add a small ridge regression:
PPt[:, 0, 0] += reg
PPt[:, 1, 1] += reg
# (minus) Shape operator, i.e. the differential of the Gauss map:
# = (PPt^-1 @ PQt) : simple estimation through linear regression
S = torch.solve(PQt, PPt).solution
a, b, c, d = S[:, 0, 0], S[:, 0, 1], S[:, 1, 0], S[:, 1, 1] # (N,)
# Normalization
mean_curvature = a + d
gauss_curvature = a * d - b * c
features += [mean_curvature.clamp(-1, 1), gauss_curvature.clamp(-1, 1)]
features = torch.stack(features, dim=-1)
return features
# Fast tangent convolution layer ===============================================
class ContiguousBackward(torch.autograd.Function):
    """
    Function to ensure contiguous gradient in backward pass. To be applied after PyKeOps reduction.
    N.B.: This workaround fixes a bug that will be fixed in ulterior KeOp releases.
    """
    @staticmethod
    def forward(ctx, input):
        # Identity in the forward pass; nothing needs to be saved for backward.
        return input
    @staticmethod
    def backward(ctx, grad_output):
        # Force a contiguous memory layout on the incoming gradient before it
        # propagates further (KeOps reductions can yield non-contiguous grads).
        return grad_output.contiguous()
class dMaSIFConv(nn.Module):
def __init__(
self, in_channels=1, out_channels=1, radius=1.0, hidden_units=None, cheap=False
):
"""Creates the KeOps convolution layer.
I = in_channels is the dimension of the input features
O = out_channels is the dimension of the output features
H = hidden_units is the dimension of the intermediate representation
radius is the size of the pseudo-geodesic Gaussian window w_ij = W(d_ij)
This affordable layer implements an elementary "convolution" operator
on a cloud of N points (x_i) in dimension 3 that we decompose in three steps:
1. Apply the MLP "net_in" on the input features "f_i". (N, I) -> (N, H)
2. Compute H interaction terms in parallel with:
f_i = sum_j [ w_ij * conv(P_ij) * f_j ]
In the equation above:
- w_ij is a pseudo-geodesic window with a set radius.
- P_ij is a vector of dimension 3, equal to "x_j-x_i"
in the local oriented basis at x_i.
- "conv" is an MLP from R^3 to R^H:
- with 1 linear layer if "cheap" is True;
- with 2 linear layers and C=8 intermediate "cuts" otherwise.
- "*" is coordinate-wise product.
- f_j is the vector of transformed features.
3. Apply the MLP "net_out" on the output features. (N, H) -> (N, O)
A more general layer would have implemented conv(P_ij) as a full
(H, H) matrix instead of a mere (H,) vector... At a much higher
computational cost. The reasoning behind the code below is that
a given time budget is better spent on using a larger architecture
and more channels than on a very complex convolution operator.
Interactions between channels happen at steps 1. and 3.,
whereas the (costly) point-to-point interaction step 2.
lets the network aggregate information in spatial neighborhoods.
Args:
in_channels (int, optional): numper of input features per point. Defaults to 1.
out_channels (int, optional): number of output features per point. Defaults to 1.
radius (float, optional): deviation of the Gaussian window on the
quasi-geodesic distance `d_ij`. Defaults to 1..
hidden_units (int, optional): number of hidden features per point.
Defaults to out_channels.
cheap (bool, optional): shall we use a 1-layer deep Filter,
instead of a 2-layer deep MLP? Defaults to False.
"""
super(dMaSIFConv, self).__init__()
self.Input = in_channels
self.Output = out_channels
self.Radius = radius
self.Hidden = self.Output if hidden_units is None else hidden_units
self.Cuts = 8 # Number of hidden units for the 3D MLP Filter.
self.cheap = cheap
# For performance reasons, we cut our "hidden" vectors
# in n_heads "independent heads" of dimension 8.
self.heads_dim = 8 # 4 is probably too small; 16 is certainly too big
# We accept "Hidden" dimensions of size 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, ...
if self.Hidden < self.heads_dim:
self.heads_dim = self.Hidden
if self.Hidden % self.heads_dim != 0:
raise ValueError(f"The dimension of the hidden units ({self.Hidden})"\
+ f"should be a multiple of the heads dimension ({self.heads_dim}).")
else:
self.n_heads = self.Hidden // self.heads_dim
# Transformation of the input features:
self.net_in = nn.Sequential(
nn.Linear(self.Input, self.Hidden), # (H, I) + (H,)
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(self.Hidden, self.Hidden), # (H, H) + (H,)
# nn.LayerNorm(self.Hidden),#nn.BatchNorm1d(self.Hidden),
nn.LeakyReLU(negative_slope=0.2),
) # (H,)
self.norm_in = nn.GroupNorm(4, self.Hidden)
# self.norm_in = nn.LayerNorm(self.Hidden)
# self.norm_in = nn.Identity()
# 3D convolution filters, encoded as an MLP:
if cheap:
self.conv = nn.Sequential(
nn.Linear(3, self.Hidden), nn.ReLU() # (H, 3) + (H,)
) # KeOps does not support well LeakyReLu
else:
self.conv = nn.Sequential(
nn.Linear(3, self.Cuts), # (C, 3) + (C,)
nn.ReLU(), # KeOps does not support well LeakyReLu
nn.Linear(self.Cuts, self.Hidden),
) # (H, C) + (H,)
# Transformation of the output features:
self.net_out = nn.Sequential(
nn.Linear(self.Hidden, self.Output), # (O, H) + (O,)
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(self.Output, self.Output), # (O, O) + (O,)
# nn.LayerNorm(self.Output),#nn.BatchNorm1d(self.Output),
nn.LeakyReLU(negative_slope=0.2),
) # (O,)
self.norm_out = nn.GroupNorm(4, self.Output)
# self.norm_out = nn.LayerNorm(self.Output)
# self.norm_out = nn.Identity()
# Custom initialization for the MLP convolution filters:
# we get interesting piecewise affine cuts on a normalized neighborhood.
with torch.no_grad():
nn.init.normal_(self.conv[0].weight)
nn.init.uniform_(self.conv[0].bias)
self.conv[0].bias *= 0.8 * (self.conv[0].weight ** 2).sum(-1).sqrt()
if not cheap:
nn.init.uniform_(
self.conv[2].weight,
a=-1 / np.sqrt(self.Cuts),
b=1 / np.sqrt(self.Cuts),
)
nn.init.normal_(self.conv[2].bias)
self.conv[2].bias *= 0.5 * (self.conv[2].weight ** 2).sum(-1).sqrt()
def forward(self, points, nuv, features, ranges=None):
"""Performs a quasi-geodesic interaction step.
points, local basis, in features -> out features
(N, 3), (N, 3, 3), (N, I) -> (N, O)
This layer computes the interaction step of Eq. (7) in the paper,
in-between the application of two MLP networks independently on all
feature vectors.
Args:
points (Tensor): (N,3) point coordinates `x_i`.
nuv (Tensor): (N,3,3) local coordinate systems `[n_i,u_i,v_i]`.
features (Tensor): (N,I) input feature vectors `f_i`.
ranges (6-uple of integer Tensors, optional): low-level format
to support batch processing, as described in the KeOps documentation.
In practice, this will be built by a higher-level object
to encode the relevant "batch vectors" in a way that is convenient
for the KeOps CUDA engine. Defaults to None.
Returns:
(Tensor): (N,O) output feature vectors `f'_i`.
"""
# 1. Transform the input features: -------------------------------------
features = self.net_in(features) # (N, I) -> (N, H)
features = features.transpose(1, 0)[None, :, :] # (1,H,N)
features = self.norm_in(features)
features = features[0].transpose(1, 0).contiguous() # (1, H, N) -> (N, H)
# 2. Compute the local "shape contexts": -------------------------------
# 2.a Normalize the kernel radius:
points = points / (sqrt(2.0) * self.Radius) # (N, 3)
# 2.b Encode the variables as KeOps LazyTensors
# Vertices:
x_i = LazyTensor(points[:, None, :]) # (N, 1, 3)
x_j = LazyTensor(points[None, :, :]) # (1, N, 3)
# WARNING - Here, we assume that the normals are fixed:
normals = (
nuv[:, 0, :].contiguous().detach()
) # (N, 3) - remove the .detach() if needed
# Local bases:
nuv_i = LazyTensor(nuv.view(-1, 1, 9)) # (N, 1, 9)
# Normals:
n_i = nuv_i[:3] # (N, 1, 3)
n_j = LazyTensor(normals[None, :, :]) # (1, N, 3)
# To avoid register spilling when using large embeddings, we perform our KeOps reduction
# over the vector of length "self.Hidden = self.n_heads * self.heads_dim"
# as self.n_heads reduction over vectors of length self.heads_dim (= "Hd" in the comments).
head_out_features = []
for head in range(self.n_heads):
# Extract a slice of width Hd from the feature array
head_start = head * self.heads_dim
head_end = head_start + self.heads_dim
head_features = features[:, head_start:head_end].contiguous() # (N, H) -> (N, Hd)
# Features:
f_j = LazyTensor(head_features[None, :, :]) # (1, N, Hd)
# Convolution parameters:
if self.cheap:
# Extract a slice of Hd lines: (H, 3) -> (Hd, 3)
A = self.conv[0].weight[head_start:head_end, :].contiguous() | |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, <NAME> <<EMAIL>>
# Copyright 2018, <NAME> <<EMAIL>>
'''
Devices
=======
This module contains two classes describing UPnP devices.
:class:`Device`
---------------
The base class for all devices.
:class:`RootDevice`
-------------------
A device representing a root device.
'''
import time
from lxml import etree
from eventdispatcher import EventDispatcher, Property, ListProperty
from twisted.internet import defer
from coherence import log
from coherence.upnp.core import utils
from coherence.upnp.core.service import Service
from . import xml_constants
ns = xml_constants.UPNP_DEVICE_NS
class Device(EventDispatcher, log.LogAble):
'''
Represents a UPnP's device, but this is not a root device, it's the base
class used for any device. See :class:`RootDevice` if you want a root
device.
.. versionchanged:: 0.9.0
* Migrated from louie/dispatcher to EventDispatcher
* The emitted events changed:
- Coherence.UPnP.Device.detection_completed =>
device_detection_completed
- Coherence.UPnP.Device.remove_client =>
device_remove_client
* New events: device_service_notified, device_got_client
* Changes some class variables to benefit from the EventDispatcher's
properties:
- :attr:`client`
- :attr:`devices`
- :attr:`services`
- :attr:`client`
- :attr:`detection_completed`
'''
logCategory = 'device'
client = Property(None)
'''
Defined by :class:`~coherence.upnp.devices.controlpoint.ControlPoint`.
It should be one of:
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_server_client.MediaServerClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_renderer_client.MediaRendererClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.internet_gateway_device_client.InternetGatewayDeviceClient`
Whenever a client is set an event will be sent notifying it by
:meth:`on_client`.
''' # noqa
icons = ListProperty([])
'''A list of the device icons.'''
devices = ListProperty([])
'''A list of the device devices.'''
services = ListProperty([])
'''A list of the device services.'''
detection_completed = Property(False)
'''
To know whenever the device detection has completed. Defaults to `False`
and it will be set automatically to `True` by the class method
:meth:`receiver`.
'''
    def __init__(self, parent=None, udn=None):
        """Create a (possibly embedded) UPnP device.

        Args:
            parent: the enclosing device, or None for a root device.
            udn: the device's Unique Device Name, when already known.
        """
        log.LogAble.__init__(self)
        EventDispatcher.__init__(self)
        # Events observers can bind to on this device.
        self.register_event(
            'device_detection_completed',
            'device_remove_client',
            'device_service_notified',
            'device_got_client',
        )
        self.parent = parent
        self.udn = udn
        # self.uid = self.usn[:-len(self.st)-2]
        # Placeholders; populated later by parse_device().
        self.friendly_name = ''
        self.device_type = ''
        self.upnp_version = 'n/a'
        self.friendly_device_type = '[unknown]'
        self.device_type_version = 0
def __repr__(self):
return (
f'embedded device {self.friendly_name} '
+ f'{self.device_type}, parent {self.parent}'
)
# def __del__(self):
# # print('Device removal completed')
# pass
def as_dict(self):
d = {
'device_type': self.get_device_type(),
'friendly_name': self.get_friendly_name(),
'udn': self.get_id(),
'services': [x.as_dict() for x in self.services],
}
icons = []
for icon in self.icons:
icons.append(
{
'mimetype': icon['mimetype'],
'url': icon['url'],
'height': icon['height'],
'width': icon['width'],
'depth': icon['depth'],
}
)
d['icons'] = icons
return d
def remove(self, *args):
self.info(f'removal of {self.friendly_name} {self.udn}')
while len(self.devices) > 0:
device = self.devices.pop()
self.debug(f'try to remove {device}')
device.remove()
while len(self.services) > 0:
service = self.services.pop()
self.debug(f'try to remove {service}')
service.remove()
if self.client is not None:
self.dispatch_event('device_remove_client', self.udn, self.client)
self.client = None
# del self
return True
    def receiver(self, *args, **kwargs):
        """Advance device detection; called as services complete detection.

        Marks the device as detected only when *all* services are done and
        the UDN is known, then emits `device_detection_completed`.
        """
        if self.detection_completed:
            return
        for s in self.services:
            if not s.detection_completed:
                # At least one service still pending: try again on next call.
                return
            # NOTE(review): this fires once per service on every invocation,
            # so listeners may see duplicate notifications — confirm intent.
            self.dispatch_event('device_service_notified', service=s)
        if self.udn is None:
            return
        self.detection_completed = True
        if self.parent is not None:
            self.info(
                f'embedded device {self.friendly_name} '
                + f'{self.device_type} initialized, parent {self.parent}'
            )
        # Root devices announce themselves; embedded devices announce parent.
        if self.parent is not None:
            self.dispatch_event('device_detection_completed', self.parent)
        else:
            self.dispatch_event('device_detection_completed', self)
    def service_detection_failed(self, device):
        """Remove the whole device when any of its services fails detection."""
        self.remove()
    def get_id(self):
        """Return the device's UDN (Unique Device Name)."""
        return self.udn
    def get_uuid(self):
        """Return the UDN without its leading 5-character 'uuid:' prefix.

        NOTE(review): assumes ``self.udn`` is set and starts with 'uuid:' —
        confirm with callers; raises TypeError if udn is still None.
        """
        return self.udn[5:]
    def get_embedded_devices(self):
        """Return the list of embedded (child) devices."""
        return self.devices
def get_embedded_device_by_type(self, type):
r = []
for device in self.devices:
if type == device.friendly_device_type:
r.append(device)
return r
    def get_services(self):
        """Return the list of this device's services."""
        return self.services
    def get_service_by_type(self, service_type):
        """Return the first service whose class matches `service_type`.

        Args:
            service_type: a service class name (e.g. 'ContentDirectory')
                or a tuple/list of such names.

        Returns:
            The matching service, or None when nothing matches.
        """
        if not isinstance(service_type, (tuple, list)):
            service_type = [service_type]
        for service in self.services:
            # service_type strings look like
            # 'urn:schemas-upnp-org:service:<class>:<version>'.
            _, _, _, service_class, version = service.service_type.split(':')
            if service_class in service_type:
                return service
    def add_service(self, service):
        '''
        Add a service to the device. Also we check if service already notified,
        and trigger the callback if needed. We also connect the device to
        service in case the service still not completed his detection in order
        that the device knows when the service has completed his detection.

        Args:
            service (object): A service which should be an initialized instance
                of :class:`~coherence.upnp.core.service.Service`
        '''
        self.debug(f'add_service {service}')
        if service.detection_completed:
            # Service already detected: update our own detection state now.
            self.receiver(service)
        # Bind in every case so later (re-)notifications reach us too.
        service.bind(
            service_detection_completed=self.receiver,
            service_detection_failed=self.service_detection_failed,
        )
        self.services.append(service)
# fixme: This fails as Service.get_usn() is not implemented.
    def remove_service_with_usn(self, service_usn):
        """Unbind, remove and tear down the first service matching the USN.

        NOTE(review): relies on ``Service.get_usn()``, which the fixme
        comment above reports as not implemented — confirm before use.
        """
        for service in self.services:
            if service.get_usn() == service_usn:
                service.unbind(
                    service_detection_completed=self.receiver,
                    service_detection_failed=self.service_detection_failed,
                )
                self.services.remove(service)
                service.remove()
                break
    def add_device(self, device):
        """Register an embedded (child) device."""
        self.debug(f'Device add_device {device}')
        self.devices.append(device)
    def get_friendly_name(self):
        """Return the device's human-readable name."""
        return self.friendly_name
    def get_device_type(self):
        """Return the full UPnP device type URN."""
        return self.device_type
    def get_friendly_device_type(self):
        """Return the short device type (the URN's type component)."""
        return self.friendly_device_type
def get_markup_name(self):
try:
return self._markup_name
except AttributeError:
self._markup_name = (
f'{self.friendly_device_type}:{self.device_type_version} '
+ f'{self.friendly_name}'
)
return self._markup_name
    def get_device_type_version(self):
        """Return the version component of the device type URN."""
        return self.device_type_version
    def set_client(self, client):
        """Attach a control-point client (triggers on_client via Property)."""
        self.client = client
    def get_client(self):
        """Return the attached control-point client (or None)."""
        return self.client
    def on_client(self, *args):
        '''
        Automatically triggered whenever a client is set or changed. Emit
        an event notifying that the client has changed.

        .. versionadded:: 0.9.0
        '''
        self.dispatch_event('device_got_client', self, client=self.client)
def renew_service_subscriptions(self):
''' iterate over device's services and renew subscriptions '''
self.info(f'renew service subscriptions for {self.friendly_name}')
now = time.time()
for service in self.services:
self.info(
f'check service {service.id} {service.get_sid()} '
+ f'{service.get_timeout()} {now}'
)
if service.get_sid() is not None:
if service.get_timeout() < now:
self.debug(
f'wow, we lost an event subscription for '
+ f'{self.friendly_name} {service.get_id()}, '
+ f'maybe we need to rethink the loop time and '
+ f'timeout calculation?'
)
if service.get_timeout() < now + 30:
service.renew_subscription()
for device in self.devices:
device.renew_service_subscriptions()
def unsubscribe_service_subscriptions(self):
'''Iterate over device's services and unsubscribe subscriptions '''
sl = []
for service in self.get_services():
if service.get_sid() is not None:
sl.append(service.unsubscribe())
dl = defer.DeferredList(sl)
return dl
    def parse_device(self, d):
        """Populate this device from a parsed `<device>` description element.

        Extracts identity fields, optional metadata, DLNA extensions, icons,
        services and embedded devices from the XML element `d`, then calls
        :meth:`receiver` to (possibly) finish detection.
        """
        self.info(f'parse_device {d}')
        self.device_type = d.findtext(f'./{{{ns}}}deviceType')
        # deviceType looks like 'urn:schemas-upnp-org:device:<type>:<version>'.
        (
            self.friendly_device_type, self.device_type_version,
        ) = self.device_type.split(':')[-2:]
        self.friendly_name = d.findtext(f'./{{{ns}}}friendlyName')
        self.udn = d.findtext(f'./{{{ns}}}UDN')
        self.info(f'found udn {self.udn} {self.friendly_name}')
        # Optional metadata: each lookup is wrapped in a broad try/except so
        # a malformed description never aborts the whole parse.
        try:
            self.manufacturer = d.findtext(f'./{{{ns}}}manufacturer')
        except Exception:
            pass
        try:
            self.manufacturer_url = d.findtext(f'./{{{ns}}}manufacturerURL')
        except Exception:
            pass
        try:
            self.model_name = d.findtext(f'./{{{ns}}}modelName')
        except Exception:
            pass
        try:
            self.model_description = d.findtext(f'./{{{ns}}}modelDescription')
        except Exception:
            pass
        try:
            self.model_number = d.findtext(f'./{{{ns}}}modelNumber')
        except Exception:
            pass
        try:
            self.model_url = d.findtext(f'./{{{ns}}}modelURL')
        except Exception:
            pass
        try:
            self.serial_number = d.findtext(f'./{{{ns}}}serialNumber')
        except Exception:
            pass
        try:
            self.upc = d.findtext(f'./{{{ns}}}UPC')
        except Exception:
            pass
        try:
            self.presentation_url = d.findtext(f'./{{{ns}}}presentationURL')
        except Exception:
            pass
        # DLNA document/capability extensions: the try/except AttributeError
        # pattern lazily creates the list on first append.
        try:
            for dlna_doc in d.findall(
                './{urn:schemas-dlna-org:device-1-0}X_DLNADOC'
            ):
                try:
                    self.dlna_dc.append(dlna_doc.text)
                except AttributeError:
                    self.dlna_dc = []
                    self.dlna_dc.append(dlna_doc.text)
        except Exception:
            pass
        try:
            for dlna_cap in d.findall(
                './{urn:schemas-dlna-org:device-1-0}X_DLNACAP'
            ):
                for cap in dlna_cap.text.split(','):
                    try:
                        self.dlna_cap.append(cap)
                    except AttributeError:
                        self.dlna_cap = []
                        self.dlna_cap.append(cap)
        except Exception:
            pass
        icon_list = d.find(f'./{{{ns}}}iconList')
        if icon_list is not None:
            from urllib.parse import urlparse
            # NOTE(review): url_base is computed but never used below.
            url_base = '%s://%s' % urlparse(self.get_location())[:2]
            for icon in icon_list.findall(f'./{{{ns}}}icon'):
                try:
                    i = {}
                    i['mimetype'] = icon.find(f'./{{{ns}}}mimetype').text
                    i['width'] = icon.find(f'./{{{ns}}}width').text
                    i['height'] = icon.find(f'./{{{ns}}}height').text
                    i['depth'] = icon.find(f'./{{{ns}}}depth').text
                    i['realurl'] = icon.find(f'./{{{ns}}}url').text
                    # make_fullyqualified returns bytes here, hence decode.
                    i['url'] = self.make_fullyqualified(i['realurl']).decode(
                        'utf-8'
                    )
                    self.icons.append(i)
                    self.debug(f'adding icon {i} for {self.friendly_name}')
                except Exception as e:
                    import traceback
                    self.debug(traceback.format_exc())
                    self.warning(
                        f'device {self.friendly_name} seems to have an invalid'
                        + f' icon description, ignoring that icon [error: {e}]'
                    )
        serviceList = d.find(f'./{{{ns}}}serviceList')
        if serviceList is not None:
            for service in serviceList.findall(f'./{{{ns}}}service'):
                serviceType = service.findtext(f'{{{ns}}}serviceType')
                serviceId = service.findtext(f'{{{ns}}}serviceId')
                controlUrl = service.findtext(f'{{{ns}}}controlURL')
                eventSubUrl = service.findtext(f'{{{ns}}}eventSubURL')
                presentationUrl = service.findtext(f'{{{ns}}}presentationURL')
                scpdUrl = service.findtext(f'{{{ns}}}SCPDURL')
                # check if values are somehow reasonable
                if len(scpdUrl) == 0:
                    self.warning('service has no uri for its description')
                    continue
                if len(eventSubUrl) == 0:
                    self.warning('service has no uri for eventing')
                    continue
                if len(controlUrl) == 0:
                    self.warning('service has no uri for controling')
                    continue
                try:
                    self.add_service(
                        Service(
                            serviceType,
                            serviceId,
                            self.get_location(),
                            controlUrl,
                            eventSubUrl,
                            presentationUrl,
                            scpdUrl,
                            self,
                        )
                    )
                except Exception as e:
                    self.error(
                        f'Error on adding service: {service} [ERROR: {e}]'
                    )
        # now look for all sub devices
        embedded_devices = d.find(f'./{{{ns}}}deviceList')
        if embedded_devices is not None:
            # NOTE(review): the loop variable `d` shadows the parameter `d`;
            # harmless here since `d` is not used afterwards.
            for d in embedded_devices.findall(f'./{{{ns}}}device'):
                embedded_device = Device(self)
                self.add_device(embedded_device)
                embedded_device.parse_device(d)
        self.receiver()
    def get_location(self):
        """Return the description URL, delegated to the root device."""
        return self.parent.get_location()
    def get_usn(self):
        """Return the Unique Service Name, delegated to the root device."""
        return self.parent.get_usn()
    def get_upnp_version(self):
        """Return the UPnP version, delegated to the root device."""
        return self.parent.get_upnp_version()
    def get_urlbase(self):
        """Return the URL base, delegated to the root device."""
        return self.parent.get_urlbase()
    def get_presentation_url(self):
        """Return the fully-qualified presentation URL, or '' when the
        attribute is missing or qualification fails."""
        try:
            return self.make_fullyqualified(self.presentation_url)
        except Exception:
            return ''
    def get_parent_id(self):
        """Return the parent device's UDN, or '' when there is no parent."""
        try:
            return self.parent.get_id()
        except Exception:
            return ''
    def make_fullyqualified(self, url):
        """Resolve a (possibly relative) URL via the root device."""
        return self.parent.make_fullyqualified(url)
def as_tuples(self):
r = []
def append(name, attribute):
try:
if isinstance(attribute, tuple):
if callable(attribute[0]):
v1 = attribute[0]()
else:
v1 = getattr(self, attribute[0])
if v1 in [None, 'None']:
return
if callable(attribute[1]):
v2 = attribute[1]()
else:
v2 = getattr(self, attribute[1])
if v2 in [None, 'None']:
return
r.append((name, (v1, v2)))
return
elif callable(attribute):
v = attribute()
else:
v = getattr(self, attribute)
if v not in [None, 'None']:
r.append((name, v))
except Exception as e:
self.error(f'Device.as_tuples: {e}')
import traceback
self.debug(traceback.format_exc())
try:
r.append(('Location', (self.get_location(), self.get_location())))
except Exception:
pass
try:
append('URL base', self.get_urlbase)
except Exception:
pass
try:
r.append(('UDN', self.get_id()))
except Exception:
pass
try:
r.append(('Type', self.device_type))
except Exception:
pass
try:
r.append(('UPnP Version', self.upnp_version))
| |
64
chunk_size = 16
def get_scores_once(feats, points, values):
# sample points inside voxels
sampled_xyz = offset_points(points, self.voxel_size / 2.0, bits=bits)
sampled_idx = torch.arange(points.size(0), device=points.device)[:, None].expand(*sampled_xyz.size()[:2])
sampled_xyz, sampled_idx = sampled_xyz.reshape(-1, 3), sampled_idx.reshape(-1)
field_inputs = self.forward(
{'sampled_point_xyz': sampled_xyz,
'sampled_point_voxel_idx': sampled_idx,
'sampled_point_ray_direction': None,
'sampled_point_distance': None},
{'voxel_vertex_idx': feats,
'voxel_center_xyz': points,
'voxel_vertex_emb': values}) # get field inputs
if encoder_states.get('context', None) is not None:
field_inputs['context'] = encoder_states['context']
# evaluation with density
field_outputs = field_fn(field_inputs, outputs=['sigma'])
free_energy = -torch.relu(field_outputs['sigma']).reshape(-1, bits ** 3)
# return scores
return torch.exp(free_energy)
return torch.cat([get_scores_once(feats[i: i + chunk_size], points[i: i + chunk_size], values)
for i in range(0, points.size(0), chunk_size)], 0)
    @torch.no_grad()
    def splitting(self):
        """Subdivide every kept voxel (halving the voxel size) and rebuild
        the vertex embedding table accordingly.

        Mutates ``self.values``, ``self.total_size``, ``self.num_keys``,
        ``self.points``, ``self.feats`` and ``self.keep`` in place.
        """
        logger.info("splitting...")
        encoder_states = self.precompute(id=None)
        feats, points, values = encoder_states['voxel_vertex_idx'], encoder_states['voxel_center_xyz'], encoder_states['voxel_vertex_emb']
        new_points, new_feats, new_values, new_keys = splitting_points(points, feats, values, self.voxel_size / 2.0)
        new_num_keys = new_keys.size(0)
        new_point_length = new_points.size(0)
        # set new voxel embeddings
        if new_values is not None:
            self.values.weight = nn.Parameter(new_values)
            self.values.num_embeddings = self.values.weight.size(0)
        self.total_size = new_num_keys
        # `* 0 +` keeps the dtype/device of the existing buffer.
        self.num_keys = self.num_keys * 0 + self.total_size
        self.points = new_points
        self.feats = new_feats
        # All split voxels start out as kept.
        self.keep = self.keep.new_ones(new_point_length)
        logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(points.size(0), self.keep.sum()))
@property
def flatten_centers(self):
if self._runtime_caches['flatten_centers'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_centers']
@property
def flatten_children(self):
if self._runtime_caches['flatten_children'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_children']
@property
def max_voxel_probs(self):
if self._runtime_caches['max_voxel_probs'] is None:
self.reset_runtime_caches()
return self._runtime_caches['max_voxel_probs']
    @max_voxel_probs.setter
    def max_voxel_probs(self, x):
        # Allow external code (probability tracking) to overwrite the cache.
        self._runtime_caches['max_voxel_probs'] = x
    @property
    def feature_dim(self):
        """Per-vertex embedding size exposed to downstream field networks."""
        return self.embed_dim
@property
def dummy_loss(self):
if self.values is not None:
return self.values.weight[0,0] * 0.0
return 0.0
    @property
    def num_voxels(self):
        """Number of voxels still kept (``keep`` is a 0/1 mask)."""
        return self.keep.long().sum()
@register_encoder('multi_sparsevoxel_encoder')
class MultiSparseVoxelEncoder(Encoder):
    """A bank of per-object SparseVoxelEncoders.

    ``precompute`` receives the object id, caches it in ``self.cid``, and
    every later call (``ray_intersect``, ``forward``, ...) is delegated to
    the selected per-object encoder.
    """

    def __init__(self, args):
        super().__init__(args)
        try:
            # One voxel encoder per line of the voxel-path list file.
            # FIX: use a context manager so the file handle is closed
            # (the original `open(...).readlines()` leaked it).
            with open(args.voxel_path) as fh:
                voxel_files = [vox.strip() for vox in fh.readlines()]
            self.all_voxels = nn.ModuleList(
                [SparseVoxelEncoder(args, vox) for vox in voxel_files])
        except TypeError:
            # args.voxel_path was None: fall back to a list of bbox dirs.
            bbox_path = getattr(args, "bbox_path", "/private/home/jgu/data/shapenet/disco_dataset/bunny_point.txt")
            with open(bbox_path) as fh:
                bbox_dirs = [g.strip() for g in fh.readlines()]
            self.all_voxels = nn.ModuleList(
                [SparseVoxelEncoder(args, None, g + '/bbox.txt') for g in bbox_dirs])

        # properties
        self.deterministic_step = getattr(args, "deterministic_step", False)
        self.use_octree = getattr(args, "use_octree", False)
        self.track_max_probs = getattr(args, "track_max_probs", False)
        self.cid = None
        if getattr(self.args, "global_embeddings", None) is not None:
            # SECURITY(review): eval() of a CLI-provided string; assumed
            # trusted here — consider ast.literal_eval instead.
            self.global_embed = torch.zeros(*eval(self.args.global_embeddings)).normal_(mean=0, std=0.01)
            self.global_embed = nn.Parameter(self.global_embed, requires_grad=True)
        else:
            self.global_embed = None

    @staticmethod
    def add_args(parser):
        SparseVoxelEncoder.add_args(parser)
        parser.add_argument('--bbox-path', type=str, default=None)
        parser.add_argument('--global-embeddings', type=str, default=None,
                            help="""set global embeddings if provided in global.txt. We follow this format:
                            (N, D) or (K, N, D) if we have multi-dimensional global features.
                            D is the global feature dimentions.
                            N is the number of indices of this feature,
                            and K is the number of features if provided.""")

    def reset_runtime_caches(self):
        """Rebuild the runtime caches of every per-object encoder."""
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].reset_runtime_caches()

    def clean_runtime_caches(self):
        """Drop the runtime caches of every per-object encoder."""
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].clean_runtime_caches()

    def precompute(self, id, global_index=None, *args, **kwargs):
        """Select the object's encoder and precompute its states.

        Optionally attaches the global context embeddings under 'context'.
        """
        # TODO: this is a HACK for simplicity
        assert id.size(0) == 1, "for now, only works for one object"
        self.cid = id[0]
        encoder_states = self.all_voxels[id[0]].precompute(id, *args, **kwargs)
        if (global_index is not None) and (self.global_embed is not None):
            encoder_states['context'] = torch.stack([
                F.embedding(global_index[:, i], self.global_embed[i])
                for i in range(self.global_embed.size(0))], 1)
        return encoder_states

    def export_surfaces(self, field_fn, th, bits):
        raise NotImplementedError("does not support for now.")

    def export_voxels(self, return_mesh=False):
        raise NotImplementedError("does not support for now.")

    def get_edge(self, *args, **kwargs):
        return self.all_voxels[self.cid].get_edge(*args, **kwargs)

    def ray_intersect(self, *args, **kwargs):
        return self.all_voxels[self.cid].ray_intersect(*args, **kwargs)

    def ray_sample(self, *args, **kwargs):
        return self.all_voxels[self.cid].ray_sample(*args, **kwargs)

    def forward(self, samples, encoder_states):
        """Delegate to the current object's encoder, forwarding 'context'."""
        inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
        if encoder_states.get('context', None) is not None:
            inputs['context'] = encoder_states['context']
        return inputs

    def track_voxel_probs(self, voxel_idxs, voxel_probs):
        return self.all_voxels[self.cid].track_voxel_probs(voxel_idxs, voxel_probs)

    @torch.no_grad()
    def pruning(self, field_fn, th=0.5, train_stats=False):
        """Prune empty voxels in every per-object encoder."""
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].pruning(field_fn, th, train_stats=train_stats)

    @torch.no_grad()
    def splitting(self):
        """Subdivide the voxels of every per-object encoder."""
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].splitting()

    @property
    def feature_dim(self):
        return self.all_voxels[0].embed_dim

    @property
    def dummy_loss(self):
        return sum([d.dummy_loss for d in self.all_voxels])

    @property
    def voxel_size(self):
        return self.all_voxels[0].voxel_size

    @voxel_size.setter
    def voxel_size(self, x):
        # Broadcast the new value to every encoder.
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].voxel_size = x

    @property
    def step_size(self):
        return self.all_voxels[0].step_size

    @step_size.setter
    def step_size(self, x):
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].step_size = x

    @property
    def max_hits(self):
        return self.all_voxels[0].max_hits

    @max_hits.setter
    def max_hits(self, x):
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].max_hits = x

    @property
    def num_voxels(self):
        return self.all_voxels[self.cid].num_voxels
@register_encoder('shared_sparsevoxel_encoder')
class SharedSparseVoxelEncoder(MultiSparseVoxelEncoder):
    """
    Different from MultiSparseVoxelEncoder, we assume a shared list
    of voxels across all models. Usually useful to learn a video sequence.

    Every frame gets its own SparseVoxelEncoder, but all frames share the
    embedding table of frame 0 (via ``shared_values``); a per-frame context
    embedding distinguishes the frames.
    """
    def __init__(self, args):
        # Intentionally skips MultiSparseVoxelEncoder.__init__ (which would
        # load per-object voxel files) and calls Encoder.__init__ directly.
        super(MultiSparseVoxelEncoder, self).__init__(args)
        # using a shared voxel
        self.voxel_path = args.voxel_path
        self.num_frames = args.num_frames
        self.all_voxels = [SparseVoxelEncoder(args, self.voxel_path)]
        self.all_voxels = nn.ModuleList(self.all_voxels + [
            SparseVoxelEncoder(args, self.voxel_path, shared_values=self.all_voxels[0].values)
            for i in range(self.num_frames - 1)])
        self.context_embed_dim = args.context_embed_dim
        # Third positional argument is padding_idx (None = no padding).
        self.contexts = nn.Embedding(self.num_frames, self.context_embed_dim, None)
        self.cid = None
    @staticmethod
    def add_args(parser):
        SparseVoxelEncoder.add_args(parser)
        parser.add_argument('--num-frames', type=int, help='the total number of frames')
        parser.add_argument('--context-embed-dim', type=int, help='context embedding for each view')
    def forward(self, samples, encoder_states):
        # Delegate to the current frame's encoder, then attach that frame's
        # context embedding.
        inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
        inputs.update({'context': self.contexts(self.cid).unsqueeze(0)})
        return inputs
    @torch.no_grad()
    def pruning(self, field_fn, th=0.5, train_stats=False):
        """Prune each frame's voxels, conditioning on that frame's context."""
        for cid in range(len(self.all_voxels)):
            id = torch.tensor([cid], device=self.contexts.weight.device)
            encoder_states = {name: v[0] if v is not None else v
                              for name, v in self.precompute(id).items()}
            encoder_states['context'] = self.contexts(id)
            self.all_voxels[cid].pruning(field_fn, th,
                                         encoder_states=encoder_states,
                                         train_stats=train_stats)
    @torch.no_grad()
    def splitting(self):
        """Split the shared voxel set once, then update every frame."""
        logger.info("splitting...")
        all_feats, all_points = [], []
        for id in range(len(self.all_voxels)):
            encoder_states = self.all_voxels[id].precompute(id=None)
            feats = encoder_states['voxel_vertex_idx']
            points = encoder_states['voxel_center_xyz']
            values = encoder_states['voxel_vertex_emb']
            all_feats.append(feats)
            all_points.append(points)
        feats, points = torch.cat(all_feats, 0), torch.cat(all_points, 0)
        # Deduplicate voxels that appear in several frames.
        unique_feats, unique_idx = torch.unique(feats, dim=0, return_inverse=True)
        unique_points = points[
            unique_feats.new_zeros(unique_feats.size(0)).scatter_(
                0, unique_idx, torch.arange(unique_idx.size(0), device=unique_feats.device)
            )]
        # NOTE(review): `values` is the loop variable from the *last* frame;
        # this works because all frames share one embedding table — confirm.
        new_points, new_feats, new_values, new_keys = splitting_points(unique_points, unique_feats, values, self.voxel_size / 2.0)
        new_num_keys = new_keys.size(0)
        new_point_length = new_points.size(0)
        # set new voxel embeddings (shared voxels)
        if values is not None:
            self.all_voxels[0].values.weight = nn.Parameter(new_values)
            self.all_voxels[0].values.num_embeddings = new_num_keys
        for id in range(len(self.all_voxels)):
            self.all_voxels[id].total_size = new_num_keys
            self.all_voxels[id].num_keys = self.all_voxels[id].num_keys * 0 + self.all_voxels[id].total_size
            self.all_voxels[id].points = new_points
            self.all_voxels[id].feats = new_feats
            self.all_voxels[id].keep = self.all_voxels[id].keep.new_ones(new_point_length)
        logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(
            unique_points.size(0), new_point_length))
    @property
    def feature_dim(self):
        # Voxel embedding plus the per-frame context embedding.
        return self.all_voxels[0].embed_dim + self.context_embed_dim
@register_encoder('triangle_mesh_encoder')
class TriangleMeshEncoder(SparseVoxelEncoder):
"""
Training on fixed mesh model. Cannot pruning..
"""
def __init__(self, args, mesh_path=None, shared_values=None):
super(SparseVoxelEncoder, self).__init__(args)
self.mesh_path = mesh_path if mesh_path is not None else args.mesh_path
assert (self.mesh_path is not None) and os.path.exists(self.mesh_path)
import open3d as o3d
mesh = o3d.io.read_triangle_mesh(self.mesh_path)
vertices = torch.from_numpy(np.asarray(mesh.vertices, dtype=np.float32))
faces = torch.from_numpy(np.asarray(mesh.triangles, dtype=np.long))
step_size = args.raymarching_stepsize
if getattr(args, "raymarching_margin", None) is None:
margin = step_size * 10 # truncated space around the triangle surfaces
else:
margin = args.raymarching_margin
self.register_buffer("margin", torch.scalar_tensor(margin))
self.register_buffer("step_size", torch.scalar_tensor(step_size))
self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))
self.vertices = nn.Parameter(vertices, requires_grad=getattr(args, "trainable_vertices", False))
self.faces = nn.Parameter(faces, requires_grad=False)
# set-up other hyperparameters
self.embed_dim = getattr(args, "voxel_embed_dim", None)
self.deterministic_step = getattr(args, "deterministic_step", False)
self.values = None
self.blur_ratio = getattr(args, "blur_ratio", 0.0)
    def upgrade_state_dict_named(self, state_dict, name):
        """No-op: mesh-encoder checkpoints need no key migration."""
        pass
    @staticmethod
    def add_args(parser):
        # Register the mesh encoder's command-line options on `parser`.
        parser.add_argument('--mesh-path', type=str, help='path for initial mesh file')
        parser.add_argument('--voxel-embed-dim', type=int, metavar='N', help="embedding size")
        parser.add_argument('--deterministic-step', action='store_true',
                            help='if set, the model runs fixed stepsize, instead of sampling one')
        parser.add_argument('--max-hits', type=int, metavar='N', help='due to restrictions we set a maximum number of hits')
        parser.add_argument('--raymarching-stepsize', type=float, metavar='D',
                            help='ray marching step size for sparse voxels')
        parser.add_argument('--raymarching-margin', type=float, default=None,
                            help='margin around the surface.')
        parser.add_argument('--blur-ratio', type=float, default=0,
                            help="it is possible to shoot outside the triangle. default=0")
        parser.add_argument('--trainable-vertices', action='store_true',
                            help='if set, making the triangle trainable. experimental code. not ideal.')
def precompute(self, id=None, *args, **kwargs):
    """Assemble the encoder-state dict (face indices and vertex positions).

    When ``id`` is given, tensors are tiled along a leading batch axis and,
    for more than one object, face indices are offset so that each object
    addresses its own slice of the flattened vertex table.
    """
    face_idx, vertex_xyz, vertex_vals = self.faces, self.vertices, self.values
    if id is not None:
        # extend size to support multi-objects
        n_obj = id.size(0)
        face_idx = face_idx.unsqueeze(0).expand(n_obj, *face_idx.size()).contiguous()
        vertex_xyz = vertex_xyz.unsqueeze(0).expand(n_obj, *vertex_xyz.size()).contiguous()
        if vertex_vals is not None:
            vertex_vals = vertex_vals.unsqueeze(0).expand(n_obj, *vertex_vals.size()).contiguous()

        # moving to multiple objects
        if n_obj > 1:
            offsets = torch.arange(n_obj, device=face_idx.device, dtype=face_idx.dtype)
            face_idx = face_idx + vertex_xyz.size(1) * offsets[:, None, None]

    return {
        'mesh_face_vertex_idx': face_idx,
        'mesh_vertex_xyz': vertex_xyz,
    }
def get_edge(self, ray_start, ray_dir, *args, **kwargs):
    """Return a constant gray edge color (0.7) shaped like ``ray_dir``."""
    edge_color = torch.ones_like(ray_dir)
    return edge_color * 0.7
@property
def voxel_size(self):
    # The truncated margin around the triangle surfaces plays the role of
    # the voxel size for this mesh-backed encoder.
    return self.margin
def ray_intersect(self, ray_start, ray_dir, encoder_states):
point_xyz = encoder_states['mesh_vertex_xyz']
point_feats = encoder_states['mesh_face_vertex_idx']
S, V, P, _ = ray_dir.size()
F, G = point_feats.size(1), point_xyz.size(1)
# ray-voxel intersection
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
pts_idx, depth, uv = triangle_ray_intersect(
self.margin, self.blur_ratio, self.max_hits, point_xyz, point_feats, ray_start, ray_dir)
min_depth = (depth[:,:,:,0] + depth[:,:,:,1]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
max_depth = (depth[:,:,:,0] + depth[:,:,:,2]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
hits = pts_idx.ne(-1).any(-1) # remove all points that completely miss the object
if S > 1: # extend the point-index to multiple shapes (just in case)
pts_idx = (pts_idx + G * torch.arange(S,
device=pts_idx.device, dtype=pts_idx.dtype)[:, None, | |
operating_system
self.registered_date_time = registered_date_time
class MicrosoftGraphPrintDocument(MicrosoftGraphEntity):
    """printDocument.

    A document submitted to a cloud print job.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param configuration: printerDocumentConfiguration.
    :type configuration: ~devices_cloud_print.models.MicrosoftGraphPrinterDocumentConfiguration
    :param content_type: MIME type of the document payload.
    :type content_type: str
    :param display_name: Human-readable document name.
    :type display_name: str
    :param size: Size of the document in bytes.
    :type size: long
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'configuration': {'key': 'configuration', 'type': 'MicrosoftGraphPrinterDocumentConfiguration'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'size': {'key': 'size', 'type': 'long'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        configuration: Optional["MicrosoftGraphPrinterDocumentConfiguration"] = None,
        content_type: Optional[str] = None,
        display_name: Optional[str] = None,
        size: Optional[int] = None,
        **kwargs
    ):
        # The shared ``id`` handling lives on the entity base class.
        super().__init__(id=id, **kwargs)
        self.size = size
        self.display_name = display_name
        self.content_type = content_type
        self.configuration = configuration
        self.additional_properties = additional_properties
class MicrosoftGraphPrinterBase(MicrosoftGraphEntity):
    """printerBase.

    Common shape shared by printers and printer shares: identity, hardware
    description, current status, and queued jobs.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param capabilities: printerCapabilities.
    :type capabilities: ~devices_cloud_print.models.MicrosoftGraphPrinterCapabilities
    :param defaults: printerDefaults.
    :type defaults: ~devices_cloud_print.models.MicrosoftGraphPrinterDefaults
    :param display_name: Human-readable printer name.
    :type display_name: str
    :param is_accepting_jobs: Whether the printer currently accepts new jobs.
    :type is_accepting_jobs: bool
    :param location: printerLocation.
    :type location: ~devices_cloud_print.models.MicrosoftGraphPrinterLocation
    :param manufacturer: Printer manufacturer.
    :type manufacturer: str
    :param model: Printer model.
    :type model: str
    :param name: Printer name.
    :type name: str
    :param status: printerStatus.
    :type status: ~devices_cloud_print.models.MicrosoftGraphPrinterStatus
    :param jobs: Jobs queued against this printer.
    :type jobs: list[~devices_cloud_print.models.MicrosoftGraphPrintJob]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'capabilities': {'key': 'capabilities', 'type': 'MicrosoftGraphPrinterCapabilities'},
        'defaults': {'key': 'defaults', 'type': 'MicrosoftGraphPrinterDefaults'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'is_accepting_jobs': {'key': 'isAcceptingJobs', 'type': 'bool'},
        'location': {'key': 'location', 'type': 'MicrosoftGraphPrinterLocation'},
        'manufacturer': {'key': 'manufacturer', 'type': 'str'},
        'model': {'key': 'model', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'MicrosoftGraphPrinterStatus'},
        'jobs': {'key': 'jobs', 'type': '[MicrosoftGraphPrintJob]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        capabilities: Optional["MicrosoftGraphPrinterCapabilities"] = None,
        defaults: Optional["MicrosoftGraphPrinterDefaults"] = None,
        display_name: Optional[str] = None,
        is_accepting_jobs: Optional[bool] = None,
        location: Optional["MicrosoftGraphPrinterLocation"] = None,
        manufacturer: Optional[str] = None,
        model: Optional[str] = None,
        name: Optional[str] = None,
        status: Optional["MicrosoftGraphPrinterStatus"] = None,
        jobs: Optional[List["MicrosoftGraphPrintJob"]] = None,
        **kwargs
    ):
        # Identity is handled by the entity base class.
        super().__init__(id=id, **kwargs)
        self.additional_properties = additional_properties
        # Hardware description.
        self.manufacturer = manufacturer
        self.model = model
        self.name = name
        self.display_name = display_name
        self.location = location
        # Behavior and state.
        self.capabilities = capabilities
        self.defaults = defaults
        self.is_accepting_jobs = is_accepting_jobs
        self.status = status
        self.jobs = jobs
class MicrosoftGraphPrinter(MicrosoftGraphPrinterBase):
    """printer.

    A registered cloud printer, extending printerBase with registration
    details, sharing state, and access-control relationships.

    :param id: Read-only.
    :type id: str
    :param capabilities: printerCapabilities.
    :type capabilities: ~devices_cloud_print.models.MicrosoftGraphPrinterCapabilities
    :param defaults: printerDefaults.
    :type defaults: ~devices_cloud_print.models.MicrosoftGraphPrinterDefaults
    :param display_name: Human-readable printer name.
    :type display_name: str
    :param is_accepting_jobs: Whether the printer currently accepts new jobs.
    :type is_accepting_jobs: bool
    :param location: printerLocation.
    :type location: ~devices_cloud_print.models.MicrosoftGraphPrinterLocation
    :param manufacturer: Printer manufacturer.
    :type manufacturer: str
    :param model: Printer model.
    :type model: str
    :param name: Printer name.
    :type name: str
    :param status: printerStatus.
    :type status: ~devices_cloud_print.models.MicrosoftGraphPrinterStatus
    :param jobs: Jobs queued against this printer.
    :type jobs: list[~devices_cloud_print.models.MicrosoftGraphPrintJob]
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param accepting_jobs: Whether the printer is accepting jobs.
    :type accepting_jobs: bool
    :param is_shared: Whether the printer is shared.
    :type is_shared: bool
    :param registered_date_time: When the printer was registered.
    :type registered_date_time: ~datetime.datetime
    :param allowed_groups: Groups allowed to use this printer.
    :type allowed_groups: list[~devices_cloud_print.models.MicrosoftGraphPrintIdentity]
    :param allowed_users: Users allowed to use this printer.
    :type allowed_users: list[~devices_cloud_print.models.MicrosoftGraphPrintUserIdentity]
    :param connectors: Connectors servicing this printer.
    :type connectors: list[~devices_cloud_print.models.MicrosoftGraphPrintConnector]
    :param share: printerShare.
    :type share: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :param shares: Shares exposing this printer.
    :type shares: list[~devices_cloud_print.models.MicrosoftGraphPrinterShare]
    :param task_triggers: Task triggers configured on this printer.
    :type task_triggers: list[~devices_cloud_print.models.MicrosoftGraphPrintTaskTrigger]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'capabilities': {'key': 'capabilities', 'type': 'MicrosoftGraphPrinterCapabilities'},
        'defaults': {'key': 'defaults', 'type': 'MicrosoftGraphPrinterDefaults'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'is_accepting_jobs': {'key': 'isAcceptingJobs', 'type': 'bool'},
        'location': {'key': 'location', 'type': 'MicrosoftGraphPrinterLocation'},
        'manufacturer': {'key': 'manufacturer', 'type': 'str'},
        'model': {'key': 'model', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'MicrosoftGraphPrinterStatus'},
        'jobs': {'key': 'jobs', 'type': '[MicrosoftGraphPrintJob]'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'accepting_jobs': {'key': 'acceptingJobs', 'type': 'bool'},
        'is_shared': {'key': 'isShared', 'type': 'bool'},
        'registered_date_time': {'key': 'registeredDateTime', 'type': 'iso-8601'},
        'allowed_groups': {'key': 'allowedGroups', 'type': '[MicrosoftGraphPrintIdentity]'},
        'allowed_users': {'key': 'allowedUsers', 'type': '[MicrosoftGraphPrintUserIdentity]'},
        'connectors': {'key': 'connectors', 'type': '[MicrosoftGraphPrintConnector]'},
        'share': {'key': 'share', 'type': 'MicrosoftGraphPrinterShare'},
        'shares': {'key': 'shares', 'type': '[MicrosoftGraphPrinterShare]'},
        'task_triggers': {'key': 'taskTriggers', 'type': '[MicrosoftGraphPrintTaskTrigger]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        capabilities: Optional["MicrosoftGraphPrinterCapabilities"] = None,
        defaults: Optional["MicrosoftGraphPrinterDefaults"] = None,
        display_name: Optional[str] = None,
        is_accepting_jobs: Optional[bool] = None,
        location: Optional["MicrosoftGraphPrinterLocation"] = None,
        manufacturer: Optional[str] = None,
        model: Optional[str] = None,
        name: Optional[str] = None,
        status: Optional["MicrosoftGraphPrinterStatus"] = None,
        jobs: Optional[List["MicrosoftGraphPrintJob"]] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        accepting_jobs: Optional[bool] = None,
        is_shared: Optional[bool] = None,
        registered_date_time: Optional[datetime.datetime] = None,
        allowed_groups: Optional[List["MicrosoftGraphPrintIdentity"]] = None,
        allowed_users: Optional[List["MicrosoftGraphPrintUserIdentity"]] = None,
        connectors: Optional[List["MicrosoftGraphPrintConnector"]] = None,
        share: Optional["MicrosoftGraphPrinterShare"] = None,
        shares: Optional[List["MicrosoftGraphPrinterShare"]] = None,
        task_triggers: Optional[List["MicrosoftGraphPrintTaskTrigger"]] = None,
        **kwargs
    ):
        # Every printerBase field is forwarded to the parent initializer.
        super().__init__(
            id=id,
            capabilities=capabilities,
            defaults=defaults,
            display_name=display_name,
            is_accepting_jobs=is_accepting_jobs,
            location=location,
            manufacturer=manufacturer,
            model=model,
            name=name,
            status=status,
            jobs=jobs,
            **kwargs
        )
        self.additional_properties = additional_properties
        # Printer-specific registration and sharing state.
        self.accepting_jobs = accepting_jobs
        self.is_shared = is_shared
        self.registered_date_time = registered_date_time
        # Access control and infrastructure relationships.
        self.allowed_groups = allowed_groups
        self.allowed_users = allowed_users
        self.connectors = connectors
        self.share = share
        self.shares = shares
        self.task_triggers = task_triggers
class MicrosoftGraphPrinterCapabilities(msrest.serialization.Model):
"""printerCapabilities.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param bottom_margins:
:type bottom_margins: list[int]
:param collation:
:type collation: bool
:param color_modes:
:type color_modes: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintColorMode]
:param content_types:
:type content_types: list[str]
:param copies_per_job: integerRange.
:type copies_per_job: ~devices_cloud_print.models.MicrosoftGraphIntegerRange
:param dpis:
:type dpis: list[int]
:param duplex_modes:
:type duplex_modes: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintDuplexMode]
:param feed_directions:
:type feed_directions: list[str or
~devices_cloud_print.models.MicrosoftGraphPrinterFeedDirection]
:param feed_orientations:
:type feed_orientations: list[str or
~devices_cloud_print.models.MicrosoftGraphPrinterFeedOrientation]
:param finishings:
:type finishings: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintFinishing]
:param input_bins:
:type input_bins: list[str]
:param is_color_printing_supported:
:type is_color_printing_supported: bool
:param is_page_range_supported:
:type is_page_range_supported: bool
:param left_margins:
:type left_margins: list[int]
:param media_colors:
:type media_colors: list[str]
:param media_sizes:
:type media_sizes: list[str]
:param media_types:
:type media_types: list[str]
:param multipage_layouts:
:type multipage_layouts: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintMultipageLayout]
:param orientations:
:type orientations: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintOrientation]
:param output_bins:
:type output_bins: list[str]
:param pages_per_sheet:
:type pages_per_sheet: list[int]
:param qualities:
:type qualities: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintQuality]
:param right_margins:
:type right_margins: list[int]
:param scalings:
:type scalings: list[str or ~devices_cloud_print.models.MicrosoftGraphPrintScaling]
:param supported_color_configurations:
:type supported_color_configurations: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintColorConfiguration]
:param supported_copies_per_job: integerRange.
:type supported_copies_per_job: ~devices_cloud_print.models.MicrosoftGraphIntegerRange
:param supported_document_mime_types:
:type supported_document_mime_types: list[str]
:param supported_duplex_configurations:
:type supported_duplex_configurations: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintDuplexConfiguration]
:param supported_finishings:
:type supported_finishings: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintFinishing]
:param supported_media_colors:
:type supported_media_colors: list[str]
:param supported_media_sizes:
:type supported_media_sizes: list[str]
:param supported_media_types:
:type supported_media_types: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintMediaType]
:param supported_orientations:
:type supported_orientations: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintOrientation]
:param supported_output_bins:
:type supported_output_bins: list[str]
:param supported_pages_per_sheet: integerRange.
:type supported_pages_per_sheet: ~devices_cloud_print.models.MicrosoftGraphIntegerRange
:param supported_presentation_directions:
:type supported_presentation_directions: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintPresentationDirection]
:param supported_print_qualities:
:type supported_print_qualities: list[str or
~devices_cloud_print.models.MicrosoftGraphPrintQuality]
:param supports_fit_pdf_to_page:
:type supports_fit_pdf_to_page: bool
:param top_margins:
:type top_margins: list[int]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'bottom_margins': {'key': 'bottomMargins', 'type': '[int]'},
'collation': {'key': 'collation', 'type': 'bool'},
'color_modes': {'key': 'colorModes', 'type': '[str]'},
'content_types': {'key': 'contentTypes', 'type': '[str]'},
'copies_per_job': {'key': 'copiesPerJob', 'type': 'MicrosoftGraphIntegerRange'},
'dpis': {'key': 'dpis', 'type': '[int]'},
'duplex_modes': {'key': 'duplexModes', 'type': '[str]'},
'feed_directions': {'key': 'feedDirections', 'type': '[str]'},
'feed_orientations': {'key': 'feedOrientations', 'type': '[str]'},
'finishings': {'key': 'finishings', 'type': '[str]'},
'input_bins': {'key': 'inputBins', 'type': '[str]'},
'is_color_printing_supported': {'key': 'isColorPrintingSupported', 'type': 'bool'},
'is_page_range_supported': {'key': 'isPageRangeSupported', 'type': 'bool'},
'left_margins': {'key': 'leftMargins', 'type': '[int]'},
'media_colors': {'key': 'mediaColors', 'type': '[str]'},
'media_sizes': {'key': 'mediaSizes', 'type': '[str]'},
'media_types': {'key': 'mediaTypes', 'type': '[str]'},
'multipage_layouts': {'key': 'multipageLayouts', 'type': '[str]'},
'orientations': {'key': 'orientations', 'type': '[str]'},
'output_bins': {'key': 'outputBins', 'type': '[str]'},
'pages_per_sheet': {'key': 'pagesPerSheet', 'type': '[int]'},
'qualities': {'key': 'qualities', 'type': '[str]'},
'right_margins': {'key': 'rightMargins', 'type': '[int]'},
'scalings': {'key': 'scalings', 'type': '[str]'},
'supported_color_configurations': {'key': 'supportedColorConfigurations', 'type': '[str]'},
'supported_copies_per_job': {'key': 'supportedCopiesPerJob', 'type': 'MicrosoftGraphIntegerRange'},
'supported_document_mime_types': {'key': 'supportedDocumentMimeTypes', 'type': '[str]'},
'supported_duplex_configurations': {'key': 'supportedDuplexConfigurations', 'type': '[str]'},
'supported_finishings': {'key': 'supportedFinishings', 'type': '[str]'},
'supported_media_colors': {'key': 'supportedMediaColors', 'type': '[str]'},
'supported_media_sizes': {'key': 'supportedMediaSizes', 'type': '[str]'},
'supported_media_types': {'key': 'supportedMediaTypes', 'type': '[str]'},
'supported_orientations': {'key': 'supportedOrientations', 'type': '[str]'},
'supported_output_bins': {'key': 'supportedOutputBins', 'type': '[str]'},
'supported_pages_per_sheet': {'key': 'supportedPagesPerSheet', 'type': 'MicrosoftGraphIntegerRange'},
'supported_presentation_directions': {'key': 'supportedPresentationDirections', 'type': '[str]'},
'supported_print_qualities': {'key': 'supportedPrintQualities', 'type': '[str]'},
'supports_fit_pdf_to_page': {'key': 'supportsFitPdfToPage', 'type': 'bool'},
'top_margins': {'key': 'topMargins', 'type': '[int]'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
bottom_margins: Optional[List[int]] = None,
collation: Optional[bool] = None,
color_modes: Optional[List[Union[str, "MicrosoftGraphPrintColorMode"]]] = None,
content_types: Optional[List[str]] = None,
copies_per_job: Optional["MicrosoftGraphIntegerRange"] = None,
dpis: Optional[List[int]] = None,
duplex_modes: Optional[List[Union[str, "MicrosoftGraphPrintDuplexMode"]]] = None,
feed_directions: Optional[List[Union[str, "MicrosoftGraphPrinterFeedDirection"]]] = None,
feed_orientations: Optional[List[Union[str, "MicrosoftGraphPrinterFeedOrientation"]]] = None,
finishings: Optional[List[Union[str, "MicrosoftGraphPrintFinishing"]]] = None,
input_bins: Optional[List[str]] = None,
is_color_printing_supported: Optional[bool] = None,
is_page_range_supported: Optional[bool] = None,
left_margins: Optional[List[int]] = None,
media_colors: Optional[List[str]] = None,
media_sizes: Optional[List[str]] = None,
media_types: Optional[List[str]] = None,
multipage_layouts: Optional[List[Union[str, "MicrosoftGraphPrintMultipageLayout"]]] = None,
orientations: Optional[List[Union[str, "MicrosoftGraphPrintOrientation"]]] = None,
| |
(rad)")
if component == "dt13_resid": htemp.SetYTitle("#Deltax^{local} (mm)")
if component == "dt13_slope": htemp.SetYTitle("#Deltadx/dz^{local} (mrad)")
if component == "dt2_resid": htemp.SetYTitle("#Deltay^{local} (mm)")
if component == "dt2_slope": htemp.SetYTitle("#Deltady/dz^{local} (mrad)")
htemp.GetXaxis().CenterTitle()
htemp.GetYaxis().CenterTitle()
htemp.GetYaxis().SetTitleOffset(0.75)
c1.Clear()
htemp.Draw()
if len(gtemp_12_phi) > 0:
gtemp_12.Draw("p")
gtemp2_12.Draw("p")
if len(gtemp_23_phi) > 0:
gtemp_23.Draw("p")
gtemp2_23.Draw("p")
if len(gtemp_34_phi) > 0:
gtemp_34.Draw("p")
gtemp2_34.Draw("p")
tlegend = ROOT.TLegend(0.5, 0.72, 0.9, 0.92)
tlegend.SetBorderSize(0)
tlegend.SetFillColor(ROOT.kWhite)
if len(gtemp_12_phi) > 0:
tlegend.AddEntry(gtemp_12, "MB1 - MB2 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_12_val), stdev(gtemp_12_val)), "pl")
if len(gtemp_23_phi) > 0:
tlegend.AddEntry(gtemp_23, "MB2 - MB3 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_23_val), stdev(gtemp_23_val)), "pl")
if len(gtemp_34_phi) > 0:
tlegend.AddEntry(gtemp_34, "MB3 - MB4 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_34_val), stdev(gtemp_34_val)), "pl")
if len(gtemp_12_phi) > 0:
tlegend.AddEntry(gtemp_12, "total mean: %4.2f, total RMS: %4.2f" % \
(mean(gtemp_12_val + gtemp_23_val + gtemp_34_val),
stdev(gtemp_12_val + gtemp_23_val + gtemp_34_val)), "")
tlegend.Draw()
##################################################################################
def segdiffvsphicsc(tfiles, component, pair, window=5., **args):
    """Plot CSC segment-pair differences (residual or slope) versus phi.

    Draws, on the global canvas ``c1``, one marker per chamber for each ring
    of an adjacent endcap station pair, plus a legend with per-ring mean and
    RMS.

    :param tfiles: ROOT files, handed through to ``segdiff``.
    :param component: "csc_resid" or "csc_slope" (must start with "csc").
    :param pair: two-digit adjacent station pair, e.g. 12 for ME1-ME2.
    :param window: half-height of the y-axis range.
    :param args: must contain ``endcap`` = "m" (minus) or "p" (plus).
    """
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)
    # BUGFIX: the original read "if not ...: Exception", which evaluates the
    # exception class as a no-op expression and silently continues; actually
    # raise on an invalid component.
    if not component[0:3] == "csc":
        raise Exception
    endcap = args["endcap"]
    if endcap == "m":
        endcapnum = 2
        endcapsign = "-"
    elif endcap == "p":
        endcapnum = 1
        endcapsign = "+"
    else:
        raise Exception
    station1 = int(str(pair)[0])
    station2 = int(str(pair)[1])
    if not station2 - station1 == 1:
        raise Exception
    rings = [1, 2]
    if station2 == 4:
        rings = [1]

    global htemp, gtemp_1, gtemp2_1, gtemp_2, gtemp2_2, tlegend
    htemp = ROOT.TH1F("htemp", "", 1, -pi*5./180., pi*(2.-5./180.))
    gtemp_1_phi, gtemp_1_val, gtemp_1_err, gtemp_1_val2, gtemp_1_err2 = [], [], [], [], []
    gtemp_2_phi, gtemp_2_val, gtemp_2_err, gtemp_2_val2, gtemp_2_err2 = [], [], [], [], []
    for ring in rings:
        # ring 1 chambers span 20 degrees (18 chambers), ring 2 span 10 (36).
        chambers = range(1, 37)
        if ring == 1:
            chambers = range(1, 19)
        for chamber in chambers:
            phi, val, err, val2, err2, fit1, fit2, fit3 = segdiff(tfiles, component, pair, endcap=endcap, ring=ring, chamber=chamber)
            # keep only chambers where all three fits converged
            if fit1 and fit2 and fit3:
                if ring == 1:
                    gtemp_1_phi.append(phi)
                    gtemp_1_val.append(val)
                    gtemp_1_err.append(err)
                    gtemp_1_val2.append(val2)
                    gtemp_1_err2.append(err2)
                if ring == 2:
                    gtemp_2_phi.append(phi)
                    gtemp_2_val.append(val)
                    gtemp_2_err.append(err)
                    gtemp_2_val2.append(val2)
                    gtemp_2_err2.append(err2)

    if len(gtemp_1_phi) > 0:
        gtemp_1 = ROOT.TGraphErrors(len(gtemp_1_phi), array.array("d", gtemp_1_phi), array.array("d", gtemp_1_val),
                                    array.array("d", [0.] * len(gtemp_1_phi)), array.array("d", gtemp_1_err))
        gtemp2_1 = ROOT.TGraphErrors(len(gtemp_1_phi), array.array("d", gtemp_1_phi), array.array("d", gtemp_1_val2),
                                     array.array("d", [0.] * len(gtemp_1_phi)), array.array("d", gtemp_1_err2))
    if len(gtemp_2_phi) > 0:
        gtemp_2 = ROOT.TGraphErrors(len(gtemp_2_phi), array.array("d", gtemp_2_phi), array.array("d", gtemp_2_val),
                                    array.array("d", [0.] * len(gtemp_2_phi)), array.array("d", gtemp_2_err))
        gtemp2_2 = ROOT.TGraphErrors(len(gtemp_2_phi), array.array("d", gtemp_2_phi), array.array("d", gtemp_2_val2),
                                     array.array("d", [0.] * len(gtemp_2_phi)), array.array("d", gtemp_2_err2))

    # Solid markers: primary values; open markers: secondary (val2) values.
    if len(gtemp_1_phi) > 0:
        gtemp_1.SetMarkerStyle(20); gtemp_1.SetMarkerSize(1.);
        gtemp_1.SetMarkerColor(ROOT.kBlue); gtemp_1.SetLineColor(ROOT.kBlue)
        gtemp2_1.SetMarkerStyle(24); gtemp2_1.SetMarkerSize(1.);
        gtemp2_1.SetMarkerColor(ROOT.kBlue); gtemp2_1.SetLineColor(ROOT.kBlue)
    if len(gtemp_2_phi) > 0:
        gtemp_2.SetMarkerStyle(21); gtemp_2.SetMarkerSize(1.);
        gtemp_2.SetMarkerColor(ROOT.kRed); gtemp_2.SetLineColor(ROOT.kRed)
        gtemp2_2.SetMarkerStyle(25); gtemp2_2.SetMarkerSize(1.);
        gtemp2_2.SetMarkerColor(ROOT.kRed); gtemp2_2.SetLineColor(ROOT.kRed)

    htemp.SetTitle("ME%s%d - ME%s%d" % (endcapsign, station2, endcapsign, station1))
    htemp.SetAxisRange(-window, window, "Y")
    htemp.SetXTitle("Average #phi of pair (rad)")
    if component == "csc_resid": htemp.SetYTitle("#Delta(r#phi)^{local} (mm)")
    if component == "csc_slope": htemp.SetYTitle("#Deltad(r#phi)/dz^{local} (mrad)")
    htemp.GetXaxis().CenterTitle()
    htemp.GetYaxis().CenterTitle()
    htemp.GetYaxis().SetTitleOffset(0.75)
    c1.Clear()
    htemp.Draw()
    if len(gtemp_1_phi) > 0:
        gtemp_1.Draw("p")
        gtemp2_1.Draw("p")
    if len(gtemp_2_phi) > 0:
        gtemp_2.Draw("p")
        gtemp2_2.Draw("p")

    tlegend = ROOT.TLegend(0.5, 0.72, 0.9, 0.92)
    tlegend.SetBorderSize(0)
    tlegend.SetFillColor(ROOT.kWhite)
    if len(gtemp_1_phi) > 0:
        tlegend.AddEntry(gtemp_1, "ring 1 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_1_val), stdev(gtemp_1_val)), "pl")
    if len(gtemp_2_phi) > 0:
        tlegend.AddEntry(gtemp_2, "ring 2 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_2_val), stdev(gtemp_2_val)), "pl")
    tlegend.Draw()
##################################################################################
# makes a scatterplot of corrections coming either from reports (if xml geometries are None)
# or from geometryX and geometryY (WRT the common initial geometry0)
def corrections2D(reportsX=None, reportsY=None, geometry0=None, geometryX=None, geometryY=None,
window=25., selection=None, name="tmp", canvas=None, pre_title_x=None, pre_title_y=None,
which="110011"):
tdrStyle.SetOptStat(0)
tdrStyle.SetStatW(0.40)
# determine what are we plotting: report vs report or xml vs xml
mode = None
check_reports = False
if reportsX is not None and reportsY is not None:
mode = "reports"
check_reports = True
if geometry0 is not None and geometryX is not None and geometryY is not None:
mode = "xmls"
if mode is None:
print("Either couple of reports or three geometries have to be given as input. Exiting...")
return
# setup ranges with the maximum [-window,window] that later will be optimized to [-wnd_adaptive,wnd_adaptive]
wnd = [window]*6
wnd_adaptive = [.1]*6
global hx, hy, hz, hphix, hphiy, hphiz
bins=2000
hx = ROOT.TH2F("%s_x" % name, "", bins, -wnd[0], wnd[0], bins, -wnd[0], wnd[0])
hy = ROOT.TH2F("%s_y" % name, "", bins, -wnd[1], wnd[1], bins, -wnd[1], wnd[1])
hz = ROOT.TH2F("%s_z" % name, "", bins, -wnd[2], wnd[2], bins, -wnd[2], wnd[2])
hphix = ROOT.TH2F("%s_phix" % name, "", bins, -wnd[3], wnd[3], bins, -wnd[3], wnd[3])
hphiy = ROOT.TH2F("%s_phiy" % name, "", bins, -wnd[4], wnd[4], bins, -wnd[4], wnd[4])
hphiz = ROOT.TH2F("%s_phiz" % name, "", bins, -wnd[5], wnd[5], bins, -wnd[5], wnd[5])
hhh = [hx, hy, hz, hphix, hphiy, hphiz]
# initialize PCA objects
global pca_x, pca_y, pca_z, pca_phix, pca_phiy, pca_phiz
pca_x = ROOT.TPrincipal(2,"D")
pca_y = ROOT.TPrincipal(2,"D")
pca_z = ROOT.TPrincipal(2,"D")
pca_phix = ROOT.TPrincipal(2,"D")
pca_phiy = ROOT.TPrincipal(2,"D")
pca_phiz = ROOT.TPrincipal(2,"D")
pcas = [pca_x, pca_y, pca_z, pca_phix, pca_phiy, pca_phiz]
# arrays to later fill graphs with
ax=[]; ay=[]; az=[]; aphix=[]; aphiy=[]; aphiz=[]
aaa = [ax, ay, az, aphix, aphiy, aphiz]
# list of postal addresses
postal_addresses = []
# if reports are given, use them to fill addresses and do extra checks
if check_reports:
for r1 in reportsX:
# skip ME1/a
if r1.postal_address[0]=='CSC' and r1.postal_address[2]==1 and r1.postal_address[3]==4: continue
if selection is None or (selection.__code__.co_argcount == len(r1.postal_address) and selection(*r1.postal_address)):
r2 = getReportByPostalAddress(r1.postal_address, reportsY)
if r2 is None:
print("bad r2 in ",r1.postal_address)
continue
if r1.status != "PASS" or r2.status != "PASS":
print("bad status", r1.postal_address, r1.status, r2.status)
continue
postal_addresses.append(r1.postal_address)
# otherwise, use chamber addresses from xmls
else:
for key in geometry0.dt.keys():
if len(key)==3 and key in geometryX.dt and key in geometryY.dt:
postal_addresses.append( tuple(['DT'] + list(key)) )
for key in geometry0.csc.keys():
# skip ME1/a
if key[2]==1 and key[3]==4: continue
if len(key)==4 and key in geometryX.csc and key in geometryY.csc:
postal_addresses.append( tuple(['CSC'] + list(key)) )
# fill the values
for addr in postal_addresses:
# checks the selection function
if not (selection is None or (selection.__code__.co_argcount == len(addr) and selection(*addr)) ): continue
factors = [10. * signConventions[addr][0], 10. * signConventions[addr][1], 10. * signConventions[addr][2],
1000., 1000., 1000. ]
if check_reports:
rX = getReportByPostalAddress(addr, reportsX)
rY = getReportByPostalAddress(addr, reportsY)
deltasX = [rX.deltax, rX.deltay, rX.deltaz, rX.deltaphix, rX.deltaphiy, rX.deltaphiz]
deltasY = [rY.deltax, rY.deltay, rY.deltaz, rY.deltaphix, rY.deltaphiy, rY.deltaphiz]
if mode == "reports":
checks = map( lambda d1, d2: d1 is not None and d2 is not None and d1.error is not None \
and d2.error is not None and (d1.error**2 + d2.error**2) > 0. , \
deltasX, deltasY)
for i in range(len(checks)):
if not checks[i]: continue
fillX = deltasX[i].value * factors[i]
fillY = deltasY[i].value * factors[i]
aaa[i].append([fillX,fillY])
pcas[i].AddRow(array.array('d',[fillX,fillY]))
mx = max(abs(fillX), abs(fillY))
if mx > wnd_adaptive[i]: wnd_adaptive[i] = mx
if mode == "xmls":
db0 = dbX = dbY = None
if addr[0] == "DT":
db0, dbX, dbY = geometry0.dt[addr[1:]], geometryX.dt[addr[1:]], geometryY.dt[addr[1:]]
if addr[0] == 'CSC':
db0, dbX, dbY = geometry0.csc[addr[1:]], geometryX.csc[addr[1:]], geometryY.csc[addr[1:]]
checks = [True]*6
if check_reports:
checks = map( lambda d1, d2: d1 is not None and d2 is not None , deltasX, deltasY)
gdeltas0 = [db0.x, db0.y, db0.z, db0.phix, db0.phiy, db0.phiz]
gdeltasX = [dbX.x, dbX.y, dbX.z, dbX.phix, dbX.phiy, dbX.phiz]
gdeltasY = [dbY.x, dbY.y, dbY.z, dbY.phix, dbY.phiy, dbY.phiz]
for i in range(len(checks)):
if not checks[i]: continue
fillX = (gdeltasX[i] - gdeltas0[i]) * factors[i]
fillY = (gdeltasY[i] - gdeltas0[i]) * factors[i]
aaa[i].append([fillX,fillY])
pcas[i].AddRow(array.array('d',[fillX,fillY]))
mx = max(abs(fillX), abs(fillY))
if mx > wnd_adaptive[i]: wnd_adaptive[i] = mx
#if addr[0] == 'CSC' and i==1 and (abs(fillX)>0.01 or abs(fillY)>0.01): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
#if addr[0] == 'CSC' and i==2 and (abs(fillX)>0.02 or abs(fillY)>0.02): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
#if addr[0] == 'CSC' and i==3 and (abs(fillX)>0.05 or abs(fillY)>0.05): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
if mode == "xmls":
if pre_title_x is None: pre_title_x = "geometry 1 "
if pre_title_y is None: pre_title_y = "geometry 2 "
if mode == "reports":
if pre_title_x is None: pre_title_x = "iteration's "
if pre_title_y is None: pre_title_y = "other iteration's "
tmptitles = ["#Deltax (mm)", "#Deltay (mm)", "#Deltaz (mm)",
"#Delta#phi_{x} (mrad)", "#Delta#phi_{y} (mrad)", "#Delta#phi_{z} (mrad)"]
htitles = []
for t in tmptitles: htitles.append([pre_title_x + t, pre_title_y + t])
| |
# Repository: jmichuda/bmi-214-final-project
#!/usr/bin/python
## taken from https://github.com/oncokb/oncokb-annotator/blob/master/AnnotatorCore.py
## used under GNU license
import json
import sys
import csv
from enum import Enum
import requests
import os.path
import logging
import re
import matplotlib
matplotlib.use('Agg')  # headless backend: no display needed for plot output
import matplotlib.pyplot as plt
from datetime import date
import ctypes as ct
logging.basicConfig(level=logging.INFO)
# Silence chatty HTTP client loggers.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
log = logging.getLogger('AnnotatorCore')
# API request timeout in seconds (240 s = four minutes).
REQUEST_TIMEOUT = 240
# Deal with field-size overflow problem on Windows, see
# https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072
csv.field_size_limit(int(ct.c_ulong(-1).value // 2))
sizeLimit = csv.field_size_limit()
csv.field_size_limit(sizeLimit)  # for reading large files
# OncoKB API endpoint and bearer token; override via the setters below.
oncokbapiurl = "https://www.oncokb.org/api/v1"
oncokbapibearertoken = ""
def setoncokbbaseurl(u):
    """Point the annotator at a different OncoKB server.

    ``u`` is the server root URL (trailing slashes are tolerated); the
    stored endpoint always ends in ``/api/v1``.
    """
    global oncokbapiurl
    root = u.rstrip('/')
    oncokbapiurl = f"{root}/api/v1"
def setoncokbapitoken(t):
    """Store the OncoKB API bearer token, with surrounding whitespace removed."""
    global oncokbapibearertoken
    token = t.strip()
    oncokbapibearertoken = token
cancerhotspotsbaseurl = "http://www.cancerhotspots.org"
def setcancerhotspotsbaseurl(u):
    """Override the cancerhotspots.org base URL used for hotspot queries."""
    global cancerhotspotsbaseurl
    cancerhotspotsbaseurl = u
_3dhotspotsbaseurl = "http://www.3dhotspots.org"
def set3dhotspotsbaseurl(u):
    """Override the 3dhotspots.org base URL used for 3D hotspot queries."""
    global _3dhotspotsbaseurl
    _3dhotspotsbaseurl = u
sampleidsfilter = None
def setsampleidsfileterfile(f):
    """Load the sample-id filter from file *f* (one sample id per line).

    Fix: the original left the file handle open (``open(f)`` was never
    closed); a context manager now closes it deterministically.
    """
    global sampleidsfilter
    with open(f) as fh:
        sampleidsfilter = set(line.rstrip() for line in fh)
    log.info(len(sampleidsfilter))
GENE_IN_ONCOKB_HEADER = 'GENE_IN_ONCOKB'
VARIANT_IN_ONCOKB_HEADER = 'VARIANT_IN_ONCOKB'
GENE_IN_ONCOKB_DEFAULT = 'False'
VARIANT_IN_ONCOKB_DEFAULT = 'False'
levels = [
'LEVEL_1',
'LEVEL_2',
'LEVEL_3A',
'LEVEL_3B',
'LEVEL_4',
'LEVEL_R1',
'LEVEL_R2',
'LEVEL_R3'
]
dxLevels = [
'LEVEL_Dx1',
'LEVEL_Dx2',
'LEVEL_Dx3'
]
pxLevels = [
'LEVEL_Px1',
'LEVEL_Px2',
'LEVEL_Px3'
]
mutationtypeconsequencemap = {
'3\'Flank': ['any'],
'5\'Flank ': ['any'],
'Targeted_Region': ['inframe_deletion', 'inframe_insertion'],
'COMPLEX_INDEL': ['inframe_deletion', 'inframe_insertion'],
'ESSENTIAL_SPLICE_SITE': ['feature_truncation'],
'Exon skipping': ['inframe_deletion'],
'Frameshift deletion': ['frameshift_variant'],
'Frameshift insertion': ['frameshift_variant'],
'FRAMESHIFT_CODING': ['frameshift_variant'],
'Frame_Shift_Del': ['frameshift_variant'],
'Frame_Shift_Ins': ['frameshift_variant'],
'Fusion': ['fusion'],
'Indel': ['frameshift_variant', 'inframe_deletion', 'inframe_insertion'],
'In_Frame_Del': ['inframe_deletion'],
'In_Frame_Ins': ['inframe_insertion'],
'Missense': ['missense_variant'],
'Missense_Mutation': ['missense_variant'],
'Nonsense_Mutation': ['stop_gained'],
'Nonstop_Mutation': ['stop_lost'],
'Splice_Site': ['splice_region_variant'],
'Splice_Site_Del': ['splice_region_variant'],
'Splice_Site_SNP': ['splice_region_variant'],
'splicing': ['splice_region_variant'],
'Translation_Start_Site': ['start_lost'],
'vIII deletion': ['any']
}
# column headers
HUGO_HEADERS = ['HUGO_SYMBOL', 'HUGO_GENE_SYMBOL', 'GENE']
CONSEQUENCE_HEADERS = ['VARIANT_CLASSIFICATION', 'MUTATION_TYPE']
ALTERATION_HEADER = 'ALTERATION'
HGVSP_SHORT_HEADER = 'HGVSP_SHORT'
HGVSP_HEADER = 'HGVSP'
HGVSG_HEADER = 'HGVSG'
HGVS_HEADERS = [ALTERATION_HEADER, HGVSP_SHORT_HEADER, HGVSP_HEADER, HGVSG_HEADER, 'AMINO_ACID_CHANGE', 'FUSION']
SAMPLE_HEADERS = ['SAMPLE_ID', 'TUMOR_SAMPLE_BARCODE']
PROTEIN_START_HEADERS = ['PROTEIN_START']
PROTEIN_END_HEADERS = ['PROTEIN_END']
PROTEIN_POSITION_HEADERS = ['PROTEIN_POSITION']
CANCER_TYPE_HEADERS = ['ONCOTREE_CODE', 'CANCER_TYPE']
FUSION_HEADERS = ['FUSION']
REFERENCE_GENOME_HEADERS = ['NCBI_BUILD', 'REFERENCE_GENOME']
# columns for genomic change annotation
GC_CHROMOSOME_HEADER = 'CHROMOSOME'
GC_START_POSITION_HEADER = 'START_POSITION'
GC_END_POSITION_HEADER = 'END_POSITION'
GC_REF_ALLELE_HEADER = 'REFERENCE_ALLELE'
GC_VAR_ALLELE_1_HEADER = 'TUMOR_SEQ_ALLELE1'
GC_VAR_ALLELE_2_HEADER = 'TUMOR_SEQ_ALLELE2'
GENOMIC_CHANGE_HEADERS = [GC_CHROMOSOME_HEADER, GC_START_POSITION_HEADER, GC_END_POSITION_HEADER, GC_REF_ALLELE_HEADER, GC_VAR_ALLELE_1_HEADER, GC_VAR_ALLELE_2_HEADER]
class QueryType(Enum):
    """Supported alteration query formats; each value doubles as the
    corresponding input-file column name."""
    HGVSP_SHORT = 'HGVSP_SHORT'
    HGVSP = 'HGVSP'
    HGVSG = 'HGVSG'
    GENOMIC_CHANGE = 'GENOMIC_CHANGE'
class ReferenceGenome(Enum):
    """Reference genome builds accepted when annotating (see
    get_reference_genome_from_row)."""
    GRCH37 = 'GRCh37'
    GRCH38 = 'GRCh38'
REQUIRED_QUERY_TYPE_COLUMNS = {
QueryType.HGVSP_SHORT: [HGVSP_SHORT_HEADER],
QueryType.HGVSP: [HGVSP_HEADER],
QueryType.HGVSG: [HGVSG_HEADER],
QueryType.GENOMIC_CHANGE: GENOMIC_CHANGE_HEADERS
}
POST_QUERIES_THRESHOLD = 200
POST_QUERIES_THRESHOLD_GC_HGVSG = 100
def getOncokbInfo():
    """Build the README banner: annotation date, OncoKB API URL and, when the
    server is reachable, the OncoKB data version.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception`` and the cause is now logged with the
    traceback.
    """
    ret = ['Files annotated on ' + date.today().strftime('%m/%d/%Y') + "\nOncoKB API URL: "+oncokbapiurl]
    try:
        info = requests.get(oncokbapiurl + "/info", timeout=REQUEST_TIMEOUT).json()
        ret.append('\nOncoKB data version: ' + info['dataVersion']['version']+', released on ' + info['dataVersion']['date'])
    except Exception:
        log.exception("error when fetch OncoKB info")
    return ''.join(ret)
def generateReadme(outfile):
    """Write the OncoKB version banner to *outfile*.

    Fix: close the output file deterministically with a context manager
    instead of relying on an explicit close (which would be skipped if
    getOncokbInfo() raised).
    """
    with open(outfile, 'w+', 1000) as outf:
        outf.write(getOncokbInfo())
def gethotspots(url, type):
    """Fetch hotspot definitions from *url*.

    Returns a dict mapping hugoSymbol -> set of residue positions. Entries
    are kept when *type* is None or matches the entry's 'type' field.
    NOTE: the parameter name shadows the builtin ``type``; kept unchanged
    for caller compatibility.
    """
    hotspots = {}
    response = requests.get(url, timeout=REQUEST_TIMEOUT)
    if response.status_code != 200:
        log.error("error when processing %s \n" % url +
                  "reason: %s" % response.reason)
        return hotspots
    for entry in response.json():
        if type is not None and entry['type'] != type:
            continue
        positions = entry['aminoAcidPosition']
        residues = hotspots.setdefault(entry['hugoSymbol'], set())
        residues.update(range(positions['start'], positions['end'] + 1))
    return hotspots
def makeoncokbpostrequest(url, body):
    """POST *body* to an OncoKB endpoint, serializing objects via __dict__."""
    payload = json.dumps(body, default=lambda o: o.__dict__)
    return requests.post(
        url,
        headers={
            'Content-Type': 'application/json',
            'Authorization': 'Bearer %s' % oncokbapibearertoken,
        },
        data=payload,
        timeout=REQUEST_TIMEOUT,
    )
def makeoncokbgetrequest(url):
    """GET an OncoKB endpoint with the configured bearer token."""
    return requests.get(
        url,
        headers={
            'Content-Type': 'application/json',
            'Authorization': 'Bearer %s' % oncokbapibearertoken,
        },
        timeout=REQUEST_TIMEOUT,
    )
_3dhotspots = None
def init_3d_hotspots():
    """Populate the module-level 3D hotspot cache from 3dhotspots.org."""
    global _3dhotspots
    url = _3dhotspotsbaseurl + "/api/hotspots/3d"
    _3dhotspots = gethotspots(url, None)
conversiondict = {'Ala': 'A',
'Asx': 'B',
'Cys': 'C',
'Asp': 'D',
'Glu': 'E',
'Phe': 'F',
'Gly': 'G',
'His': 'H',
'Ile': 'I',
'Lys': 'K',
'Leu': 'L',
'Met': 'M',
'Asn': 'N',
'Pro': 'P',
'Gln': 'Q',
'Arg': 'R',
'Ser': 'S',
'Thr': 'T',
'Val': 'V',
'Trp': 'W',
'Tyr': 'Y',
'Glx': 'Z'
}
conversionlist = conversiondict.keys()
def conversion(hgvs):
    """Convert three-letter amino-acid codes in an HGVS string to one-letter codes.

    Only strings that contain a three-letter-code-plus-position pattern AND at
    least one known three-letter code are rewritten; anything else is returned
    unchanged.

    Fix: the regex is now a raw string — ``'\\d'`` inside a non-raw string is an
    invalid escape sequence (DeprecationWarning, a SyntaxWarning/error on newer
    Python versions).
    """
    threecharactersearch = re.findall(r'[a-zA-Z]{3}\d+', hgvs, flags=re.IGNORECASE)
    if threecharactersearch:
        if any(letters.lower() in hgvs.lower() for letters in conversionlist):
            return replace_all(hgvs)
    return hgvs
def replace_all(hgvs):
    """Substitute every known three-letter amino-acid code in *hgvs* with its
    one-letter equivalent (case-insensitive match)."""
    pattern = re.compile('|'.join(conversionlist), re.IGNORECASE)

    def _to_one_letter(match):
        return conversiondict[match.group().capitalize()]

    return pattern.sub(_to_one_letter, hgvs)
def append_annotation_to_file(outf, ncols, rows, annotations):
    """Write each row, merged with its annotation columns, to *outf*.

    Rows and annotations are matched by position; a length mismatch is logged
    (processing still proceeds over the annotations). Output is tab-separated,
    padded to *ncols*, with non-ASCII characters dropped.
    """
    if len(rows) != len(annotations):
        log.error('The length of the rows and annotations do not match')
    for index, annotation in enumerate(annotations):
        merged = rows[index]
        if annotation is not None:
            merged = merged + annotation
        merged = padrow(merged, ncols)
        line = '\t'.join(merged).encode('ascii', 'ignore').decode('ascii')
        outf.write(line + "\n")
def get_tumor_type_from_row(row, row_index, defaultCancerType, icancertype, cancerTypeMap, sample):
    """Resolve the cancer type for one input row.

    Precedence: per-sample map > row's cancer-type column > default. An empty
    result is logged as a data-quality warning but still returned.
    """
    cancertype = defaultCancerType
    if icancertype >= 0:
        from_row = get_cell_content(row, icancertype)
        if from_row is not None:
            cancertype = from_row
    if sample in cancerTypeMap:
        cancertype = cancerTypeMap[sample]
    if cancertype == "":
        log.info("Cancer type for the sample should be defined for a more accurate result\nline %s: %s\n" % (row_index, row))
    return cancertype
def has_desired_headers(desired_headers, file_headers):
    """Return True iff every header in *desired_headers* appears in *file_headers*.

    Idiom: the manual flag-and-break loop is replaced by ``all()``, which
    short-circuits exactly like the original.
    """
    return all(header in file_headers for header in desired_headers)
def resolve_query_type(user_input_query_type, headers):
    """Choose the query type used to annotate the file.

    Priority: explicit user choice, then the first matching column among
    HGVSp_Short / HGVSp / HGVSg / genomic-change columns, defaulting to
    HGVSp_Short. Raises when the file lacks both the required columns and
    the ALTERATION fallback column.

    Fixes: ``== False`` comparison replaced with ``not``; the error message
    now reports *selected_query_type* — the original used
    ``user_input_query_type``, which crashes with an AttributeError when the
    user did not supply a query type (it is None on the default path).
    """
    selected_query_type = None
    if isinstance(user_input_query_type, QueryType):
        selected_query_type = user_input_query_type
    if selected_query_type is None and HGVSP_SHORT_HEADER in headers:
        selected_query_type = QueryType.HGVSP_SHORT
    if selected_query_type is None and HGVSP_HEADER in headers:
        selected_query_type = QueryType.HGVSP
    if selected_query_type is None and HGVSG_HEADER in headers:
        selected_query_type = QueryType.HGVSG
    if selected_query_type is None and has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[QueryType.GENOMIC_CHANGE], headers):
        selected_query_type = QueryType.GENOMIC_CHANGE
    # default to HGVSp_Short
    if selected_query_type is None:
        selected_query_type = QueryType.HGVSP_SHORT
    # check the file has required columns
    if not has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[selected_query_type], headers):
        # when the check fails, the type can never be GENOMIC_CHANGE; for the
        # other types the ALTERATION column is an accepted fallback
        if ALTERATION_HEADER not in headers:
            raise Exception("The file does not have required columns "
                            + ', '.join(REQUIRED_QUERY_TYPE_COLUMNS[selected_query_type])
                            + " for the query type: " + selected_query_type.value)
    return selected_query_type
def get_reference_genome_from_row(row_reference_genome, default_reference_genome):
    """Map a row's genome-build string onto a ReferenceGenome member.

    Falls back to *default_reference_genome* when the cell is empty/None or
    names an unsupported build (the latter is logged).
    """
    if row_reference_genome is None or row_reference_genome == '':
        return default_reference_genome
    try:
        return ReferenceGenome[row_reference_genome.upper()]
    except KeyError:
        log.warning('Unexpected reference genome, only GRCh37 and GRCh38 are supported.' + (
            ' Use default.' if default_reference_genome is not None else ' Skipping.'))
        return default_reference_genome
def processalterationevents(eventfile, outfile, previousoutfile, defaultCancerType, cancerTypeMap,
                            annotatehotspots, user_input_query_type, default_reference_genome):
    """Annotate a tab-separated alteration file and write the result to *outfile*.

    Writes the original header plus the OncoKB annotation columns, then
    dispatches row processing to the handler matching the resolved query type.

    Fixes: the input was opened with mode 'rU', deprecated since Python 3.4
    and removed in 3.11 (universal newlines are the default); the output file
    was leaked on the early ``return`` when the header row is empty — both
    files are now context-managed.
    """
    if annotatehotspots:
        init_3d_hotspots()
    if os.path.isfile(previousoutfile):
        cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)
    with open(outfile, 'w+', 1000) as outf, open(eventfile, 'r') as infile:
        reader = csv.reader(infile, delimiter='\t')
        headers = readheaders(reader)
        ncols = headers["length"]
        if ncols == 0:
            return
        newncols = 0
        # original header line, then the extra annotation columns
        outf.write(headers['^-$'])
        if annotatehotspots:
            outf.write("\tIS-A-HOTSPOT")
            outf.write("\tIS-A-3D-HOTSPOT")
            newncols += 2
        outf.write("\t" + GENE_IN_ONCOKB_HEADER)
        outf.write("\t" + VARIANT_IN_ONCOKB_HEADER)
        outf.write("\tMUTATION_EFFECT")
        outf.write("\tMUTATION_EFFECT_CITATIONS")
        outf.write("\tONCOGENIC")
        newncols += 5
        for l in levels:
            outf.write('\t' + l)
        newncols += len(levels)
        outf.write("\tHIGHEST_LEVEL")
        outf.write("\tTX_CITATIONS")
        newncols += 2
        for l in dxLevels:
            outf.write('\t' + l)
        newncols += len(dxLevels)
        outf.write("\tHIGHEST_DX_LEVEL")
        outf.write("\tDX_CITATIONS")
        newncols += 2
        for l in pxLevels:
            outf.write('\t' + l)
        newncols += len(pxLevels)
        outf.write("\tHIGHEST_PX_LEVEL")
        outf.write("\tPX_CITATIONS")
        newncols += 2
        outf.write("\n")
        query_type = resolve_query_type(user_input_query_type, headers)
        if (query_type == QueryType.HGVSP_SHORT):
            process_alteration(reader, outf, headers, [HGVSP_SHORT_HEADER, ALTERATION_HEADER], ncols, newncols,
                               defaultCancerType,
                               cancerTypeMap, annotatehotspots, default_reference_genome)
        if (query_type == QueryType.HGVSP):
            process_alteration(reader, outf, headers, [HGVSP_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,
                               cancerTypeMap, annotatehotspots, default_reference_genome)
        if (query_type == QueryType.HGVSG):
            process_hvsg(reader, outf, headers, [HGVSG_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,
                         cancerTypeMap, annotatehotspots, default_reference_genome)
        if (query_type == QueryType.GENOMIC_CHANGE):
            process_genomic_change(reader, outf, headers, ncols, newncols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome)
def get_cell_content(row, index, return_empty_string=False):
    """Return row[index] unless it is missing.

    A cell is "missing" when the index is negative or the value is '' or the
    literal string 'NULL'. Missing cells yield '' when *return_empty_string*
    is set, otherwise None.
    """
    if index >= 0:
        value = row[index]
        if value != 'NULL' and value != '':
            return value
    return '' if return_empty_string else None
def process_alteration(maffilereader, outf, maf_headers, alteration_column_names, ncols, nannotationcols, defaultCancerType, cancerTypeMap,
annotatehotspots, default_reference_genome):
ihugo = geIndexOfHeader(maf_headers, HUGO_HEADERS)
iconsequence = geIndexOfHeader(maf_headers, CONSEQUENCE_HEADERS)
ihgvs = geIndexOfHeader(maf_headers, alteration_column_names)
isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)
istart = geIndexOfHeader(maf_headers, PROTEIN_START_HEADERS)
iend = geIndexOfHeader(maf_headers, PROTEIN_END_HEADERS)
iproteinpos = geIndexOfHeader(maf_headers, PROTEIN_POSITION_HEADERS)
icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)
ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)
posp = re.compile('[0-9]+')
i = 0
queries = []
rows = []
for row in maffilereader:
i = i + 1
if i % POST_QUERIES_THRESHOLD == 0:
log.info(i)
row = padrow(row, ncols)
sample = row[isample]
if sampleidsfilter and sample not in sampleidsfilter:
continue
hugo = row[ihugo]
consequence = get_cell_content(row, iconsequence)
if consequence in mutationtypeconsequencemap:
consequence = '%2B'.join(mutationtypeconsequencemap[consequence])
hgvs = row[ihgvs]
if hgvs.startswith('p.'):
hgvs = hgvs[2:]
cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)
reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)
hgvs = conversion(hgvs)
start = get_cell_content(row, istart)
end = get_cell_content(row, iend)
if start is None and iproteinpos >= 0 and row[iproteinpos] != "" and row[iproteinpos] != "." and row[iproteinpos] != "-":
poss = row[iproteinpos].split('/')[0].split('-')
try:
if len(poss) > 0:
start = int(poss[0])
if len(poss) == 2:
end = int(poss[1])
except ValueError:
log.info("position wrong at line %s: %s" % (str(i), row[iproteinpos]))
if start is None and consequence == "missense_variant":
m = posp.search(hgvs)
if m:
start = m.group()
if start is not None and end is None:
end = start
query = ProteinChangeQuery(hugo, hgvs, cancertype, reference_genome, consequence, start, end)
queries.append(query)
rows.append(row)
if len(queries) == POST_QUERIES_THRESHOLD:
annotations = pull_protein_change_info(queries,annotatehotspots)
append_annotation_to_file(outf, ncols + nannotationcols, rows, annotations)
queries = []
rows = []
if len(queries) > 0:
annotations = | |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: vision
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's vision ability.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import time # Time access and conversions
import cv2
import face_recognition
import pickle
import numpy as np
import threading
import json
from math import sqrt
from multipledispatch import dispatch
from picamera import PiCamera
from picamera.array import PiRGBArray
from t_system.motion.locking_system import LockingSystem
from t_system.motion import calc_ellipsoidal_angle
from t_system.decision import Decider
from t_system.audition import Hearer
from t_system.recordation import Recorder
from t_system.online_stream import OnlineStreamer
from t_system.high_tech_aim import Aimer
from t_system import T_SYSTEM_PATH
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
class Vision:
"""Class to define a vision of tracking system..
This class provides necessary initiations and functions named :func:`t_system.vision.Vision.detect_track`
as the loop for each camera frames for tracking mode, named :func:`t_system.vision.Vision.learn` as the
learning ability and :func:`t_system.vision.Vision.security` as the security mode.
"""
def __init__(self, args):
    """Initialization method of :class:`t_system.vision.Vision` class.

    Args:
        args: Command-line arguments (dict-like; keys used below).
    """
    # --- static configuration from the bundled JSON config file ---
    self.config_file = f'{T_SYSTEM_PATH}/vision/vision_config.json'
    with open(self.config_file) as conf_file:
        conf_file_json = json.load(conf_file)
        self.tracker_types = conf_file_json["tracker_types"]  # config file returns the tracker type list.
        self.target_mark_types = conf_file_json["target_mark_types"]  # config file returns the mark type dict.
    # --- strategy selection: bind detector / tracker / recognizer variants as methods ---
    self.detection_model = args["detection_model"]
    if self.detection_model == "haarcascade":
        self.detect_things = self.__detect_with_haarcascade
    else:
        self.detect_things = self.__detect_with_hog_or_cnn
    if args['use_tracking_api']:
        self.detect_track = self.__d_t_with_cv_ta
    else:
        self.detect_track = self.__d_t_without_cv_ta
    # Specify the tracker type
    self.tracker_type = args["tracker_type"]
    self.no_recognize = args["no_recognize"]
    if not self.no_recognize:
        self.recognition_data = self.__get_recognition_data(args["encoding_file"])
        self.track = self.__track_with_recognizing
    else:
        self.track = self.__track_without_recognizing
    # --- audio + camera hardware setup (camera attributes must be set before capture) ---
    self.hearer = Hearer(args)
    # self.hearer = None
    resolution = (args["resolution"][0], args["resolution"][1])
    self.camera = PiCamera()
    self.camera.resolution = resolution
    self.camera.framerate = args["framerate"]
    self.camera.rotation = args["camera_rotation"]
    self.recorder = Recorder(args["shot_format"], args["shoot_formats"], self.camera, self.hearer)
    self.online_streamer = OnlineStreamer(self.camera, self.hearer)
    # reusable capture buffer for capture_continuous()
    self.raw_capture = PiRGBArray(self.camera, size=resolution)
    self.object_cascades = self.set_object_cascades(args["cascades"])
    (self.frame_width, self.frame_height) = resolution
    # --- decision / aiming subsystems ---
    self.decider = None
    if args["AI"] == "official_ai":
        self.decider = Decider(args["cascades"][0])
    self.target_locker = LockingSystem(args, resolution, self.decider)
    self.aimer = Aimer()
    self.show_stream = args["show_stream"]  # 'show-stream' argument automatically converted this type.
    self.mark_object = self.__get_mark_object(args["found_object_mark"])
    self.record = args["record"]
    self.augmented = False
    if args["interface"] == "augmented":
        self.augmented = True
    self.mqtt_receimitter = None
    # --- runtime state shared with the worker threads ---
    self.current_frame = np.zeros(shape=(self.frame_height, self.frame_width))
    self.stop_thread = False
    self.active_threads = []
    self.is_watching = False
    self.obj_detected = False
    # Allow the camera to warm up
    # time.sleep(0.1)
def watch(self, stop_thread, format="bgr", caller="security"):
    """The top-level method to provide the video stream for security mode of T_System.

    Args:
        stop_thread: Stop flag of the tread about terminating it outside of the function's loop.
        format (str): Color space format.
        caller (str): The method that calls the stream.
    """
    self.is_watching = True
    if self.record:
        self.recorder.start_shoot(caller)
    logger.debug("stream starting with capture_continuous")
    for frame in self.camera.capture_continuous(self.raw_capture, format=format, use_video_port=True):
        if not self.obj_detected:
            # no detector owns the frame: publish a copy, recycle the buffer,
            # then honor the external stop flag
            self.current_frame = frame.array.copy()
            self.__show_frame(self.current_frame)
            self.__truncate_stream()
            if self.__check_loop_ended(stop_thread):
                break
        else:
            # a detector is using the camera right now; just recycle the
            # capture buffer and back off briefly
            self.__truncate_stream()
            time.sleep(0.1)
    if self.record:
        self.stop_recording()
    self.is_watching = False
def watch_and(self, task):
    """Method to provide starting watching ability of the around of Vision and accordingly starting given task.

    Args:
        task: Task for the seer. Either `learn`, `track` or `secure`.

    Fix: the "secure" branch started its thread *before* registering it in
    `active_threads`, unlike the other branches; the duplicated thread
    setup is unified and registration now always precedes start().
    """
    # map the task name onto the worker target and its arguments;
    # `scan` additionally receives its 3-second delay argument
    if task == "learn":
        target, args = self.learn, (lambda: self.stop_thread,)
    elif task == "track":
        target, args = self.detect_track, (lambda: self.stop_thread,)
    elif task == "secure":
        target, args = self.scan, (lambda: self.stop_thread, 3)
    else:
        # unknown task: keep the original behavior of silently doing nothing
        return
    worker = threading.Thread(target=target, args=args)
    self.active_threads.append(worker)
    worker.start()
def __d_t_without_cv_ta(self, stop_thread, format='bgr'):
    """Method to provide detecting and tracking objects without using OpenCV's tracking API.

    Args:
        stop_thread: Stop flag of the tread about terminating it outside of the function's loop.
        format: Color space format.
    """
    self.is_watching = True
    self.start_recording("track")
    for frame in self.camera.capture_continuous(self.raw_capture, format=format, use_video_port=True):
        if not self.obj_detected:
            self.current_frame = frame.array.copy()
            self.__truncate_stream()
            # run the configured detector (haarcascade or hog/cnn — bound in __init__)
            rgb, detected_boxes = self.detect_things(self.current_frame)
            if detected_boxes:
                # mark the frame as owned so concurrent watchers skip it
                self.obj_detected = True
                reworked_boxes = self.__relocate_detected_coords(detected_boxes)
                if not self.no_recognize:
                    names = self.__recognize_things(rgb, detected_boxes)
                else:
                    names = None
                # self.track is bound to the recognizing / non-recognizing variant
                self.track(self.current_frame, reworked_boxes, names)
                self.obj_detected = False
        if self.__check_loop_ended(stop_thread):
            break
    self.stop_recording()
    self.is_watching = False
def __d_t_with_cv_ta(self, stop_thread, format='bgr'):
    """Method to provide detecting and tracking objects with using OpenCV's tracking API.

    Args:
        stop_thread: Stop flag of the tread about terminating it outside of the function's loop.
        format: Color space format.
    """
    self.is_watching = True
    self.start_recording("track")
    tracked_boxes = []  # this became array. because of overriding.
    names = []
    multi_tracker = cv2.MultiTracker_create()
    # block until an initial detection is available
    rgb, detected_boxes = self.detect_initiate(stop_thread)
    found_count = 0
    d_t_failure_count = 0
    use_detection = 0
    for frame in self.camera.capture_continuous(self.raw_capture, format=format, use_video_port=True):
        self.current_frame = frame.array.copy()
        self.__truncate_stream()
        # re-seed the multi-tracker whenever detection found more objects
        # than are currently being tracked
        if len(detected_boxes) > len(tracked_boxes):
            if not self.no_recognize:
                names = self.__recognize_things(rgb, detected_boxes)
            else:
                names = None
            self.current_frame = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
            # Create MultiTracker object
            multi_tracker = cv2.MultiTracker_create()
            # Initialize MultiTracker
            for box in detected_boxes:
                # box[3] is x,
                # box[0] is y,
                # box[1] is x + w,
                # box[2] is y + h.
                reworked_box = box[3], box[0], box[1] - box[3], box[2] - box[0]
                multi_tracker.add(self.__create_tracker_by_name(), self.current_frame, reworked_box)
            found_count += 1
        # refresh the detector output every 3rd frame to catch new/lost objects
        if use_detection >= 3:
            rgb, detected_boxes = self.detect_things(self.current_frame)
            use_detection = 0
        use_detection += 1
        # Start timer
        timer = cv2.getTickCount()
        # get updated location of objects in subsequent frames
        is_tracking_success, tracked_boxes = multi_tracker.update(self.current_frame)
        if not len(detected_boxes) >= len(tracked_boxes):
            d_t_failure_count += 1
        else:
            d_t_failure_count = 0
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        if is_tracking_success and d_t_failure_count < 5:
            self.track(self.current_frame, tracked_boxes, names)
        elif not is_tracking_success or d_t_failure_count >= 5:
            # Tracking failure
            cv2.putText(self.current_frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            tracked_boxes = []  # for clearing tracked_boxes list.
        # # Display tracker type on frame
        # cv2.putText(self.current_frame, self.tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        #
        # # Display FPS on frame
        # cv2.putText(self.current_frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        if self.__check_loop_ended(stop_thread):
            break
    self.stop_recording()
    self.is_watching = False
def __track_without_recognizing(self, frame, boxes, names):
    """Method to track the objects without recognize them, for detect_track methods.

    Args:
        frame: Frame matrix in rgb format.
        boxes: Tuple variable of locations of detected objects.
        names: Names of the recognized objects (unused in this variant).

    Fix: ``(self.show_stream and self.augmented) or self.show_stream`` is
    logically just ``self.show_stream``; simplified.
    """
    if len(boxes) == 1:
        for (x, y, w, h) in boxes:
            # approximate physical distance inferred from the box width
            physically_distance = self.target_locker.get_physically_distance(w)
            radius = int(sqrt(w * w + h * h) / 2)
            self.target_locker.lock(x, y, w, h)
            self.mark_object(frame, x, y, w, h, radius, physically_distance, (255, 0, 0), 2)
        if self.show_stream:
            self.__show_frame(frame)
        # time.__sleep(0.1) # Allow the servos to complete the moving.
def __track_with_recognizing(self, frame, boxes, names):
    """Method to track the objects with recognize them, for detect_track methods.

    Args:
        frame: Frame matrix in rgb format.
        boxes: Tuple variable of locations of detected objects.
        names: Names of the recognized objects. A person or just an object.

    Fix: ``(self.show_stream and self.augmented) or self.show_stream`` is
    logically just ``self.show_stream``; simplified.
    """
    for (x, y, w, h), name in zip(boxes, names):
        # approximate physical distance inferred from the box width
        physically_distance = self.target_locker.get_physically_distance(w)
        radius = int(sqrt(w * w + h * h) / 2)
        if name == "Unknown":
            # unknown targets only get the rotating-arc marker on screen
            if self.show_stream:
                frame = self.aimer.mark_rotating_arcs(frame, (int(x + w / 2), int(y + h / 2)), radius, physically_distance)
        else:
            self.target_locker.lock(x, y, w, h)
            self.mark_object(frame, x, y, w, h, radius, physically_distance, (255, 0, 0), 2)
    # time.__sleep(0.1) # Allow the servos to complete the moving.
def learn(self, stop_thread, format="bgr"):
"""The top-level method to learn how to track objects.
Args:
stop_thread: Stop flag of the tread about terminating it outside of the function's loop.
format: Color space format.
"""
self.detect_initiate(stop_thread)
self.is_watching = True
self.start_recording("learn")
for frame in self.camera.capture_continuous(self.raw_capture, format=format, use_video_port=True):
self.current_frame = frame.array.copy()
self.__truncate_stream()
rgb, detected_boxes = self.detect_things(self.current_frame)
# names = self.__recognize_things(rgb, detected_boxes)
reworked_boxes = self.__relocate_detected_coords(detected_boxes)
if not len(reworked_boxes) == 1:
# self.__show_frame(self.current_frame)
pass
else:
for (x, y, w, h) in reworked_boxes:
if (self.show_stream and self.augmented) or self.show_stream:
self.mark_object(self.current_frame, x, y, w, h, 30, 50, (255, 0, 0), 2)
obj_width = w
# obj_area = w * h # unit of obj_width is px ^ 2.
self.target_locker.lock(x, y, w, h)
# time.__sleep(0.2) # allow the camera to capture after moving.
for new_frame in self.camera.capture_continuous(self.raw_capture, format=format, use_video_port=True):
self.current_frame = new_frame.array.copy()
self.__truncate_stream()
rgb, detected_boxes = self.detect_things(self.current_frame)
# names = self.__recognize_things(rgb, detected_boxes)
rb_after_move = self.__relocate_detected_coords(detected_boxes)
if not len(rb_after_move) == 1:
pass
else:
for (ex, ey, ew, eh) in rb_after_move: | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
CLINGON
Command Line INterpreter Generator for pythON
Compatible python 2 and 3
(c) <NAME>, https://github.com/francois-vincent
"""
from __future__ import print_function, absolute_import

from future.utils import listitems, iteritems
from past.builtins import basestring

# Fix: abstract base classes moved to collections.abc; the old alias
# `collections.Sequence` was removed in Python 3.10. Fall back for Python 2.
try:
    from collections.abc import Sequence
except ImportError:
    from collections import Sequence
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict
import inspect
import os
import sys
import textwrap

from clingon.utils import read_configuration

__version__ = '0.3.1'

DEBUG = False
DELAY_EXECUTION = False
SYSTEM_EXIT_ERROR_CODE = 1
class ClingonError(RuntimeError):
    """Base class for clingon runtime errors."""
    pass
class RunnerError(ClingonError):
    """Error raised while parsing/running a command line."""
    pass
class RunnerErrorWithUsage(ClingonError):
    """Like RunnerError; presumably the handler also prints usage — confirm at raise sites."""
    pass
class Clizer(object):
"""
This virtual class extracts args off the run() method of any derived subclass
Then it extracts cmd line arguments and launches run() with matched args
"""
SYSTEM_EXIT = True
_help_options = ('--help', '-?')
_version_options = ('--version', '-V')
@classmethod
def _write_error(cls, *args, **kwargs):
    """Write *args* to stderr (joined by 'sep', terminated by 'end');
    exit the process afterwards unless exit=False is passed."""
    sep = kwargs.get('sep', ' ')
    end = kwargs.get('end', '\n')
    sys.stderr.write(sep.join(args) + end)
    if kwargs.get('exit', True):
        return cls._sys_exit()
@staticmethod
def _get_type(arg):
    """Return the type name of *arg* (e.g. 'int', 'str')."""
    type_name = type(arg).__name__
    return type_name
@classmethod
def _sys_exit(cls, code=SYSTEM_EXIT_ERROR_CODE):
    """Exit the process with *code* when SYSTEM_EXIT is set; otherwise
    return the (uncoerced) code so tests can inspect it."""
    if not cls.SYSTEM_EXIT:
        return code
    # exact type check on purpose: any non-int code (including bool) becomes 0
    if type(code) is not int:
        code = 0
    if DEBUG:
        print("Exit with code %d" % code)
    sys.exit(code)
@classmethod
def check_deco_parameters(cls, *args, **kwargs):
    """Validate decorator usage: either a single function positional
    argument, or keyword aliases whose values are strings / sequences
    of strings — never both."""
    both_given = bool(args) and bool(kwargs)
    if both_given or len(args) > 1:
        raise ValueError("This decorator is for a function only")
    if args and not inspect.isfunction(args[0]):
        raise ValueError("This decorator is for a function only")
    for key, value in kwargs.items():
        if not isinstance(value, Sequence):
            raise ValueError(
                "Decorator's keyword '%s' value must be a string or a tuple of strings, found: %s" % (key, value))
def __init__(self, func):
    """Introspect the decorated function *func* and build the option tables.

    Required positional args, optional args (with defaults), varargs and
    alias mappings are extracted here; the cmd-line spellings ('--file-name')
    are derived from the python names ('file_name').
    """
    self.func = func
    self.docstring = func.__doc__
    self.file = inspect.getfile(func)
    self._variables = getattr(func, '_variables', {})
    # NOTE(review): inspect.getargspec was deprecated in 3.0 and removed in
    # Python 3.11 — this needs getfullargspec (attribute names differ) to run
    # on modern interpreters; left as-is to keep Python 2 compatibility.
    argspec = inspect.getargspec(func)
    # do not allow keywords
    if argspec.keywords:
        raise TypeError("Keywords parameter '**%s' is not allowed" % argspec.keywords)
    defaults = argspec.defaults or ()
    # get varargs
    self.varargs = argspec.varargs
    # get required args as a list and optional args as a dict (with default values)
    nb_args, len_defaults = len(argspec.args), len(defaults)
    self.reqargs = argspec.args[:nb_args - len_defaults]
    options = OrderedDict(zip((x.lower() for x in argspec.args[nb_args - len_defaults:]), defaults))
    self._check_booleans(options)
    # make a copy of options for later call of user's decorated function
    self.python_options = OrderedDict(options)
    # make an equivalence dict from line cmd style (--file-name) to python style (file_name) args
    self.options_equ = dict([('-' + x if len(x) == 1 else '--' + '-'.join(x.split('_')), x) for x in options])
    # make a dict of cmd line style arg names to their types
    self.options = OrderedDict(
        [('-' + x if len(x) == 1 else '--' + '-'.join(x.split('_')), options[x]) for x in options])
    # take a copy of original (no aliases yet) optional args for print_help()
    self._options = OrderedDict(self.options)
    # create automatic short options aliases from long options
    self.options_aliases = getattr(self, 'options_aliases', {})
    mismatch = set(self.options_aliases) - set(options)
    if mismatch:
        raise ValueError("This option does not exists so can't be given an alias: " + mismatch.pop())
    for k in options:
        if k not in self.options_aliases and len(k) > 1:
            self.options_aliases[k] = (k[0],)
    # inject aliases into dicts
    for x, t in options.items():
        if x in self.options_aliases:
            alias = self.options_aliases[x]
            if isinstance(alias, basestring):
                alias = self.options_aliases[x] = (alias,)
            new_alias = []
            for a in alias:
                k = '-' + a
                if k in self.options or k in self._version_options:
                    # silently ignore duplicate short alias
                    continue
                self.options[k] = t
                self.options_equ[k] = x
                new_alias.append(a)
            self.options_aliases[x] = new_alias
    if DEBUG:
        print('clize default parameters:',
              self.reqargs + list(options.values()) + (['*' + self.varargs] if self.varargs else []))
@classmethod
def _check_booleans(cls, options):
    """Reject boolean options whose default is True (error is written to
    stderr; _write_error may terminate the process)."""
    for name, default in iteritems(options):
        if default is True:
            cls._write_error("Default value for boolean option %r must be 'False'" % name)
def eval_option_value(self, option):
    """ Evaluates an option
    :param option: a string
    :return: an object of type str, bool, int, float or list
    """
    # NOTE(review): eval() of a CLI/env/config-supplied string is risky even
    # with empty globals/locals; ast.literal_eval would be safer but changes
    # semantics for non-literal expressions, so behavior is kept as-is.
    try:
        value = eval(option, {}, {})
    except (SyntaxError, NameError, TypeError):
        return option
    if type(value) in (str, bool, int, float):
        return value
    if type(value) in (list, tuple):
        for element in value:
            if type(element) not in (str, bool, int, float):
                self._write_error("Value of element of list object has wrong type %s" % element)
        return value
    return option
def get_options_from_environ(self, options):
    """Override option defaults from environment variables named
    <CLINGON_PREFIX>_<OPTION-IN-UPPERCASE> (no-op without a prefix)."""
    prefix = self._variables.get('CLINGON_PREFIX')
    if not prefix:
        return
    for name in list(self.python_options):
        env_value = os.environ.get('%s_%s' % (prefix, name.upper()))
        if env_value is not None:
            options[name] = self.eval_option_value(env_value)
def get_options_from_file(self, options):
    """
    Search and read a configuration file to override options.
    Available formats are python, yaml and json (file extension rules).
    By default, there is no configuration file and this method exits immediately.
    To define a configuration file, use:
    - variable OPTIONS_FILE,
    - optional special parameter --options_file
    search order is:
    - hardcoded if file is an absolute path,
    - hardcoded path in variable OPTIONS_PATH if existing,
    - local directory,
    - ~/.clingon/,
    - /etc/clingon/,
    If a configuration file is found, sets the variable
    options_file_path to effective_path/effective_file.

    Fix: `error` was only ever bound inside the inner except clause, so the
    final error branch could raise NameError (e.g. absolute-path lookup
    failing); it is now initialized up front.
    """
    options_file = self.python_options.get('options_file') or self._variables.get('OPTIONS_FILE')
    if not options_file:
        self._variables['options_file_path'] = None
        return
    options_path = self._variables.get('OPTIONS_PATH')
    options_dict, options_file_path = None, None
    error = None  # ensure the final _write_error(str(error)) cannot NameError
    try:
        if options_path or os.path.isabs(options_file):
            options_file_path, options_dict = read_configuration(options_file, options_path)
        else:
            for path in (os.getcwd(), os.path.expanduser('~/.clingon'), '/etc/clingon/'):
                try:
                    options_file_path, options_dict = read_configuration(options_file, path)
                    break
                except RuntimeError as e:
                    error = e
    except (RuntimeError, TypeError) as e:
        self._write_error(str(e))
    self._variables['options_file_path'] = options_file_path
    if options_dict:
        for k in list(self.python_options):
            default = options_dict.get(k)
            if default is not None:
                options[k] = self.eval_option_value(default)
    else:
        self._write_error(str(error))
def _eval_variables(self):
"""evaluates callable _variables
"""
for k, v in listitems(self._variables):
self._variables[k] = v() if hasattr(v, '__call__') else v
def _get_variable(self, key):
return self._variables.get(key)
def _print_version(self):
source = os.path.dirname(self.file)
source = ' from ' + source if source else ''
self._write_error('version %s%s (Python %s)' %
(self._get_variable('VERSION'), source, sys.version.split()[0]), exit=False)
def start(self, param_string=None):
""" Parses command line parameters
A string can be passed to simulate a cli for test purpose
"""
external_opt = {}
self.get_options_from_environ(external_opt)
self.get_options_from_file(external_opt)
# construct optional args, required args and variable args as we find then in the command line
optargs = {}
reqargs, varargs = [], []
# get parameters from command line, or from parameter string (latter essentially for tests)
if param_string is None:
argv = sys.argv[1:]
else:
import shlex
argv = shlex.split(param_string)
self._eval_variables()
i = 0
while i < len(argv):
# get next parameter
x = argv[i]
# check for help
if x in self._help_options:
self._print_help()
return
# check for version
if x in self._version_options and self._get_variable('VERSION'):
self._print_version()
return
# parameter is an option
if x in self.options:
o_x = self.options[x]
oe_x = self.options_equ[x]
# check for duplicates
if oe_x in optargs:
raise RunnerError("Option '%s' found twice" % x)
# proceed with lists or tuples
if isinstance(o_x, (list, tuple)):
argpos = i
optargs[oe_x] = []
i += 1
# iterate till end of parameters list, or to next option occurrence
while i < len(argv) and argv[i] not in self.options:
# try to convert element to expected type, then store it
try:
optargs[oe_x].append(type(o_x[0])(argv[i]))
except ValueError:
raise RunnerErrorWithUsage("Argument %d of option %s has wrong type (%s expected)" %
(i - argpos, x, self._format_type(o_x[0])))
# if no expected type, just store element
except IndexError:
optargs[oe_x].append(argv[i])
i += 1
# check that list option is not empty
if not len(optargs[oe_x]) and not len(o_x):
raise RunnerErrorWithUsage("Option '%s' should be followed by a list of values" % x)
# check number of element if default list/tuple is not empty
if len(o_x) and len(optargs[oe_x]) != len(o_x):
raise RunnerErrorWithUsage("Option '%s' should be followed by a list of %d %s, found %d" %
(x, len(o_x), self._format_type(o_x[0]), len(optargs[oe_x])))
# proceed boolean
elif type(o_x) is bool:
optargs[oe_x] = True
i += 1
# proceed other types (string, integer, float)
else:
i += 1
# check that option is given a value
if i >= len(argv) or argv[i] in self.options:
raise RunnerErrorWithUsage("Option '%s' should be followed by a %s" %
(x, self._format_type(o_x)))
# try to convert element to expected type, then store it
try:
optargs[oe_x] = type(o_x)(argv[i])
except ValueError:
raise RunnerErrorWithUsage("Argument of option %s has wrong type (%s expected)" %
(x, self._format_type(o_x)))
i += 1
# parameter may be a required or a variable parameter, or unrecognized
else:
if x.startswith('-'):
raise RunnerErrorWithUsage("Unrecognized option '%s'" % argv[i])
elif len(reqargs) < len(self.reqargs):
reqargs.append(argv[i])
elif self.varargs:
varargs.append(argv[i])
else:
raise RunnerErrorWithUsage("Unrecognized parameter | |
<gh_stars>10-100
import numpy as np
from classes.Utils import calculate_average, distance_2points, innerDBSCAN
class FeatureBuilder(object):
# speed group features
speedgroup = {
'all_avrg_x':-1,
'all_avrg_y':-1,
'all_avrg_speed':-1,
'all_avrg_direction_x':0,
'all_avrg_direction_y':0,
'all_inner_dis_to_avrg_pos':0,
'slow_avrg_x':-1,
'slow_avrg_y':-1,
'slow_avrg_speed':-1,
'hir_avrg_x':-1,
'hir_avrg_y':-1,
'hir_avrg_speed':-1,
'sprint_avrg_x':-1,
'sprint_avrg_y':-1,
'sprint_avrg_speed':-1,
}
# teams features
team = {
'dbscan_avrg_x':-1,
'dbscan_avrg_y':-1,
'dbscan_avrg_speed':-1,
'inner_dis_to_dbscan_pos':0,
'gk_x':-1,
'gk_y':-1,
'gk_speed':-1,
'gk_direction_x':0,
'gk_direction_y':0,
'min_x':-1,
'min_x_speed':-1,
'max_x':-1,
'max_x_speed':-1,
'min_y':-1,
'min_y_speed':-1,
'max_y':-1,
'max_y_speed':-1,
'min_speed':-1,
'min_speed_x':-1,
'min_speed_y':-1,
'max_speed':-1,
'max_speed_x':-1,
'max_speed_y':-1
}
# both teams together features
bothteams = {
'avrg_x':-1,
'avrg_y':-1,
'avrg_speed':-1,
'inner_dis_to_avrg_pos':0,
'dbscan_avrg_x':-1,
'dbscan_avrg_y':-1,
'dbscan_avrg_speed':0,
'inner_dis_to_dbscan_pos':0,
}
# referee features
referee = {
'x':-1,
'y':-1,
'speed':-1,
'direction_x':0,
'direction_y':0
}
def __init__(self, player_role_list):
'''
player_role_list: array storing roles (['LB', 'CB', 'RB', .etc'])
'''
if (type(player_role_list)!=list):
raise TypeError
else:
self.player_role_list = player_role_list
# dict array to store spatiotemporal data: referee data, goalkeeper data, and role data
self.referee_data = {'x':[], 'y':[], 'previous_x':[], 'previous_y':[], 'speed':[]}
self.goalkeeper_data = {'home':{'x':[], 'y':[], 'previous_x':[], 'previous_y':[], 'speed':[]},
'away':{'x':[], 'y':[], 'previous_x':[], 'previous_y':[], 'speed':[]}}
self.role_data = {'home':{},'away':{}}
for role in self.player_role_list + ['Team']:
for team, _ in self.role_data.items():
self.role_data[team][role] = {'all_x':[],'all_y':[], 'all_speed':[],
'previous_all_x':[], 'previous_all_y':[],
'hir_x':[], 'hir_y':[], 'hir_speed':[],
'slow_x':[], 'slow_y':[], 'slow_speed':[],
'sprint_x':[], 'sprint_y':[], 'sprint_speed':[]}
def reset_role_data(self):
'''
rest lists in the dict, excluting the keys store previousious data
'''
for team, _ in self.role_data.items():
for role, _ in self.role_data[team].items():
for key, _ in self.role_data[team][role].items():
if 'previous' not in key: self.role_data[team][role][key] = []
def reset_goalkeeper_data(self):
'''
rest lists in the dict, excluting the keys store previousious data
'''
for team, _ in self.goalkeeper_data.items():
for key, _ in self.goalkeeper_data.items():
if 'previous' not in key: self.goalkeeper_data[team][key] = []
def reset_referee_data(self):
'''
rest lists in the dict, excluting the keys store previousious data
'''
for key, _ in self.referee_data.items():
if 'previous' not in key: self.referee_data[key] = []
def empty_speedgroup(self):
'''
Empty rolegroup feature dict
'''
self.speedgroup = {key: 0 if 'direction' in key else -1 for key, values in self.speedgroup.items()}
def empty_team(self):
'''
Empty team feature dict
'''
self.team = {key: 0 if 'direction' in key else -1 for key, values in self.team.items()}
def empty_bothteams(self):
'''
Empty booth-teams featrure dict
'''
self.bothteams = {key: 0 if 'direction' in key else -1 for key, values in self.bothteams.items()}
def empty_referee(self):
'''
Empty refree feature dict
'''
self.referee = {key: 0 if 'direction' in key else -1 for key, values in self.referee.items()}
def speedgroup_to_list(self):
'''
Convert reolegroup dict values to list
Output
------
feature_list: 1-D list
'''
features_list = []
for key, value in self.speedgroup.items():
features_list.append(value)
return features_list
def team_to_list(self):
'''
Convert team dict values to list
Output
------
feature_list: 1-D list
'''
features_list = []
for key, value in self.team.items():
features_list.append(value)
return features_list
def bothteams_to_list(self):
'''
Convert both-team dict values to list
Output
------
feature_list: 1-D list
'''
features_list = []
for key, value in self.bothteams.items():
features_list.append(value)
return features_list
def referee_to_list(self):
'''
Convert refree dict values to list
Output
------
feature_list: 1-D list
'''
features_list = []
for key, value in self.referee.items():
features_list.append(value)
return features_list
def get_feature_labels(self):
'''
Return labels of features set
Output
------
labels: string list
'''
labels = []
for tmp_team in ['home','away']:
# for speedgoup
for tmp_role in self.player_role_list+['Team']:
labels += [tmp_team + '_' + tmp_role + '_' + item for item in self.speedgroup.keys()]
# for team
labels += [tmp_team + '_' + tmp_role + '_' + item for item in self.team.keys()]
# for bothteams
labels += ['bothteams_' + item for item in self.bothteams.keys()]
# for referee
labels += ['referee_' + item for item in self.referee.keys()]
return labels
def calculate_features(self):
'''
Here we calculate all features for each group: speed_group, teams, bothteams, referee
NOTE: Structure of data dict are given above and group key are indicated in the class itself
Outpu:
features: list stores all the claculated fatures
'''
features = [] # list to store all feature set
# empty feature dict
self.empty_speedgroup()
self.empty_team()
self.empty_bothteams()
self.empty_referee()
for tmp_team in ['home','away']:
# SET SPEED GROUP FEATURES (for roles and teams)
# iterate for every role
for tmp_role in self.player_role_list+['Team']:
dict_values = self.role_data[tmp_team][tmp_role] # get dict value
self.empty_speedgroup() # empy feature dict
for tmp_prefix in ['all', 'slow', 'hir', 'sprint']:
# -------------- role speed group -----------------
self.speedgroup[tmp_prefix+'_avrg_x'] = calculate_average(dict_values[tmp_prefix+'_x'])
self.speedgroup[tmp_prefix+'_avrg_y'] = calculate_average(dict_values[tmp_prefix+'_y'])
self.speedgroup[tmp_prefix+'_avrg_speed'] = calculate_average(dict_values[tmp_prefix+'_speed'])
# -------------- team speed group -----------------
if tmp_prefix == 'all':
previous_avrg_x = calculate_average(dict_values['previous_'+tmp_prefix+'_x'])
avrg_x = calculate_average(dict_values[tmp_prefix+'_x'])
previous_avrg_y = calculate_average(dict_values['previous_'+tmp_prefix+'_y'])
avrg_y = calculate_average(dict_values[tmp_prefix+'_y'])
self.role_data[tmp_team][tmp_role]['previous_'+tmp_prefix+'_x'] = dict_values[tmp_prefix+'_x']
self.role_data[tmp_team][tmp_role]['previous_'+tmp_prefix+'_y'] = dict_values[tmp_prefix+'_y']
inner_dis_to_avrg_pos = 0
for pos_i in range(len(dict_values[tmp_prefix+'_x'])):
inner_dis_to_avrg_pos += distance_2points(avrg_x, avrg_y, dict_values[tmp_prefix+'_x'][pos_i], dict_values[tmp_prefix+'_y'][pos_i])
self.speedgroup[tmp_prefix+'_avrg_direction_x'] = -1 if previous_avrg_x>avrg_x else 1 if previous_avrg_x<avrg_x else 0
self.speedgroup[tmp_prefix+'_avrg_direction_y'] = -1 if previous_avrg_y>avrg_y else 1 if previous_avrg_y<avrg_y else 0
self.speedgroup[tmp_prefix+'_inner_dis_to_avrg_pos'] = round(inner_dis_to_avrg_pos, 2)
del previous_avrg_x, avrg_x, previous_avrg_y, avrg_y, inner_dis_to_avrg_pos
features += self.speedgroup_to_list() # add to feature list
# SET SPECIAL TEAM FEATURES
tmp_role = 'Team'
tmp_prefix = 'all'
dict_values = self.role_data[tmp_team][tmp_role] # get dict value
self.empty_team() # empy feature dict
# -------------- player dbscan -----------------
pos_array_2d = np.array([[dict_values[tmp_prefix+'_x'][dict_values_i], dict_values[tmp_prefix+'_y'][dict_values_i]] for dict_values_i in range(len(dict_values[tmp_prefix+'_y']))])
# get dbscan indices
dbscan_indices = innerDBSCAN(pos_array_2d, 20, 4) # min meter = 20, min cluster size = 4
# get the selected indices
dbscan_x = np.array(dict_values[tmp_prefix+'_x'])[dbscan_indices]
dbscan_y = np.array(dict_values[tmp_prefix+'_y'])[dbscan_indices]
dbscan_speed = np.array(dict_values[tmp_prefix+'_speed'])[dbscan_indices]
dbscan_avrg_x = calculate_average(dbscan_x)
dbscan_avrg_y = calculate_average(dbscan_y)
dbscan_avrg_speed = calculate_average(dbscan_speed)
inner_dis_to_dbscan_pos = 0
for pos_i in range(len(dbscan_x)):
inner_dis_to_dbscan_pos += distance_2points(dbscan_avrg_x, dbscan_avrg_y, dbscan_x[pos_i], dbscan_y[pos_i])
self.team['dbscan_avrg_x'] = dbscan_avrg_x
self.team['dbscan_avrg_y'] = dbscan_avrg_y
self.team['dbscan_avrg_speed'] = dbscan_avrg_speed
self.team['inner_dis_to_dbscan_pos'] = round(inner_dis_to_dbscan_pos, 2)
del pos_array_2d, dbscan_indices, dbscan_x, dbscan_y, dbscan_speed, dbscan_avrg_x, dbscan_avrg_y, dbscan_avrg_speed, inner_dis_to_dbscan_pos
# -------------- player normal -----------------
self.team['min_x'] = min(dict_values[tmp_prefix+'_x'])
self.team['min_x_speed'] = dict_values[tmp_prefix+'_speed'][np.argmin(dict_values[tmp_prefix+'_x'])]
self.team['max_x'] = max(dict_values[tmp_prefix+'_x'])
self.team['max_x_speed'] = dict_values[tmp_prefix+'_speed'][np.argmax(dict_values[tmp_prefix+'_x'])]
self.team['min_y'] = min(dict_values[tmp_prefix+'_y'])
self.team['min_y_speed'] = dict_values[tmp_prefix+'_speed'][np.argmin(dict_values[tmp_prefix+'_y'])]
self.team['max_y'] = max(dict_values[tmp_prefix+'_y'])
self.team['max_y_speed'] = dict_values[tmp_prefix+'_speed'][np.argmax(dict_values[tmp_prefix+'_y'])]
self.team['min_speed'] = min(dict_values[tmp_prefix+'_speed'])
self.team['min_speed_x'] = dict_values[tmp_prefix+'_x'][np.argmin(dict_values[tmp_prefix+'_speed'])]
self.team['min_speed_y'] = dict_values[tmp_prefix+'_y'][np.argmin(dict_values[tmp_prefix+'_speed'])]
self.team['max_speed'] = max(dict_values[tmp_prefix+'_speed'])
self.team['max_speed_x'] = dict_values[tmp_prefix+'_x'][np.argmax(dict_values[tmp_prefix+'_speed'])]
self.team['max_speed_y'] = dict_values[tmp_prefix+'_y'][np.argmax(dict_values[tmp_prefix+'_speed'])]
# -------------- goalkeeper -----------------
previous_avrg_x = calculate_average(self.goalkeeper_data[tmp_team]['previous_x'])
avrg_x = calculate_average(self.goalkeeper_data[tmp_team]['x'])
previous_avrg_y = calculate_average(self.goalkeeper_data[tmp_team]['previous_y'])
avrg_y = calculate_average(self.goalkeeper_data[tmp_team]['y'])
self.goalkeeper_data[tmp_team]['previous_x'] = self.goalkeeper_data[tmp_team]['x']
self.goalkeeper_data[tmp_team]['previous_x'] = self.goalkeeper_data[tmp_team]['x']
self.team['gk_x'] = avrg_x
self.team['gk_y'] = avrg_y
self.team['gk_speed'] = calculate_average(self.goalkeeper_data[tmp_team]['speed'])
self.team['gk_direction_x'] = -1 if previous_avrg_x>avrg_x else 1 if previous_avrg_x<avrg_x else 0
self.team['gk_direction_y'] = -1 if previous_avrg_y>avrg_y else 1 if previous_avrg_y<avrg_y else 0
del previous_avrg_x, avrg_x, previous_avrg_y, avrg_y
features += self.team_to_list() # add to feature list
# SET SPECIAL BOTHTEAMS FEATURES
self.empty_bothteams() # empy feature dict
tmp_role = 'Team'
# get values of both teams
bothteams_x = self.role_data['home'][tmp_role]['all_x'] + self.role_data['away'][tmp_role]['all_x']
bothteams_y = self.role_data['home'][tmp_role]['all_y'] + self.role_data['away'][tmp_role]['all_y']
bothteams_speed = self.role_data['home'][tmp_role]['all_speed'] + self.role_data['away'][tmp_role]['all_speed']
# -------------- player normal -----------------
bothteams_avrg_x = calculate_average(bothteams_x)
bothteams_avrg_y = calculate_average(bothteams_y)
bothteams_avrg_speed = calculate_average(bothteams_speed)
inner_dis_to_avrg_pos = 0
for pos_i in range(len(bothteams_x)):
inner_dis_to_avrg_pos += distance_2points(bothteams_avrg_x, bothteams_avrg_y, bothteams_x[pos_i], bothteams_y[pos_i])
self.bothteams['avrg_x'] = bothteams_avrg_x
self.bothteams['avrg_y'] = bothteams_avrg_y
self.bothteams['avrg_speed'] = bothteams_avrg_speed
self.bothteams['inner_dis_to_avrg_pos'] = round(inner_dis_to_avrg_pos, 2)
del bothteams_avrg_x, bothteams_avrg_y, bothteams_avrg_speed, inner_dis_to_avrg_pos
# -------------- player dbscan -----------------
pos_array_2d = np.array([[bothteams_x[teams_val_i], bothteams_y[teams_val_i]] for teams_val_i in range(len(bothteams_x))])
# get dbscan indices
dbscan_indices = innerDBSCAN(pos_array_2d, 20, 7) # min meter = 20, min cluster size = 7
# get the selected indices
dbscan_x = np.array(bothteams_x)[dbscan_indices]
dbscan_y = np.array(bothteams_y)[dbscan_indices]
dbscan_speed = np.array(bothteams_speed)[dbscan_indices]
dbscan_avrg_x = calculate_average(dbscan_x)
dbscan_avrg_y = calculate_average(dbscan_y)
dbscan_avrg_speed = calculate_average(dbscan_speed)
inner_dis_to_dbscan_pos = 0
for pos_i in range(len(dbscan_x)):
inner_dis_to_dbscan_pos += distance_2points(dbscan_avrg_x, dbscan_avrg_y, dbscan_x[pos_i], dbscan_y[pos_i])
self.bothteams['dbscan_avrg_x'] = dbscan_avrg_x
self.bothteams['dbscan_avrg_y'] = dbscan_avrg_y
self.bothteams['dbscan_avrg_speed'] = dbscan_avrg_speed
self.bothteams['inner_dis_to_dbscan_pos'] = round(inner_dis_to_dbscan_pos, 2)
del pos_array_2d, dbscan_indices, dbscan_x, dbscan_y, dbscan_speed, dbscan_avrg_x, dbscan_avrg_y, dbscan_avrg_speed, inner_dis_to_dbscan_pos
del bothteams_x, bothteams_y, bothteams_speed
features += self.bothteams_to_list() # add to feature list
# SET REFEREE FEATURES
self.empty_referee() # empty list
previous_avrg_x = calculate_average(self.referee_data['previous_x'])
avrg_x = calculate_average(self.referee_data['x'])
previous_avrg_y = calculate_average(self.referee_data['previous_y'])
avrg_y | |
obj = union_set(ctx=ctx, ptr=res)
return obj
def get_domain(arg0):
return arg0.domain()
@staticmethod
def from_domain(arg0):
try:
if not arg0.__class__ is union_set:
arg0 = union_set(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_from_domain(isl.isl_union_set_copy(arg0.ptr))
obj = schedule(ctx=ctx, ptr=res)
return obj
def map(arg0):
try:
if not arg0.__class__ is schedule:
arg0 = schedule(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_get_map(arg0.ptr)
obj = union_map(ctx=ctx, ptr=res)
return obj
def get_map(arg0):
return arg0.map()
def pullback(*args):
if len(args) == 2 and args[1].__class__ is union_pw_multi_aff:
ctx = args[0].ctx
res = isl.isl_schedule_pullback_union_pw_multi_aff(isl.isl_schedule_copy(args[0].ptr), isl.isl_union_pw_multi_aff_copy(args[1].ptr))
obj = schedule(ctx=ctx, ptr=res)
return obj
raise Error
def root(arg0):
try:
if not arg0.__class__ is schedule:
arg0 = schedule(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_get_root(arg0.ptr)
obj = schedule_node(ctx=ctx, ptr=res)
return obj
def get_root(arg0):
return arg0.root()
# ctypes signatures for the isl_schedule_* C entry points used by the
# schedule wrapper above; pointers are passed/returned as opaque c_void_p,
# except to_str which returns a malloc'ed char* that the caller must free.
isl.isl_schedule_read_from_str.restype = c_void_p
isl.isl_schedule_read_from_str.argtypes = [Context, c_char_p]
isl.isl_schedule_get_domain.restype = c_void_p
isl.isl_schedule_get_domain.argtypes = [c_void_p]
isl.isl_schedule_from_domain.restype = c_void_p
isl.isl_schedule_from_domain.argtypes = [c_void_p]
isl.isl_schedule_get_map.restype = c_void_p
isl.isl_schedule_get_map.argtypes = [c_void_p]
isl.isl_schedule_pullback_union_pw_multi_aff.restype = c_void_p
isl.isl_schedule_pullback_union_pw_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_schedule_get_root.restype = c_void_p
isl.isl_schedule_get_root.argtypes = [c_void_p]
isl.isl_schedule_copy.restype = c_void_p
isl.isl_schedule_copy.argtypes = [c_void_p]
isl.isl_schedule_free.restype = c_void_p
isl.isl_schedule_free.argtypes = [c_void_p]
isl.isl_schedule_to_str.restype = POINTER(c_char)
isl.isl_schedule_to_str.argtypes = [c_void_p]
class schedule_constraints(object):
    """ctypes wrapper around the isl_schedule_constraints_* C API.

    An instance owns an ``isl_schedule_constraints`` pointer (``self.ptr``)
    inside an isl context (``self.ctx``) and frees it on deletion.  Getters
    follow the file-wide convention of a ``foo()`` worker plus a thin
    ``get_foo()`` alias; setters copy the receiver and return a new wrapper.
    """
    def __init__(self, *args, **keywords):
        # internal construction: adopt an existing pointer/context pair
        if "ptr" in keywords:
            self.ctx = keywords["ctx"]
            self.ptr = keywords["ptr"]
            return
        # public construction: parse constraints from an isl string
        if len(args) == 1 and type(args[0]) == str:
            self.ctx = Context.getDefaultInstance()
            self.ptr = isl.isl_schedule_constraints_read_from_str(self.ctx, args[0].encode('ascii'))
            return
        raise Error
    def __del__(self):
        # free only when construction got far enough to attach a pointer
        if hasattr(self, 'ptr'):
            isl.isl_schedule_constraints_free(self.ptr)
    def __str__(arg0):
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        # the C string is malloc'ed by isl; free it after decoding
        ptr = isl.isl_schedule_constraints_to_str(arg0.ptr)
        res = cast(ptr, c_char_p).value.decode('ascii')
        libc.free(ptr)
        return res
    def __repr__(self):
        # triple-quote when the isl string itself contains double quotes
        s = str(self)
        if '"' in s:
            return 'isl.schedule_constraints("""%s""")' % s
        else:
            return 'isl.schedule_constraints("%s")' % s
    def coincidence(arg0):
        """Return the coincidence constraints as a union_map."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_coincidence(arg0.ptr)
        obj = union_map(ctx=ctx, ptr=res)
        return obj
    def get_coincidence(arg0):
        return arg0.coincidence()
    def compute_schedule(arg0):
        """Compute a schedule satisfying these constraints."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_compute_schedule(isl.isl_schedule_constraints_copy(arg0.ptr))
        obj = schedule(ctx=ctx, ptr=res)
        return obj
    def conditional_validity(arg0):
        """Return the conditional validity constraints as a union_map."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_conditional_validity(arg0.ptr)
        obj = union_map(ctx=ctx, ptr=res)
        return obj
    def get_conditional_validity(arg0):
        return arg0.conditional_validity()
    def conditional_validity_condition(arg0):
        """Return the conditions of the conditional validity constraints."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_conditional_validity_condition(arg0.ptr)
        obj = union_map(ctx=ctx, ptr=res)
        return obj
    def get_conditional_validity_condition(arg0):
        return arg0.conditional_validity_condition()
    def context(arg0):
        """Return the context constraints; 'set' here is the isl set wrapper, not the builtin."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_context(arg0.ptr)
        obj = set(ctx=ctx, ptr=res)
        return obj
    def get_context(arg0):
        return arg0.context()
    def domain(arg0):
        """Return the iteration domain as a union_set."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_domain(arg0.ptr)
        obj = union_set(ctx=ctx, ptr=res)
        return obj
    def get_domain(arg0):
        return arg0.domain()
    @staticmethod
    def on_domain(arg0):
        """Create empty constraints over the given union_set domain."""
        try:
            if not arg0.__class__ is union_set:
                arg0 = union_set(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_on_domain(isl.isl_union_set_copy(arg0.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def proximity(arg0):
        """Return the proximity constraints as a union_map."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_proximity(arg0.ptr)
        obj = union_map(ctx=ctx, ptr=res)
        return obj
    def get_proximity(arg0):
        return arg0.proximity()
    def set_coincidence(arg0, arg1):
        """Return a copy of the constraints with the coincidence map replaced."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        try:
            if not arg1.__class__ is union_map:
                arg1 = union_map(arg1)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_set_coincidence(isl.isl_schedule_constraints_copy(arg0.ptr), isl.isl_union_map_copy(arg1.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def set_conditional_validity(arg0, arg1, arg2):
        """Return a copy with conditional validity (condition, validity) maps replaced."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        try:
            if not arg1.__class__ is union_map:
                arg1 = union_map(arg1)
        except:
            raise
        try:
            if not arg2.__class__ is union_map:
                arg2 = union_map(arg2)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_set_conditional_validity(isl.isl_schedule_constraints_copy(arg0.ptr), isl.isl_union_map_copy(arg1.ptr), isl.isl_union_map_copy(arg2.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def set_context(arg0, arg1):
        """Return a copy of the constraints with the context set replaced."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        try:
            if not arg1.__class__ is set:
                arg1 = set(arg1)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_set_context(isl.isl_schedule_constraints_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def set_proximity(arg0, arg1):
        """Return a copy of the constraints with the proximity map replaced."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        try:
            if not arg1.__class__ is union_map:
                arg1 = union_map(arg1)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_set_proximity(isl.isl_schedule_constraints_copy(arg0.ptr), isl.isl_union_map_copy(arg1.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def set_validity(arg0, arg1):
        """Return a copy of the constraints with the validity map replaced."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        try:
            if not arg1.__class__ is union_map:
                arg1 = union_map(arg1)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_set_validity(isl.isl_schedule_constraints_copy(arg0.ptr), isl.isl_union_map_copy(arg1.ptr))
        obj = schedule_constraints(ctx=ctx, ptr=res)
        return obj
    def validity(arg0):
        """Return the validity constraints as a union_map."""
        try:
            if not arg0.__class__ is schedule_constraints:
                arg0 = schedule_constraints(arg0)
        except:
            raise
        ctx = arg0.ctx
        res = isl.isl_schedule_constraints_get_validity(arg0.ptr)
        obj = union_map(ctx=ctx, ptr=res)
        return obj
    def get_validity(arg0):
        return arg0.validity()
# ctypes signatures for the isl_schedule_constraints_* C entry points used
# by the schedule_constraints wrapper above.
isl.isl_schedule_constraints_read_from_str.restype = c_void_p
isl.isl_schedule_constraints_read_from_str.argtypes = [Context, c_char_p]
isl.isl_schedule_constraints_get_coincidence.restype = c_void_p
isl.isl_schedule_constraints_get_coincidence.argtypes = [c_void_p]
isl.isl_schedule_constraints_compute_schedule.restype = c_void_p
isl.isl_schedule_constraints_compute_schedule.argtypes = [c_void_p]
isl.isl_schedule_constraints_get_conditional_validity.restype = c_void_p
isl.isl_schedule_constraints_get_conditional_validity.argtypes = [c_void_p]
isl.isl_schedule_constraints_get_conditional_validity_condition.restype = c_void_p
isl.isl_schedule_constraints_get_conditional_validity_condition.argtypes = [c_void_p]
isl.isl_schedule_constraints_get_context.restype = c_void_p
isl.isl_schedule_constraints_get_context.argtypes = [c_void_p]
isl.isl_schedule_constraints_get_domain.restype = c_void_p
isl.isl_schedule_constraints_get_domain.argtypes = [c_void_p]
isl.isl_schedule_constraints_on_domain.restype = c_void_p
isl.isl_schedule_constraints_on_domain.argtypes = [c_void_p]
isl.isl_schedule_constraints_get_proximity.restype = c_void_p
isl.isl_schedule_constraints_get_proximity.argtypes = [c_void_p]
isl.isl_schedule_constraints_set_coincidence.restype = c_void_p
isl.isl_schedule_constraints_set_coincidence.argtypes = [c_void_p, c_void_p]
isl.isl_schedule_constraints_set_conditional_validity.restype = c_void_p
isl.isl_schedule_constraints_set_conditional_validity.argtypes = [c_void_p, c_void_p, c_void_p]
isl.isl_schedule_constraints_set_context.restype = c_void_p
isl.isl_schedule_constraints_set_context.argtypes = [c_void_p, c_void_p]
isl.isl_schedule_constraints_set_proximity.restype = c_void_p
isl.isl_schedule_constraints_set_proximity.argtypes = [c_void_p, c_void_p]
isl.isl_schedule_constraints_set_validity.restype = c_void_p
isl.isl_schedule_constraints_set_validity.argtypes = [c_void_p, c_void_p]
isl.isl_schedule_constraints_get_validity.restype = c_void_p
isl.isl_schedule_constraints_get_validity.argtypes = [c_void_p]
isl.isl_schedule_constraints_copy.restype = c_void_p
isl.isl_schedule_constraints_copy.argtypes = [c_void_p]
isl.isl_schedule_constraints_free.restype = c_void_p
isl.isl_schedule_constraints_free.argtypes = [c_void_p]
isl.isl_schedule_constraints_to_str.restype = POINTER(c_char)
isl.isl_schedule_constraints_to_str.argtypes = [c_void_p]
class schedule_node(object):
def __init__(self, *args, **keywords):
if "ptr" in keywords:
self.ctx = keywords["ctx"]
self.ptr = keywords["ptr"]
return
if len(args) == 1 and isinstance(args[0], schedule_node_band):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_context):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_domain):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_expansion):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_extension):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_filter):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_leaf):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_guard):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_mark):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_sequence):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
if len(args) == 1 and isinstance(args[0], schedule_node_set):
self.ctx = args[0].ctx
self.ptr = isl.isl_schedule_node_copy(args[0].ptr)
return
raise Error
    def __del__(self):
        # free only if __init__ got far enough to attach an isl pointer
        if hasattr(self, 'ptr'):
            isl.isl_schedule_node_free(self.ptr)
def __new__(cls, *args, **keywords):
if "ptr" in keywords:
type = isl.isl_schedule_node_get_type(keywords["ptr"])
if type == 0:
return schedule_node_band(**keywords)
if type == 1:
return schedule_node_context(**keywords)
if type == 2:
return schedule_node_domain(**keywords)
if type == 3:
return schedule_node_expansion(**keywords)
if type == 4:
return schedule_node_extension(**keywords)
if type == 5:
return schedule_node_filter(**keywords)
if type == 6:
return schedule_node_leaf(**keywords)
if type == 7:
return schedule_node_guard(**keywords)
if type == 8:
return schedule_node_mark(**keywords)
if type == 9:
return schedule_node_sequence(**keywords)
if type == 10:
return schedule_node_set(**keywords)
raise
return super(schedule_node, cls).__new__(cls)
def __str__(arg0):
try:
if not arg0.__class__ is schedule_node:
arg0 = schedule_node(arg0)
except:
raise
ptr = isl.isl_schedule_node_to_str(arg0.ptr)
res = cast(ptr, c_char_p).value.decode('ascii')
libc.free(ptr)
return res
def __repr__(self):
s = str(self)
if '"' in s:
return 'isl.schedule_node("""%s""")' % s
else:
return 'isl.schedule_node("%s")' % s
def ancestor(arg0, arg1):
try:
if not arg0.__class__ is schedule_node:
arg0 = schedule_node(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_node_ancestor(isl.isl_schedule_node_copy(arg0.ptr), arg1)
obj = schedule_node(ctx=ctx, ptr=res)
return obj
def ancestor_child_position(arg0, arg1):
try:
if not arg0.__class__ is schedule_node:
arg0 = schedule_node(arg0)
except:
raise
try:
if not arg1.__class__ is schedule_node:
arg1 = schedule_node(arg1)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_node_get_ancestor_child_position(arg0.ptr, arg1.ptr)
if res < 0:
raise
return int(res)
def get_ancestor_child_position(arg0, arg1):
return arg0.ancestor_child_position(arg1)
def child(arg0, arg1):
try:
if not arg0.__class__ is schedule_node:
arg0 = schedule_node(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_node_child(isl.isl_schedule_node_copy(arg0.ptr), arg1)
obj = schedule_node(ctx=ctx, ptr=res)
return obj
def child_position(arg0):
try:
if not arg0.__class__ is schedule_node:
arg0 = schedule_node(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_schedule_node_get_child_position(arg0.ptr)
if res < 0:
raise
return int(res)
def get_child_position(arg0):
return arg0.child_position()
    def every_descendant(arg0, arg1):
        """Return True if callback *arg1* returns truthy for every descendant.

        The callback convention mirrors isl_bool: 1 = true, 0 = false,
        -1 = error.  A Python exception raised inside the callback is stashed
        in exc_info and re-raised after the C call returns.
        """
        try:
            if not arg0.__class__ is schedule_node:
                arg0 = schedule_node(arg0)
        except:
            raise
        exc_info = [None]
        fn = CFUNCTYPE(c_int, c_void_p, c_void_p)
        def cb_func(cb_arg0, cb_arg1):
            # wrap the borrowed C pointer in an owning Python node object
            cb_arg0 = schedule_node(ctx=arg0.ctx, ptr=isl.isl_schedule_node_copy(cb_arg0))
            try:
                res = arg1(cb_arg0)
            except BaseException as e:
                exc_info[0] = e
                return -1
            return 1 if res else 0
        # keep a reference to the CFUNCTYPE object for the duration of the call
        cb = fn(cb_func)
        ctx = arg0.ctx
        res = isl.isl_schedule_node_every_descendant(arg0.ptr, cb, None)
        if exc_info[0] is not None:
            raise exc_info[0]
        if res < 0:
            raise
        return bool(res)
def first_child(arg0):
try:
if not | |
Greaves": "",
# "Ebonwood Helmet": "",
# "Ebonwood Breastplate": "",
# "Ebonwood Greaves": "",
# "Rich Mahogany Helmet": "",
# "Rich Mahogany Breastplate": "",
# "Rich Mahogany Greaves": "",
# "Pearlwood Helmet": "",
# "Pearlwood Breastplate": "",
# "Pearlwood Greaves": "",
# "Amethyst Staff": "",
# "Topaz Staff": "",
# "Sapphire Staff": "",
# "Emerald Staff": "",
# "Ruby Staff": "",
# "Diamond Staff": "",
# "Grass Wall": "",
# "Jungle Wall": "",
# "Flower Wall": "",
# "Jetpack": "",
# "Butterfly Wings": "",
# "Cactus Wall": "",
# "Cloud": "",
# "Cloud Wall": "",
# "Seaweed": "",
# "Rune Hat": "",
# "Rune Robe": "",
# "Mushroom Spear": "",
# "Terra Blade": "",
# "Grenade Launcher": "",
# "Rocket Launcher": "",
# "Proximity Mine Launcher": "",
# "Fairy Wings": "",
# "Slime Block": "",
# "Flesh Block": "",
# "Mushroom Wall": "",
# "Rain Cloud": "",
# "Bone Block": "",
# "Frozen Slime Block": "",
# "Bone Block Wall": "",
# "Slime Block Wall": "",
# "Flesh Block Wall": "",
# "Rocket I": "",
# "Rocket II": "",
# "Rocket III": "",
# "Rocket IV": "",
# "Asphalt Block": "",
# "Cobalt Pickaxe": "",
# "Mythril Pickaxe": "",
# "Adamantite Pickaxe": "",
# "Clentaminator": "",
# "Green Solution": "",
# "Blue Solution": "",
# "Purple Solution": "",
# "Dark Blue Solution": "",
# "Red Solution": "",
# "Harpy Wings": "",
# "Bone Wings": "",
# "Hammush": "",
# "Nettle Burst": "",
# "Ankh Banner": "",
# "Snake Banner": "",
# "Omega Banner": "",
# "Crimson Helmet": "",
# "Crimson Scalemail": "",
# "Crimson Greaves": "",
# "Blood Butcherer": "",
# "Tendon Bow": "",
# "Flesh Grinder": "",
# "Deathbringer Pickaxe": "",
# "Blood Lust Cluster": "",
# "The Undertaker": "",
# "The Meatball": "",
# "The Rotted Fork": "",
# "Eskimo Hood": "",
# "Eskimo Coat": "",
# "Eskimo Pants": "",
# "Living Wood Chair": "",
# "Cactus Chair": "",
# "Bone Chair": "",
# "Flesh Chair": "",
# "Mushroom Chair": "",
# "Bone Work Bench": "",
# "Cactus Work Bench": "",
# "Flesh Work Bench": "",
# "Mushroom Work Bench": "",
# "Slime Work Bench": "",
# "Cactus Door": "",
# "Flesh Door": "",
# "Mushroom Door": "",
# "Living Wood Door": "",
# "Bone Door": "",
# "Flame Wings": "",
# "Frozen Wings": "",
# "Spectre Wings": "",
# "Sunplate Block": "",
# "Disc Wall": "",
# "Skyware Chair": "",
# "Bone Table": "",
# "Flesh Table": "",
# "Living Wood Table": "",
# "Skyware Table": "",
# "Living Wood Chest": "",
# "Living Wood Wand": "",
# "Purple Ice Block": "",
# "Pink Ice Block": "",
# "Red Ice Block": "",
# "Crimstone Block": "",
# "Skyware Door": "",
# "Skyware Chest": "",
# "Steampunk Hat": "",
# "Steampunk Shirt": "",
# "Steampunk Pants": "",
# "Bee Hat": "",
# "Bee Shirt": "",
# "Bee Pants": "",
# "World Banner": "",
# "Sun Banner": "",
# "Gravity Banner": "",
# "Pharaoh's Mask": "",
# "Actuator": "",
# "Blue Wrench": "",
# "Green Wrench": "",
# "Blue Pressure Plate": "",
# "Yellow Pressure Plate": "",
# "Discount Card": "",
# "Lucky Coin": "",
# "Unicorn on a Stick": "",
# "Sandstorm in a Bottle": "",
# "Boreal Wood Sofa": "",
# "Beach Ball": "",
# "Charm of Myths": "",
# "Moon Shell": "",
# "Star Veil": "",
# "Water Walking Boots": "",
# "Tiara": "",
# "Pharaoh's Robe": "",
# "Green Cap": "",
# "Mushroom Cap": "",
# "Tam O' Shanter": "",
# "Mummy Mask": "",
# "Mummy Shirt": "",
# "Mummy Pants": "",
# "Cowboy Hat": "",
# "Cowboy Jacket": "",
# "Cowboy Pants": "",
# "Pirate Hat": "",
# "Pirate Shirt": "",
# "Pirate Pants": "",
# "Viking Helmet": "",
# "Crimtane Ore": "",
# "Cactus Sword": "",
# "Cactus Pickaxe": "",
# "Ice Brick": "",
# "Ice Brick Wall": "",
# "Adhesive Bandage": "",
# "Armor Polish": "",
# "Bezoar": "",
# "Blindfold": "",
# "Fast Clock": "",
# "Megaphone": "",
# "Nazar": "",
# "Vitamins": "",
# "Trifold Map": "",
# "Cactus Helmet": "",
# "Cactus Breastplate": "",
# "Cactus Leggings": "",
# "Power Glove": "",
# "Lightning Boots": "",
# "Sun Stone": "",
# "Moon Stone": "",
# "Armor Bracing": "",
# "Medicated Bandage": "",
# "The Plan": "",
# "Countercurse Mantra": "",
# "Coin Gun": "",
# "Lava Charm": "",
# "Obsidian Water Walking Boots": "",
# "Lava Waders": "",
# "Pure Water Fountain": "",
# "Desert Water Fountain": "",
# "Shadewood": "",
# "Shadewood Door": "",
# "Shadewood Platform": "",
# "Shadewood Chest": "",
# "Shadewood Chair": "",
# "Shadewood Work Bench": "",
# "Shadewood Table": "",
# "Shadewood Dresser": "",
# "Shadewood Piano": "",
# "Shadewood Bed": "",
# "Shadewood Sword": "",
# "Shadewood Hammer": "",
# "Shadewood Bow": "",
# "Shadewood Helmet": "",
# "Shadewood Breastplate": "",
# "Shadewood Greaves": "",
# "Shadewood Wall": "",
# "Cannon": "",
# "Cannonball": "",
# "Flare Gun": "",
# "Flare": "",
# "Bone Wand": "",
# "Leaf Wand": "",
# "Flying Carpet": "",
# "Avenger Emblem": "",
# "Mechanical Glove": "",
# "Land Mine": "",
# "Paladin's Shield": "",
# "Web Slinger": "",
# "Jungle Water Fountain": "",
# "Icy Water Fountain": "",
# "Corrupt Water Fountain": "",
# "Crimson Water Fountain": "",
# "Hallowed Water Fountain": "",
# "Blood Water Fountain": "",
# "Umbrella": "",
# "Chlorophyte Ore": "",
# "Steampunk Wings": "",
# "Snowball": "",
# "Ice Skates": "",
# "Snowball Launcher": "",
# "Web Covered Chest": "",
# "Climbing Claws": "",
# "Ancient Iron Helmet": "",
# "Ancient Gold Helmet": "",
# "Ancient Shadow Helmet": "",
# "Ancient Shadow Scalemail": "",
# "Ancient Shadow Greaves": "",
# "Ancient Necro Helmet": "",
# "Ancient Cobalt Helmet": "",
# "Ancient Cobalt Breastplate": "",
# "Ancient Cobalt Leggings": "",
# "Black Belt": "",
# "Boomstick": "",
# "Rope": "",
# "Campfire": "",
# "Marshmallow": "",
# "Marshmallow on a Stick": "",
# "Cooked Marshmallow": "",
# "Red Rocket": "",
# "Green Rocket": "",
# "Blue Rocket": "",
# "Yellow Rocket": "",
# "Ice Torch": "",
# "Shoe Spikes": "",
# "Tiger Climbing Gear": "",
# "Tabi": "",
# "Pink Eskimo Hood": "",
# "Pink Eskimo Coat": "",
# "Pink Eskimo Pants": "",
# "Pink Thread": "",
# "Mana Regeneration Band": "",
# "Sandstorm in a Balloon": "",
# "Master Ninja Gear": "",
"Rope Coil": "rope coil is made with 10 rope",
# "Blowgun": "",
# "Blizzard in a Bottle": "",
"Frostburn Arrow": "made with 10 wooden arrows and 1 ice torch",
# "Pickaxe Axe": "",
# "Cobalt Waraxe": "",
# "Mythril Waraxe": "",
# "Adamantite Waraxe": "",
# "Eater's Bone": "",
# "Blend-O-Matic": "",
# "Meat Grinder": "",
# "Extractinator": "",
# "Solidifier": "",
# "Amber": "",
# "Confetti Gun": "",
# "Chlorophyte Mask": "",
# "Chlorophyte Helmet": "",
# "Chlorophyte Headgear": "",
# "Chlorophyte Plate Mail": "",
# "Chlorophyte Greaves": "",
# "Chlorophyte Bar": "",
# "Red Dye": "",
# "Orange Dye": "",
# "Yellow Dye": "",
# "Lime Dye": "",
# "Green Dye": "",
# "Teal Dye": "",
# "Cyan Dye": "",
# "Sky Blue Dye": "",
# "Blue Dye": "",
# "Purple Dye": "",
# "Violet Dye": "",
# "Pink Dye": "",
# "Red and Black Dye": "",
# "Orange and Black Dye": "",
# "Yellow and Black Dye": "",
# "Lime and Black Dye": "",
# "Green and Black Dye": "",
# "Teal and Black Dye": "",
# "Cyan and Black Dye": "",
# "Sky Blue and Black Dye": "",
# "Blue and Black Dye": "",
# "Purple and Black Dye": "",
# "Violet and Black Dye": "",
# "Pink and Black Dye": "",
# "Flame Dye": | |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-11-29 17:48
import json
from typing import Union, List, Optional, Dict, Any, Tuple
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from hanlp_common.document import Document
try:
    # Prefer ``requests`` when available: it pools connections and encodes the
    # JSON body (including the Content-Type header) itself.
    # noinspection PyUnresolvedReferences
    import requests

    def _post(url, form: Dict[str, Any], headers: Dict[str, Any], timeout=10) -> str:
        """POST ``form`` as JSON to ``url`` and return the response body as text.

        Raises:
            HTTPError: If the server responds with a non-200 status code.
        """
        response = requests.post(url, json=form, headers=headers, timeout=timeout)
        if response.status_code != 200:
            raise HTTPError(url, response.status_code, response.text, response.headers, None)
        return response.text
except ImportError:
    def _post(url, form: Dict[str, Any], headers: Dict[str, Any], timeout=10) -> str:
        """Fallback JSON POST built on urllib for when ``requests`` is absent."""
        request = Request(url, json.dumps(form).encode())
        # Parity fix: requests' ``json=`` sets this header implicitly; without it
        # a strict server may reject the body (cf. the documented 422 error).
        request.add_header('Content-Type', 'application/json')
        for k, v in headers.items():
            request.add_header(k, v)
        return urlopen(request, timeout=timeout).read().decode()
class HanLPClient(object):
def __init__(self, url: str, auth: str = None, language=None, timeout=10) -> None:
"""
Args:
url (str): An API endpoint to a service provider.
auth (str): An auth key licenced from a service provider.
language (str): The default language for each :func:`~hanlp_restful.HanLPClient.parse` call.
Contact the service provider for the list of languages supported.
Conventionally, ``zh`` is used for Chinese and ``mul`` for multilingual.
Leave ``None`` to use the default language on server.
timeout (int): Maximum waiting time in seconds for a request.
"""
super().__init__()
self._language = language
self._timeout = timeout
self._url = url
if auth is None:
import os
auth = os.getenv('HANLP_AUTH', None)
self._auth = auth
def parse(self,
text: Union[str, List[str]] = None,
tokens: List[List[str]] = None,
tasks: Optional[Union[str, List[str]]] = None,
skip_tasks: Optional[Union[str, List[str]]] = None,
language: str = None,
) -> Document:
"""
Parse a piece of text.
Args:
text: A document (str), or a list of sentences (List[str]).
tokens: A list of sentences where each sentence is a list of tokens.
tasks: The tasks to predict.
skip_tasks: The tasks to skip.
language: The language of input text or tokens. ``None`` to use the default language on server.
Returns:
A :class:`~hanlp_common.document.Document`.
Raises:
HTTPError: Any errors happening on the Internet side or the server side. Refer to the ``code`` and ``msg``
of the exception for more details. A list of common errors :
- ``400 Bad Request`` indicates that the server cannot process the request due to a client
fault (e.g., text too long, language unsupported).
- ``401 Unauthorized`` indicates that the request lacks **valid** ``auth`` credentials for the API.
- ``422 Unprocessable Entity`` indicates that the content type of the request entity is not in
proper json format.
- ``429 Too Many Requests`` indicates the user has sent too many requests in a given
amount of time ("rate limiting").
"""
assert text or tokens, 'At least one of text or tokens has to be specified.'
response = self._send_post_json(self._url + '/parse', {
'text': text,
'tokens': tokens,
'tasks': tasks,
'skip_tasks': skip_tasks,
'language': language or self._language
})
return Document(response)
def __call__(self,
text: Union[str, List[str]] = None,
tokens: List[List[str]] = None,
tasks: Optional[Union[str, List[str]]] = None,
skip_tasks: Optional[Union[str, List[str]]] = None,
language: str = None,
) -> Document:
"""
A shortcut of :meth:`~hanlp_restful.HanLPClient.parse`.
"""
return self.parse(text, tokens, tasks, skip_tasks, language)
def about(self) -> Dict[str, Any]:
"""Get the information about server and your client.
Returns:
A dict containing your rate limit and server version etc.
"""
info = self._send_get_json(self._url + '/about', {})
return Document(info)
def _send_post(self, url, form: Dict[str, Any]):
request = Request(url, json.dumps(form).encode())
self._add_headers(request)
return self._fire_request(request)
def _fire_request(self, request):
return urlopen(request, timeout=self._timeout).read().decode()
def _send_post_json(self, url, form: Dict[str, Any]):
headers = dict()
if self._auth:
headers['Authorization'] = f'Basic {self._auth}'
return json.loads(_post(url, form, headers, self._timeout))
def _send_get(self, url, form: Dict[str, Any]):
request = Request(url + '?' + urlencode(form))
self._add_headers(request)
return self._fire_request(request)
def _add_headers(self, request):
if self._auth:
request.add_header('Authorization', f'Basic {self._auth}')
def _send_get_json(self, url, form: Dict[str, Any]):
return json.loads(self._send_get(url, form))
def text_style_transfer(self, text: Union[str, List[str]], target_style: str, language: str = None) \
-> Union[str, List[str]]:
""" Text style transfer aims to change the style of the input text to the target style while preserving its
content.
Args:
text: Source text.
target_style: Target style.
language: The language of input text. ``None`` to use the default language.
Returns:
Text or a list of text of the target style.
Examples::
HanLP.text_style_transfer(['国家对中石油抱有很大的期望.', '要用创新去推动高质量的发展。'],
target_style='gov_doc')
# Output:
[
'国家对中石油寄予厚望。',
'要以创新驱动高质量发展。'
]
HanLP.text_style_transfer('我看到了窗户外面有白色的云和绿色的森林',
target_style='modern_poetry')
# Output:
'我看见窗外的白云绿林'
"""
response = self._send_post_json(self._url + '/text_style_transfer',
{'text': text, 'target_style': target_style,
'language': language or self._language})
return response
def semantic_textual_similarity(self, text: Union[Tuple[str, str], List[Tuple[str, str]]], language: str = None) \
-> Union[float, List[float]]:
""" Semantic textual similarity deals with determining how similar two pieces of texts are.
Args:
text: A pair or pairs of text.
language: The language of input text. ``None`` to use the default language.
Returns:
Similarities.
Examples::
HanLP.semantic_textual_similarity([
('看图猜一电影名', '看图猜电影'),
('无线路由器怎么无线上网', '无线上网卡和无线路由器怎么用'),
('北京到上海的动车票', '上海到北京的动车票'),
])
# Output:
[
0.9764469, # Similarity of ('看图猜一电影名', '看图猜电影')
0.0, # Similarity of ('无线路由器怎么无线上网', '无线上网卡和无线路由器怎么用')
0.0034587 # Similarity of ('北京到上海的动车票', '上海到北京的动车票')
]
"""
response = self._send_post_json(self._url + '/semantic_textual_similarity',
{'text': text, 'language': language or self._language})
return response
def coreference_resolution(self, text: Optional[str] = None, tokens: Optional[List[List[str]]] = None,
speakers: Optional[List[str]] = None, language: Optional[str] = None) -> Union[
Dict[str, Union[List[str], List[List[Tuple[str, int, int]]]]], List[List[Tuple[str, int, int]]]]:
r""" Coreference resolution is the task of clustering mentions in text that refer to the same underlying
real world entities.
Args:
text: A piece of text, usually a document without tokenization.
tokens: A list of sentences where each sentence is a list of tokens.
speakers: A list of speakers where each speaker is a ``str`` representing the speaker's ID, e.g., ``Tom``.
language: The language of input text. ``None`` to use the default language.
Returns:
When ``text`` is specified, return the clusters and tokens. Otherwise just the clusters, In this case, you need to ``sum(tokens, [])`` in order to match the span indices with tokens
Examples::
HanLP.coreference_resolution('我姐送我她的猫。我很喜欢它。')
# Output:
{'clusters': [
[['我', 0, 1], ['我', 3, 4], ['我', 8, 9]], # 指代说话人
[['我姐', 0, 2], ['她', 4, 5]], # 指代说话人的姐姐
[['她的猫', 4, 7], ['它', 11, 12]]], # 指代说话人的姐姐的猫
'tokens': ['我', '姐', '送', '我', '她', '的', '猫', '。',
'我', '很', '喜欢', '它', '。']}
HanLP.coreference_resolution(
tokens=[['我', '姐', '送', '我', '她', '的', '猫', '。'],
['我', '很', '喜欢', '它', '。']])
# Output:
[
[['我', 0, 1], ['我', 3, 4], ['我', 8, 9]], # 指代说话人
[['我姐', 0, 2], ['她', 4, 5]], # 指代说话人的姐姐
[['她的猫', 4, 7], ['它', 11, 12]]], # 指代说话人的姐姐的猫
.. image:: https://file.hankcs.com/img/coref_demo_small.png
:alt: Coreference resolution visualization
"""
response = self._send_post_json(self._url + '/coreference_resolution',
{'text': text, 'tokens': tokens, 'speakers': speakers,
'language': language or self._language})
return response
def tokenize(self, text: Union[str, List[str]], coarse: Optional[bool] = None, language=None) -> List[List[str]]:
""" Split a document into sentences and tokenize them. Note that it is always faster to tokenize a whole
document than to tokenize each sentence one by one. So avoid calling this method sentence by sentence but put
sentences into a ``list`` and pass them to the ``text`` argument.
Args:
text: A document (``str``), or a list of sentences (``List[str]``).
coarse: Whether to perform coarse-grained or fine-grained tokenization.
language: The language of input text. ``None`` to use the default language.
Returns:
A list of tokenized sentences.
Examples::
# Avoid tokenizing sentence by sentence, it is expensive:
HanLP.tokenize('商品和服务。')
[['商品', '和', '服务', '。']]
HanLP.tokenize('阿婆主来到北京立方庭参观自然语义科技公司')
[['阿婆主', '来到', '北京', '立方庭', '参观', '自然', '语义', '科技', '公司']]
# Instead, the following codes are much faster:
HanLP.tokenize('商品和服务。阿婆主来到北京立方庭参观自然语义科技公司')
[['商品', '和', '服务', '。'],
['阿婆主', '来到', '北京', '立方庭', '参观', '自然', '语义', '科技', '公司']]
# To tokenize with coarse-grained standard:
HanLP.tokenize('商品和服务。阿婆主来到北京立方庭参观自然语义科技公司', coarse=True)
[['商品', '和', '服务', '。'],
['阿婆主', '来到', '北京', '立方庭', '参观', '自然语义科技公司']]
# To tokenize pre-segmented sentences:
HanLP.tokenize(['商品和服务。', '当下雨天地面积水分外严重'])
[['商品', '和', '服务', '。'],
['当', '下雨天', '地面', '积水', '分', '外', '严重']]
# Multilingual tokenization by specifying language='mul':
HanLP.tokenize(
['In 2021, HanLPv2.1 delivers state-of-the-art multilingual NLP techniques
'to production environment.',
'2021年、HanLPv2.1は次世代の最先端多言語NLP技術を本番環境に導入します。',
'2021年 HanLPv2.1为生产环境带来次世代最先进的多语种NLP技术。'], language='mul')
[['In', '2021', ',', 'HanLPv2.1', 'delivers', 'state-of-the-art', 'multilingual',
'NLP', 'techniques', 'to', 'production', 'environment', '.'],
['2021', '年', '、', 'HanLPv2.1', 'は', '次', '世代', 'の', '最', '先端', '多',
'言語', 'NLP', '技術', 'を', '本番', '環境', 'に', '導入', 'します', '。'],
['2021', '年', 'HanLPv2.1', '为', '生产', '环境', '带来', '次世代', '最', '先进的',
'多', '语种', 'NLP', '技术', '。']]
"""
language = language or self._language
if coarse and language and language != | |
is None and not r.id:
resource.configure(editable = False,
deletable = False
)
# JS to show/hide Cook Island fields
s3.scripts.append("/%s/static/themes/DRRPP/js/drrpp.js" % current.request.application)
if r.method == "read":
table_pl = s3db.project_location
table_l = s3db.gis_location
countries = [row.name for row in
db((table_pl.project_id == r.record.id) &
(table_pl.location_id == table_l.id)
).select(table_l.name)
]
if not ("Cook Islands" in countries and len(countries) == 1):
s3db.project_drrpp.L1.readable = False
s3db.project_drrpp.pifacc.readable = False
s3db.project_drrpp.jnap.readable = False
# Filter Options
project_hfa_opts = s3db.project_hfa_opts()
hfa_options = dict((key, "HFA %s" % key)
for key in project_hfa_opts)
#hfa_options[None] = NONE # to search NO HFA
project_rfa_opts = s3db.project_rfa_opts()
rfa_options = dict((key, "RFA %s" % key)
for key in project_rfa_opts)
#rfa_options[None] = NONE # to search NO RFA
project_pifacc_opts = s3db.project_pifacc_opts()
pifacc_options = dict((key, "PIFACC %s" % key)
for key in project_pifacc_opts)
#pifacc_options[None] = NONE # to search NO PIFACC
project_jnap_opts = s3db.project_jnap_opts()
jnap_options = dict((key, "JNAP %s" % key)
for key in project_jnap_opts)
#jnap_options[None] = NONE # to search NO JNAP
# Filter widgets
from s3 import S3TextFilter, S3OptionsFilter, get_s3_filter_opts
filter_widgets = [
S3TextFilter(["name",
"code",
"description",
"location.location_id",
"hazard.name",
"theme.name",
],
label = T("Search Projects"),
comment = T("Search for a Project by name, code, or description."),
),
S3OptionsFilter("status_id",
label = T("Status"),
cols = 4,
),
S3OptionsFilter("location.location_id",
label = T("Country"),
cols = 3,
hidden = True,
),
#S3OptionsFilter("drrpp.L1",
# label = T("Cook Islands"),
# cols = 3,
# hidden = True,
# ),
S3OptionsFilter("hazard.id",
label = T("Hazard"),
options = lambda: \
get_s3_filter_opts("project_hazard",
translate=True),
help_field = s3db.project_hazard_help_fields,
cols = 4,
hidden = True,
),
S3OptionsFilter("theme.id",
label = T("Theme"),
options = lambda: \
get_s3_filter_opts("project_theme",
translate=True),
help_field = s3db.project_theme_help_fields,
cols = 4,
# Don't group
size = None,
hidden = True,
),
S3OptionsFilter("drr.hfa",
label = T("HFA"),
options = hfa_options,
help_field = project_hfa_opts,
cols = 5,
hidden = True,
),
S3OptionsFilter("drrpp.rfa",
label = T("RFA"),
options = rfa_options,
help_field = project_rfa_opts,
cols = 6,
hidden = True,
),
S3OptionsFilter("drrpp.pifacc",
label = T("PIFACC"),
options = pifacc_options,
help_field = project_pifacc_opts,
cols = 6,
hidden = True,
),
S3OptionsFilter("drrpp.jnap",
label = T("JNAP"),
options = jnap_options,
help_field = project_jnap_opts,
cols = 6,
hidden = True,
),
S3OptionsFilter("organisation_id",
label = T("Lead Organization"),
cols = 3,
hidden = True,
),
S3OptionsFilter("partner.organisation_id",
label = T("Partners"),
cols = 3,
hidden = True,
),
S3OptionsFilter("donor.organisation_id",
label = T("Donors"),
cols = 3,
hidden = True,
)
]
resource.configure(filter_widgets=filter_widgets)
return True
s3.prep = custom_prep
# Custom Crud Form
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
crud_form = S3SQLCustomForm(
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
"drrpp.duration",
S3SQLInlineComponent(
"location",
label = T("Countries"),
fields = ["location_id"],
orderby = "location_id$name",
render_list = True
),
"drrpp.L1",
S3SQLInlineComponentCheckbox(
"hazard",
label = T("Hazards"),
field = "hazard_id",
option_help = "comments",
cols = 4,
),
S3SQLInlineComponentCheckbox(
"theme",
label = T("Themes"),
field = "theme_id",
option_help = "comments",
cols = 3,
),
"objectives",
"drrpp.activities",
# Outputs
S3SQLInlineComponent(
"output",
label = T("Outputs"),
fields = ["name", "status"],
),
"drr.hfa",
"drrpp.rfa",
"drrpp.pifacc",
"drrpp.jnap",
"organisation_id",
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
"comments", # NB This is labelled 'Role' in DRRPP
],
filterby = dict(field = "role",
options = [2, 9]),
default = {"role": 2}
),
# Donors
S3SQLInlineComponent(
"organisation",
name = "donor",
label = T("Donor(s)"),
fields = ["organisation_id",
"amount",
"currency",
],
filterby = dict(field = "role",
options = [3]),
default = {"role": 3}
),
"budget",
"drrpp.local_budget",
"drrpp.local_currency",
"drrpp.focal_person",
"drrpp.organisation_id",
"drrpp.email",
# Files
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = ["file", "comments"],
filterby = dict(field = "file",
options = "",
invert = True,
)
),
# Links
S3SQLInlineComponent(
"document",
name = "url",
label = T("Links"),
fields = ["url", "comments"],
filterby = dict(field = "url",
options = None,
invert = True,
)
),
"drrpp.parent_project",
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
subheadings = {1: "hazard",
2: "theme",
3: "objectives",
4: "drr_hfa",
5: "drrpp_rfa",
6: "drrpp_pifacc",
7: "drrpp_jnap",
8: "organisation_id",
},
)
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def customise_project_framework_controller(**attr):
    """Customise the project_framework controller (Policies & Strategies list)."""
    s3db = current.s3db
    s3 = current.response.s3
    # Load normal model
    table = s3db.project_framework
    # Custom CRUD Strings
    s3.crud_strings.project_framework.title_list = \
        T("Policies & Strategies List")
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Run the standard prep first, when one is installed
        output = standard_prep(r) if callable(standard_prep) else True
        # Don't show Update/Delete button on List View
        if r.interactive and r.method is None:
            s3db.configure("project_framework",
                           deletable = False,
                           editable = False,
                           insertable = False,
                           )
        return output
    s3.prep = custom_prep
    return attr
settings.customise_project_framework_controller = customise_project_framework_controller
# -----------------------------------------------------------------------------
def customise_project_location_controller(**attr):
    """
        Customise the project_location controller (Project Map):
        adds the drrpp component, map CRUD strings and the search filters.
    """
    s3db = current.s3db
    s3 = current.response.s3
    # Load normal model
    table = s3db.project_location
    # Custom Components
    # One drrpp record per project (multiple=False)
    s3db.add_components("project_project",
                        project_drrpp = {"joinby": "project_id",
                                         "multiple": False,
                                         },
                        )
    # Custom CRUD Strings
    s3.crud_strings.project_location.title_map = T("Project Map")
    # Custom Search Filters
    from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
    filter_widgets = [
        S3TextFilter(["project_id$name",
                      "project_id$code",
                      "project_id$description",
                      #"location_id$name",
                      #"project_id$organisation.name",
                      #"project_id$organisation.acronym",
                      ],
                     label=T("Search Projects"),
                     _class="filter-search",
                     ),
        S3OptionsFilter("project_id$status_id",
                        label = T("Status"),
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 3,
                        #hidden=True,
                        ),
        # Country filter restricted to the L0 level
        S3LocationFilter("location_id",
                         label = T("Country"),
                         levels = ("L0",),
                         widget = "groupedpts",
                         #widget = "multiselect",
                         cols = 3,
                         hidden = True,
                         ),
        S3OptionsFilter("project_id$hazard_project.hazard_id",
                        label = T("Hazard"),
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 4,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$theme_project.theme_id",
                        label = T("Theme"),
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 4,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$drr.hfa",
                        label = T("HFA"),
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 5,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$drrpp.rfa",
                        label = T("RFA"),
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 6,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$organisation_id",
                        label = T("Lead Organization"),
                        represent = "%(name)s",
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 3,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$partner.organisation_id",
                        label = T("Partners"),
                        represent = "%(name)s",
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 3,
                        hidden = True,
                        ),
        S3OptionsFilter("project_id$donor.organisation_id",
                        label = T("Donors"),
                        represent = "%(name)s",
                        widget = "groupedpts",
                        #widget = "multiselect",
                        cols = 3,
                        hidden = True,
                        ),
        ]
    s3db.configure("project_location",
                   filter_widgets = filter_widgets,
                   # Add CSS to default class better than patching
                   #map_submit = (T("Search"), "search-button"),
                   map_advanced = (T("Advanced Search"), T("Simple Search")),
                   )
    return attr
settings.customise_project_location_controller = customise_project_location_controller
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
    """
        Customise pr_person controller

        @todo: SavedSearch deprecated,
               re-implement with saved filters / S3Notify
    """
    s3db = current.s3db
    # Load normal model (side effect: makes pr_person available for the CRUD strings below)
    table = s3db.pr_person
    # Custom CRUD Strings
    current.response.s3.crud_strings.pr_person.title_display = T("My Page")
    # Customise saved search
    # NOTE: kept commented-out as reference material for the @todo above
    #table = s3db.pr_saved_search
    #table.url.label = T("Display Search")
    #
    #def url_represent(url):
    #    return TAG[""](
    #            A(T("List"),
    #            _href = url,
    #            _class = "action-btn"
    #            ),
    #            A(T("Matrix"),
    #            # @ToDo: Fix for S3Search deprecation
    #            _href = url.replace("search", "report"),
    #            _class = "action-btn"
    #            ),
    #            A(T("Chart"),
    #            # @ToDo: Fix for S3Search deprecation
    #            _href = url.replace("search", "report?chart=breakdown%3Arows"),
    #            _class = "action-btn"
    #            ),
    #            A(T("Map"),
    #            # @ToDo: Fix for S3Search deprecation
    #            _href = url.replace("project/search", "location/map"),
    #            _class = "action-btn"
    #            )
    #            )
    #table.url.represent = url_represent
    #
    #s3db.configure("pr_saved_search",
    #               list_fields = ["name",
    #                              "url",
    #                              ]
    #               )
    #
    #attr["rheader"] = H3(T("Saved Searches"))
    return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
    """
        Customise org_organisation controller to just show Name field
    """
    s3 = current.response.s3
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Run the standard prep first, when one is installed
        output = standard_prep(r) if callable(standard_prep) else True
        if r.interactive and r.method == "create":
            # Hide every field on the create form except the organisation name
            table = current.s3db.org_organisation
            for field in table:
                if field.name != "name":
                    field.readable = field.writable = False
        return output
    s3.prep = custom_prep
    return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# =============================================================================
# Enabled Modules
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module | |
# Find cutoff
if clim is None:
self.__cutoff = cutoff * np.max(vx ** 2 + vy ** 2)
else:
self.__cutoff = cutoff * clim[1]
# Plot
_, _, self.__plot = plot_vector(vx, vy, extent = extent, scale = scale, fig = self.__fig, ax = self.__ax, cmap = cmap, clim = clim, cutoff = cutoff)
    # Updates a vector field
    #
    # vx: The x-component of the vectors
    # vy: The y-component of the vectors
    def update_vector(self, vx, vy):
        # Redraw the stored vector plot with new components, reusing the
        # scale and cutoff saved when the plot was first created
        update_plot_vector(self.__plot, vx, vy, scale = self.__scale, cutoff = self.__cutoff)
# Plot a vector stream
#
# vx: The vector components along the x axis to plot
# vy: The vector components along the y axis to plot
# extent: Used to label the axis must be given as [x_min, x_max, y_min, y_max]
# scale: Function to scale the values of the field
# cmap: The colour map to plot the vectors with
# clim: Array containing the (min, max) values in the colour map, these are the raw values of the vector lengths,
# not the scaled values, if None then it will find the scale automatially by the minimum and maximum
# values of the lengths
# density: How many stream lines should be drawn
# length: The minimum length of the lines (In some scaled coordinates)
def plot_streams(self, vx, vy, extent = [0, 1, 0, 1], scale = default_scale, cmap = "coolwarm", clim = None, density = 1, length = 1):
# Save scale
self.__scale = scale
# Plot
_, _, self.__plot = plot_streams(vx, vy, extent = extent, scale = scale, fig = self.__fig, ax = self.__ax, cmap = cmap, clim = clim, density = density, length = length)
    # Updates a stream plot
    #
    # vx: The x-component of the vectors
    # vy: The y-component of the vectors
    def update_streams(self, vx, vy):
        # Redraw the stored stream plot with new components, reusing the
        # scale saved when the plot was first created
        update_plot_streams(self.__plot, vx, vy, scale = self.__scale)
# A class to take samples of a simulation every timestep
#
# Sim: The simulation to sample from, it will automatically add this sampler to the sim
class sampler:
    def __init__(self, Sim):
        # Refuse anything that is not a genuine sim instance
        if not isinstance(Sim, sim):
            raise Exception(f"Sim has wrong type, it is {str(type(Sim)):s} but it should be {str(sim):s}")
        self.sim = Sim
        # Register with the simulation so sample() runs every timestep
        self.sim.add_sampler(self)
        # Parallel lists: sample times and the corresponding sampled values
        self.data = []
        self.t = []

    # Take one sample
    #
    # Sim: The simulation the sample is to be taken from
    def sample(self):
        # Record the current simulation time; subclasses append to self.data
        self.t.append(self.sim.get_t())

    # Defines what to sample
    def sample_data(self):
        pass

    # Retrieves all the samples stored
    def get_samples(self):
        return self.t, self.data
# A sampler which samples numbers each timestep
#
# Sim: The simulation to sample from, it will automatically add this sampler to the sim
class sampler_number(sampler):
    # Plots the data with t on the x-axis
    #
    # fmt: The fmt data for the plot, this is the type of curve and colour
    # title: The title of the plot
    # xlabel: The xlabel of the plot
    # ylabel: The ylabel of the plot
    # label: The label of the curve
    # legend: Set to True if you want a legend
    # figsize: The figure size, if ax is given this is ignored
    # dpi: The resolution of the plot, if ax is given this is ignored
    # ax: The ax to plot on, if None it will create its own
    def plot(self, fmt = "-", title = "", xlabel = "", ylabel = "", label = "", legend = False, figsize = (10, 10), dpi = 100, ax = None):
        # Create a fresh figure unless the caller supplied an axis
        fig = None
        if ax is None:
            fig, ax = plt.subplots(figsize = figsize, dpi = dpi)
        # Draw the sampled numbers against their timestamps
        ax.plot(self.t, self.data, fmt, label = label)
        # Attach the labels
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # Only an explicit legend = True enables the legend
        if legend is True:
            ax.legend()
        # fig is None when the caller provided its own axis
        return fig, ax

    # Take one sample
    def sample(self):
        # Record the timestamp (base class), then store the sampled number
        super().sample()
        self.data.append(self.sample_data())
# A sampler which samples a field each timestep
#
# Sim: The simulation to sample from, it will automatically add this sampler to the sim
# Points: numpy array of all the points to sample from, the x,y,z-coordinates are in the first axis
# hat: An array defining the directions of the hat vector, it should have a shape of type
# Points.shape + (3,) or (3,) for constant vectors. Leave as None if sampling from a scalar field
# single: True if you don't include the N-dimension in the hat vector
class sampler_field(sampler):
    """Sampler that records a (scalar or projected vector) field each timestep."""
    # Sim:    The simulation to sample from; registered via the base class
    # Points: numpy array of the points to sample, x/y/z-coordinates in the
    #         first axis
    # hat:    directions to project vector samples onto, shape of type
    #         Points.shape + (3,) or (3,) for constant vectors; None when
    #         sampling from a scalar field
    # single: True if the N-dimension is not included in the hat vector
    def __init__(self, Sim, Points, hat = None, single = False):
        super().__init__(Sim)
        # Make sure points are of correct type
        if not isinstance(Points, np.ndarray):
            raise Exception(f"Points has wrong type, it is {str(type(Points)):s} but it should be {str(np.ndarray):s}")
        # Save the points
        self.points = Points
        # Make sure the hat is None or an array
        if not (isinstance(hat, np.ndarray) or hat is None):
            raise Exception(f"hat has wrong type, it is {str(type(hat)):s} but it should be {str(np.ndarray):s} or None")
        # Save the hat
        self.hat = hat
        # Make sure single is of correct type
        if not isinstance(single, bool):
            raise Exception(f"single has wrong type, it is {str(type(single)):s} but it should be {str(bool):s}")
        # Save the single
        self.single = single
    # Takes a sample from the simulation; sample_data() must return the field
    # object to read from
    def sample(self):
        super().sample()
        # Sample from a scalar field
        if self.hat is None:
            self.data.append(self.sim.sample_values(self.sample_data(), self.points).copy())
        # Sample from a vector field, projected onto hat
        else:
            self.data.append(self.sim.sample_vectors(self.sample_data(), self.points, self.hat, single = self.single).copy())
    # Creates a video using the data it has sampled
    #
    # Name:    The name of the video file to be saved
    # FPS:     How many frames per second the video should have
    # figsize: The size of the figure
    # dpi:     The resolution of the figure
    # NOTE(review): the np.array default for figsize is shared across calls —
    # safe only as long as callees never mutate it; confirm.
    def make_video(self, Name, FPS = 30, figsize = np.array([10., 10.]), dpi = 100):
        # Create the video object
        self.video = video(Name, FPS = FPS, figsize = figsize, dpi = dpi)
        # First frame goes through start_video, the rest through update_video
        self.start_video(self.t[0], self.data[0])
        self.video.update()
        # Create the remaining frames
        for t, data in zip(self.t[1:], self.data[1:]):
            self.update_video(t, data)
            self.video.update()
        # Finish the video
        self.video.finish()
    # Creates the first frame of a video; overridden by subclasses
    #
    # t:    The timestamp of the frame
    # Data: The data for the frame
    def start_video(self, t, Data):
        pass
    # Creates the next frame of the video; overridden by subclasses
    #
    # t:    The timestamp of the frame
    # Data: The data for the frame
    def update_video(self, t, Data):
        pass
# A sampler which samples a field in 2D
#
# Sim: The simulation to sample from, it will automatically add this sampler to the sim
# Points: numpy array of all the points to sample from, the x,y,z-coordinates are in the first axis
# hat: An array defining the directions of the hat vector, it should have a shape of type
# Points.shape + (3,) or (3,) for constant vectors. Leave as None if sampling from a scalar field
class sampler_field_scalar(sampler_field):
    def __init__(self, Sim, Points, hat = None):
        """Forward to sampler_field with single = True (constant-hat mode)."""
        super().__init__(Sim, Points, hat = hat, single = True)
# Creates a video using the data it has sampled
#
# Name: The name of the video file to be saved
# FPS: How many frames per second the video should have
# figsize: The size of the figure in
# dpi: The resolution of the figure
# extent: Used to label the axis must be given as [x_min, x_max, y_min, y_max]
# scale: Function to scale the values of the field
# cmap: The colour map to plot the scalar field with
# clim: Array containing the (min, max) values in the colour map, these are the raw values of the field,
# not the scaled values, if None then it | |
to repartition")
    @derived_from(pd.Series)
    def fillna(self, value):
        # Delegate to pandas' fillna on every partition; the docstring is
        # inherited from pd.Series via the @derived_from decorator.
        return self.map_partitions(M.fillna, value=value)
    def sample(self, frac, replace=False, random_state=None):
        """ Random sample of items

        Parameters
        ----------
        frac : float
            Fraction of axis items to return.
        replace : boolean, optional
            Sample with or without replacement. Default = False.
        random_state : int or ``np.random.RandomState``
            If int we create a new RandomState with this as the seed
            Otherwise we draw from the passed RandomState

        See Also
        --------
        dask.DataFrame.random_split, pd.DataFrame.sample
        """
        if random_state is None:
            random_state = np.random.RandomState()
        # One task per partition, each seeded with its own derived state so
        # the sample is reproducible for a given random_state.
        name = 'sample-' + tokenize(self, frac, replace, random_state)
        state_data = random_state_data(self.npartitions, random_state)
        dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
               for i, state in enumerate(state_data)}
        return new_dd_object(merge(self.dask, dsk), name,
                             self._meta, self.divisions)
    def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
        """ Export frame to hdf file(s)

        Export dataframe to one or multiple hdf5 files or nodes.
        Exported hdf format is pandas' hdf table format only.
        Data saved by this function should be read by pandas dataframe
        compatible reader.

        By providing a single asterisk in either the path_or_buf or key
        parameters you direct dask to save each partition to a different file
        or node (respectively). The asterisk will be replaced with a zero
        padded partition number, as this is the default implementation of
        name_function.

        When writing to a single hdf node in a single hdf file, all hdf save
        tasks are required to execute in a specific order, often becoming the
        bottleneck of the entire execution graph. Saving to multiple nodes or
        files removes that restriction (order is still preserved by enforcing
        order on output, using name_function) and enables executing save tasks
        in parallel.

        Parameters
        ----------
        path_or_buf: HDFStore object or string
            Destination file(s). If string, can contain a single asterisk to
            save each partition to a different file. Only one asterisk is
            allowed in both path_or_buf and key parameters.
        key: string
            A node / group path in file, can contain a single asterisk to save
            each partition to a different hdf node in a single file. Only one
            asterisk is allowed in both path_or_buf and key parameters.
        format: optional, default 'table'
            Default hdf storage format, currently only pandas' 'table' format
            is supported.
        mode: optional, {'a', 'w', 'r+'}, default 'a'
            ``'a'``
                Append: Add data to existing file(s) or create new.
            ``'w'``
                Write: overwrite any existing files with new ones.
            ``'r+'``
                Append to existing files, files must already exist.
        append: optional, default False
            If False, overwrites existing node with the same name otherwise
            appends to it.
        complevel: optional, 0-9, default 0
            compression level, higher means better compression ratio and
            possibly more CPU time. Depends on complib.
        complib: {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
            If complevel > 0 compress using this compression library when
            possible
        fletcher32: bool, default False
            If True and compression is used, additionally apply the fletcher32
            checksum.
        get: callable, optional
            A scheduler `get` function to use. If not provided, the default is
            to check the global settings first, and then fall back to defaults
            for the collections.
        dask_kwargs: dict, optional
            A dictionary of keyword arguments passed to the `get` function
            used.
        name_function: callable, optional, default None
            A callable called for each partition that accepts a single int
            representing the partition number. name_function must return a
            string representation of a partition's index in a way that will
            preserve the partition's location after a string sort.
            If None, a default name_function is used. The default name_function
            will return a zero padded string of received int. See
            dask.utils.build_name_function for more info.
        compute: bool, default True
            If True, execute computation of resulting dask graph.
            If False, return a Delayed object.
        lock: bool, None or lock object, default None
            In to_hdf locks are needed for two reasons. First, to protect
            against writing to the same file from multiple processes or threads
            simultaneously. Second, default libhdf5 is not thread safe, so we
            must additionally lock on its usage. By default if lock is None
            lock will be determined optimally based on path_or_buf, key and the
            scheduler used. Manually setting this parameter is usually not
            required to improve performance.

            Alternatively, you can specify specific values:
            If False, no locking will occur. If True, default lock object will
            be created (multiprocessing.Manager.Lock on multiprocessing
            scheduler, Threading.Lock otherwise), This can be used to force
            using a lock in scenarios the default behavior will be to avoid
            locking. Else, value is assumed to implement the lock interface,
            and will be the lock object used.

        See Also
        --------
        dask.DataFrame.read_hdf: reading hdf files
        dask.Series.read_hdf: reading hdf files

        Examples
        --------
        Saving data to a single file:

        >>> df.to_hdf('output.hdf', '/data')            # doctest: +SKIP

        Saving data to multiple nodes:

        >>> with pd.HDFStore('output.hdf') as fh:
        ...     df.to_hdf(fh, '/data*')
        ...     fh.keys()                               # doctest: +SKIP
        ['/data0', '/data1']

        Or multiple files:

        >>> df.to_hdf('output_*.hdf', '/data')          # doctest: +SKIP

        Saving multiple files with the multiprocessing scheduler and manually
        disabling locks:

        >>> df.to_hdf('output_*.hdf', '/data',
        ...           get=dask.multiprocessing.get, lock=False)  # doctest: +SKIP
        """
        # All heavy lifting lives in the io module; this is a thin forwarder.
        from .io import to_hdf
        return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_csv(self, filename, **kwargs):
"""Write DataFrame to a series of comma-separated values (csv) files
One filename per partition will be created. You can specify the
filenames in a variety of ways.
Use a globstring::
>>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/export-0.csv
/path/to/data/export-1.csv
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP
::
/path/to/data/export-2015-01-01.csv
/path/to/data/export-2015-01-02.csv
...
You can also provide an explicit list of paths::
>>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP
>>> df.to_csv(paths) # doctest: +SKIP
Parameters
----------
filename : string
Path glob indicating the naming scheme for the output files
name_function : callable, default None
Function accepting an integer (partition index) and producing a
string to replace the asterisk in the given filename globstring.
Should preserve the lexicographic order of partitions
compression : string or None
String like 'gzip' or 'xz'. Must support efficient random access.
Filenames with extensions corresponding to known compression
algorithms (gz, bz2) will be compressed accordingly automatically
sep : character, default ','
Field delimiter for the output file
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
nanRep : None
deprecated, use na_rep
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of | |
= DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = 0.6j
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1.isclose(res2))
def test_rmul_scalar_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = numpy.float64(2.303)
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1.isclose(res2))
def test_rmul_scalar_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = numpy.complex128(-1.5j + 7.7)
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1.isclose(res2))
def test_rmul_bad_multiplier(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op = "0.5" * op
def test_truediv_and_div_real(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.5
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res.isclose(correct))
# Test if done out of place
self.assertTrue(op.isclose(original))
def test_truediv_and_div_complex(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.6j
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res.isclose(correct))
# Test if done out of place
self.assertTrue(op.isclose(original))
def test_truediv_and_div_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.float64(2.303)
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res.isclose(correct))
# Test if done out of place
self.assertTrue(op.isclose(original))
def test_truediv_and_div_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.complex128(566.4j + 0.3)
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res.isclose(correct))
# Test if done out of place
self.assertTrue(op.isclose(original))
def test_truediv_bad_divisor(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op = op / "0.5"
def test_itruediv_and_idiv_real(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.5
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op.isclose(correct))
# Test if done in-place
self.assertFalse(op.isclose(original))
def test_itruediv_and_idiv_complex(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.6j
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op.isclose(correct))
# Test if done in-place
self.assertFalse(op.isclose(original))
def test_itruediv_and_idiv_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.float64(2.3030)
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op.isclose(correct))
# Test if done in-place
self.assertFalse(op.isclose(original))
def test_itruediv_and_idiv_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.complex128(12.3 + 7.4j)
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op.isclose(correct))
# Test if done in-place
self.assertFalse(op.isclose(original))
def test_itruediv_bad_divisor(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op /= "0.5"
def test_iadd_different_term(self):
term_a = ((1, 1), (3, 0), (8, 1))
term_b = ((1, 1), (3, 1), (8, 0))
a = DummyOperator1(term_a, 1.0)
a += DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], 0.5)
a += DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], 1.0)
def test_iadd_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
op += "0.5"
def test_add(self):
term_a = ((1, 1), (3, 0), (8, 1))
term_b = ((1, 0), (3, 0), (8, 1))
a = DummyOperator1(term_a, 1.0)
b = DummyOperator1(term_b, 0.5)
res = a + b + b
self.assertEqual(len(res.terms), 2)
self.assertEqual(res.terms[term_a], 1.0)
self.assertEqual(res.terms[term_b], 1.0)
# Test out of place
self.assertTrue(a.isclose(DummyOperator1(term_a, 1.0)))
self.assertTrue(b.isclose(DummyOperator1(term_b, 0.5)))
def test_add_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
_ = op + "0.5"
def test_sub(self):
term_a = ((1, 1), (3, 1), (8, 1))
term_b = ((1, 0), (3, 1), (8, 1))
a = DummyOperator1(term_a, 1.0)
b = DummyOperator1(term_b, 0.5)
res = a - b
self.assertEqual(len(res.terms), 2)
self.assertEqual(res.terms[term_a], 1.0)
self.assertEqual(res.terms[term_b], -0.5)
res2 = b - a
self.assertEqual(len(res2.terms), 2)
self.assertEqual(res2.terms[term_a], -1.0)
self.assertEqual(res2.terms[term_b], 0.5)
def test_sub_bad_subtrahend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
_ = op - "0.5"
def test_isub_different_term(self):
term_a = ((1, 1), (3, 1), (8, 0))
term_b = ((1, 0), (3, 1), (8, 1))
a = DummyOperator1(term_a, 1.0)
a -= DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], -0.5)
a -= DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], -1.0)
def test_isub_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
op -= "0.5"
def test_neg(self):
op = DummyOperator1(((1, 1), (3, 1), (8, 1)), 0.5)
_ = -op
# out of place
self.assertTrue(op.isclose(DummyOperator1(((1, 1), (3, 1), (8, 1)),
0.5)))
correct = -1.0 * op
self.assertTrue(correct.isclose(-op))
def test_pow_square_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
squared = term ** 2
expected = DummyOperator1(ops + ops, coeff ** 2)
self.assertTrue(squared.isclose(term * term))
self.assertTrue(squared.isclose(expected))
def test_pow_zero_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
zerod = term ** 0
expected = DummyOperator1(())
self.assertTrue(expected.isclose(zerod))
def test_pow_one_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
self.assertTrue(term.isclose(term ** 1))
def test_pow_high_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
high = term ** 10
expected = DummyOperator1(ops * 10, coeff ** 10)
self.assertTrue(expected.isclose(high))
def test_pow_neg_error(self):
with self.assertRaises(ValueError):
DummyOperator1() ** -1
def test_pow_nonint_error(self):
with self.assertRaises(ValueError):
DummyOperator1('3 2^') ** 0.5
def test_compress_terms(self):
op = (DummyOperator1('3^ 1', 0.3 + 3e-11j) +
DummyOperator1('2^ 3', 5e-10) +
DummyOperator1('1^ 3', 1e-3))
op_compressed = (DummyOperator1('3^ 1', 0.3) +
DummyOperator1('1^ 3', 1e-3))
op.compress(1e-7)
self.assertTrue(op_compressed.isclose(op))
def test_str(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
self.assertEqual(str(op), "0.5 [1^ 3 8^]")
op2 = DummyOperator1((), 2)
self.assertEqual(str(op2), "2 []")
op3 = DummyOperator1()
self.assertEqual(str(op3), "0")
def test_rep(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
# Not necessary, repr could do something in addition
self.assertEqual(repr(op), str(op))
class SymbolicOperatorTest2(unittest.TestCase):
"""Test the subclass DummyOperator2."""
def test_init_defaults(self):
loc_op = DummyOperator2()
self.assertTrue(len(loc_op.terms) == 0)
def test_init_tuple(self):
coefficient = 0.5
loc_op = ((0, 'X'), (5, 'Y'), (6, 'Z'))
qubit_op = DummyOperator2(loc_op, coefficient)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(qubit_op.terms[loc_op] == coefficient)
def test_init_list(self):
coefficient = 0.6j
loc_op = [(0, 'X'), (5, 'Y'), (6, 'Z')]
qubit_op = DummyOperator2(loc_op, coefficient)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(qubit_op.terms[tuple(loc_op)] == coefficient)
def test_init_str(self):
qubit_op = DummyOperator2('X0 Y5 Z12', -1.)
correct = ((0, 'X'), (5, 'Y'), (12, 'Z'))
self.assertTrue(correct in qubit_op.terms)
self.assertTrue(qubit_op.terms[correct] == -1.0)
def test_init_long_str(self):
qubit_op = DummyOperator2(
'(-2.0+3.0j) [X0 Y1] +\n\n -1.0[ X2 Y3 ] - []', -1.)
correct = \
DummyOperator2('X0 Y1', complex(2., -3.)) + \
DummyOperator2('X2 Y3', 1.) + \
DummyOperator2('', 1.)
self.assertEqual(len((qubit_op-correct).terms), 0)
reparsed_op = DummyOperator2(str(qubit_op))
self.assertEqual(len((qubit_op-reparsed_op).terms), 0)
qubit_op = DummyOperator2('[X0 X1] + [Y0 Y1]')
correct = DummyOperator2('X0 X1') + DummyOperator2('Y0 Y1')
self.assertTrue(qubit_op.isclose(correct))
self.assertTrue(qubit_op.isclose(DummyOperator2(str(qubit_op))))
def test_init_str_identity(self):
qubit_op = DummyOperator2('', 2.)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(() in qubit_op.terms)
self.assertAlmostEqual(qubit_op.terms[()], 2.)
def test_init_bad_term(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2(2)
def test_init_bad_coefficient(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2('X0', "0.5")
def test_init_bad_action(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2('Q0')
def test_init_bad_action_in_tuple(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2(((1, 'Q'),))
def test_init_bad_qubit_num_in_tuple(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2((("1", 'X'),))
def test_init_bad_tuple(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2(((0, 1, 'X'),))
def test_init_bad_str(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2('X')
def test_init_bad_qubit_num(self):
with self.assertRaises(ValueError):
qubit_op = DummyOperator2('X-1')
def test_isclose_abs_tol(self):
a = DummyOperator2('X0', -1.)
b = DummyOperator2('X0', -1.05)
c = DummyOperator2('X0', -1.11)
self.assertTrue(a.isclose(b, rel_tol=1e-14, abs_tol=0.1))
self.assertTrue(not a.isclose(c, rel_tol=1e-14, abs_tol=0.1))
a = DummyOperator2('X0', -1.0j)
b = DummyOperator2('X0', -1.05j)
c = DummyOperator2('X0', -1.11j)
self.assertTrue(a.isclose(b, rel_tol=1e-14, abs_tol=0.1))
self.assertTrue(not a.isclose(c, rel_tol=1e-14, abs_tol=0.1))
    def test_compress(self):
        """compress() drops negligible terms and demotes near-real complex
        coefficients to float, using the supplied absolute threshold."""
        # Below the default threshold: the term disappears entirely
        a = DummyOperator2('X0', .9e-12)
        self.assertTrue(len(a.terms) == 1)
        a.compress()
        self.assertTrue(len(a.terms) == 0)
        # Imaginary part above threshold: coefficient kept as-is
        a = DummyOperator2('X0', 1. + 1j)
        a.compress(.5)
        self.assertTrue(len(a.terms) == 1)
        for term in a.terms:
            self.assertTrue(a.terms[term] == 1. + 1j)
        # Imaginary part below threshold: demoted to the real part only
        a = DummyOperator2('X0', 1.1 + 1j)
        a.compress(1.)
        self.assertTrue(len(a.terms) == 1)
        for term in a.terms:
            self.assertTrue(a.terms[term] == 1.1)
        # Mixed operator: default threshold keeps both complex terms
        a = DummyOperator2('X0', 1.1 + 1j) + DummyOperator2('X1', 1.e-6j)
        a.compress()
        self.assertTrue(len(a.terms) == 2)
        for term in a.terms:
            self.assertTrue(isinstance(a.terms[term], complex))
        # Larger threshold removes the tiny term but keeps complex type
        a.compress(1.e-5)
        self.assertTrue(len(a.terms) == 1)
        for term in a.terms:
            self.assertTrue(isinstance(a.terms[term], complex))
        # Even larger threshold also strips the imaginary part
        a.compress(1.)
        self.assertTrue(len(a.terms) == 1)
        for term in a.terms:
            self.assertTrue(isinstance(a.terms[term], float))
def test_isclose_rel_tol(self):
a = DummyOperator2('X0', 1)
b = DummyOperator2('X0', 2)
self.assertTrue(a.isclose(b, rel_tol=2.5, abs_tol=0.1))
# Test symmetry
self.assertTrue(a.isclose(b, rel_tol=1, abs_tol=0.1))
self.assertTrue(b.isclose(a, rel_tol=1, abs_tol=0.1))
def test_isclose_zero_terms(self):
op = DummyOperator2(((1, 'Y'), (0, 'X')), -1j) * 0
self.assertTrue(op.isclose(
DummyOperator2((), 0.0), rel_tol=1e-12, abs_tol=1e-12))
self.assertTrue(DummyOperator2((), 0.0).isclose(
op, rel_tol=1e-12, abs_tol=1e-12))
def test_isclose_different_terms(self):
a = DummyOperator2(((1, 'Y'),), -0.1j)
b = DummyOperator2(((1, 'X'),), -0.1j)
self.assertTrue(a.isclose(b, rel_tol=1e-12, abs_tol=0.2))
self.assertTrue(not a.isclose(b, | |
# Repository: patriotemeritus/LO-PHI
"""
Class for controlling virtual machines using libvirt
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import multiprocessing
import logging
logger = logging.getLogger(__name__)
import time
import os
import shutil
from subprocess import call
# 3rd Party
try:
import libvirt
except:
logger.error("python-libvirt is not installed! (sudo apt-get install python-libvirt)")
# LO-PHI
import lophi.globals as G
from lophi.sensors.control import ControlSensor
from lophi.actuation.keypressgenerator import KeypressGeneratorVirtual
import lophi.actuation.rfb as RFB
# Mutex used for libvirt connections
libvirt_mutex = multiprocessing.Lock()
# libvirt XML defining the isolated LO-PHI network: a "lophi-virt" bridge
# with a private 192.168.1.0/24 subnet and a DHCP pool for guests.
XML_NETWORK = """
<network>
<name>network-lophi</name>
<bridge name='lophi-virt' stp='off' delay='0' />
<mac address='aa:bb:cc:dd:ee:ff'/>
<ip address='192.168.1.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.1.2' end='192.168.1.254' />
</dhcp>
</ip>
</network>
"""
# libvirt nwfilter that drops inbound frames whose source MAC is not the
# gateway's, isolating guests from one another on the shared bridge.
XML_NWFILTER = """
<filter name='isolated-lophi' chain='root'>
<rule action='drop' direction='in' priority='500'>
<mac match='no' srcmacaddr='$GATEWAY_MAC'/>
</rule>
</filter>
"""
class ControlSensorVirtual(ControlSensor):
"""
Control sensor for Virtual machines
"""
ISOLATED_NETWORK_CREATED = False
def __init__(self, vm_name, vm_type=None,isolated_network=True, **kargs):
"""
Initialize
"""
if os.getuid() != 0:
logger.error("You likely have to run libvirt as root.")
# Do we need to mutex these accesses?
self.REQUIRE_MUTEX = False
if "require_mutex" in kargs and kargs['require_mutex']:
self.REQUIRE_MUTEX = True
# Meta data
self.vm_name = vm_name
self.MACHINE_TYPE = vm_type
self.name = vm_name+"-ControlSensor"
# Should this be created on the isolated LO-PHI network?
self.ISOLATED_NETWORK = isolated_network
# See if our VM already exists
dom = self._connect()
# Dose our domain exist?
if dom is not None and vm_type is None:
# Let's determine the type of VM automatically
logger.debug("Detecting type of vm for %s"%self.vm_name)
from xml.dom.minidom import parseString
xml = dom.XMLDesc(0)
xml_dom = parseString(xml)
root_node = xml_dom.childNodes[0]
domain_type = root_node.getAttribute("type")
if domain_type == "kvm":
self.MACHINE_TYPE = G.MACHINE_TYPES.KVM
elif domain_type == "xen":
self.MACHINE_TYPE = G.MACHINE_TYPES.XEN
# If we are creating it from within LO-PHI make sure we use our
# isolated network
if self.ISOLATED_NETWORK and not self.ISOLATED_NETWORK_CREATED:
net = nwf = None
try:
# Create our filter and our network
try:
net = self._libvirt_conn.networkLookupByName("network-lophi")
except:
pass
try:
nwf = self._libvirt_conn.nwfilterLookupByName("isolated-lophi")
except:
pass
# Create our network and filter if they don't already exist
if net is None or net is "":
self._libvirt_conn.networkCreateXML(XML_NETWORK)
if nwf is None or nwf is "":
self._libvirt_conn.nwfilterDefineXML(XML_NWFILTER)
ControlSensorVirtual.ISOLATED_NETWORK_CREATED = True
logger.debug("Isolated network and rules created")
except:
logger.error("Could not create libvirt isolated network!")
pass
self._disconnect()
# Ensure we know what type of meachine we are working with
if self.MACHINE_TYPE is None:
logger.error("No machine type given for %s, defaulting to KVM"%self.vm_name)
self.MACHINE_TYPE = G.MACHINE_TYPES.KVM
ControlSensor.__init__(self)
def _connect(self):
"""
Create our libvirt connection
"""
# Get our mutex!
if self.REQUIRE_MUTEX:
libvirt_mutex.acquire()
# Open our libvirt connection
self._libvirt_conn = libvirt.open(None) # $LIBVIRT_DEFAULT_URI, or give a URI here
assert self._libvirt_conn, 'libVirt: Failed to open connection'
# logger
logger.debug("* Connecting %s" % self.vm_name)
# Try to lookup our domain to see if it exists and return it
try:
dom_tmp = self._libvirt_conn.lookupByName(self.vm_name)
return dom_tmp
except:
return None
pass
def _disconnect(self):
"""
Disconnect from libvirt
"""
# Close libvirt
self._libvirt_conn.close()
# Release our mutex!
if self.REQUIRE_MUTEX:
libvirt_mutex.release()
"""
Actuation functions
"""
def _get_vnc_port(self):
"""
Return the VNC port for this virtual machine through libvirt
"""
vnc_port = None
DOM = self._connect()
if DOM is None:
logger.error("VM %s was not found." % self.vm_name)
else:
# Extract the VMs XML config
xml_str = DOM.XMLDesc(0)
import xml.dom.minidom as xml
# Extract all graphics objects
dom = xml.parseString(xml_str)
graphics = dom.getElementsByTagName("graphics")
# Look for one of type="vnc" and extract its port
for g in graphics:
g_type = g.getAttribute("type")
if g_type == "vnc":
port = g.getAttribute("port")
vnc_port = int(port)
self._disconnect()
return vnc_port
def mouse_click(self,x,y,button=RFB.MOUSE_LEFT,double_click=False):
"""
This will move the mouse the specified (X,Y) coordinate and click
NOTE: Unfortuantely we have to use the VNC interface. Libvirt
doesn't have an exposed API for mouse functions
@param x: X coordinate on the screen
@param y: Y coordinate on the screen
@param button: Button mask for what to click
(0b1 - Left, 0b100 - Right)
@param double_dlick: Specifies a double click or single click
"""
vnc_host="localhost"
vnc_port=self._get_vnc_port()
if vnc_port is None:
logger.error("Could not detect a VNC port for %s"%self.name)
return False
# Use our RFB Client
vnc_client = RFB.RFBClient(vnc_host,vnc_port)
vnc_client.mouseMove(x ,y)
vnc_client.mouseClick(button,double_click=double_click)
time.sleep(.5)
return True
def mouse_wiggle(self, enabled):
""" This function randomly wiggles the mouse """
# We do not currently implement this for virtual machines
return True
    def keypress_send(self, keypresses):
        """
        Given a list of keypress instructions will emulate them on the SUT.

        @param keypresses: List of lists of keypresses with SPECIAL/TEXT
                           identifiers.
                           E.g. [ [Type, [Keys] ], ... ]
        """
        logger.debug("Sending key press")
        # Start emulating
        DOM = self._connect()
        if DOM is None:
            logger.error("Could not find VM (%s)" % self.vm_name)
        else:
            # Open Run Dialog
            for line in keypresses:
                cmd = line[0]
                keys = line[1]
                # Special key?  The whole list is sent as one chord.
                if cmd == G.SENSOR_CONTROL.KEY_SP_CMD:
                    DOM.sendKey(libvirt.VIR_KEYCODE_SET_LINUX, 0, keys, len(keys), 0)
                # Are we sleeping?
                elif cmd == G.SENSOR_CONTROL.KEY_SLEEP:
                    logger.debug("Key press: Sleeping for %d "%int(keys))
                    time.sleep(int(keys))
                # Normal keys: sent one at a time; a nested list is a chord
                else:
                    for c in keys:
                        if isinstance(c, list):
                            DOM.sendKey(libvirt.VIR_KEYCODE_SET_LINUX, 0, c, len(c), 0)
                        else:
                            DOM.sendKey(libvirt.VIR_KEYCODE_SET_LINUX, 0, [c], 1, 0)
                        # Pace the keystrokes so the guest can keep up
                        time.sleep(G.SENSOR_CONTROL.SLEEP_INTER_KEY)
                time.sleep(G.SENSOR_CONTROL.SLEEP_INTER_CMD)
        self._disconnect()
def keypress_get_generator(self):
"""
Return a generator to convert scripts into a language this sensor
understands
@return: KeypressGenerator for virtual machines
"""
return KeypressGeneratorVirtual()
"""
Power functions
"""
def power_on(self):
"""
Power on the VM
"""
logger.debug("Starting up %s..." % self.vm_name)
DOM = self._connect()
if DOM is not None:
# Machine is already on if this is set
try:
DOM.create()
except:
pass
rtn = True
else:
logger.warning("%s does not exist." % self.vm_name)
rtn = False
self._disconnect()
return rtn
def power_off(self):
"""
Hard shutdown the machine
"""
logger.debug("* Destroying %s..." % self.vm_name)
try:
DOM = self._connect()
if DOM is not None:
DOM.destroy()
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
except:
logger.error("* Cannot destroy machine, %s"%self.vm_name)
def power_shutdown(self):
"""
Nice shutdown of the VM
"""
logger.debug("Shutting down %s..." % self.vm_name)
DOM = self._connect()
if DOM is not None:
DOM.shutdown()
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def power_reset(self):
"""
Reset power on the VM
"""
logger.debug("Resetting %s..." % self.vm_name)
DOM = self._connect()
if DOM is not None:
DOM.reset(0)
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def power_reboot(self):
"""
Reboot the VM
"""
logger.debug("Rebooting %s..." % self.vm_name)
DOM = self._connect()
if DOM is not None:
DOM.reboot(0)
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def get_state(self):
"""
Return the current state of the VM
"""
DOM = self._connect()
if DOM is not None:
rtn = DOM.state(0)
else:
rtn = None
self._disconnect()
return rtn
def power_status(self):
"""
Return the status of the VM
"""
logger.debug("Getting power status of %s..." % self.vm_name)
state = self.get_state()
if state is None:
return G.SENSOR_CONTROL.POWER_STATUS.UNKNOWN
# For some reason a list is returned, seem to always be the same.
state = state[0]
# Running?
if state in [libvirt.VIR_DOMAIN_RUNNING,
libvirt.VIR_DOMAIN_BLOCKED,
libvirt.VIR_DOMAIN_PAUSED
]:
return G.SENSOR_CONTROL.POWER_STATUS.ON
# Off?
elif state in [libvirt.VIR_DOMAIN_SHUTDOWN,
libvirt.VIR_DOMAIN_SHUTOFF]:
return G.SENSOR_CONTROL.POWER_STATUS.OFF
# Unknown?
elif state in [libvirt.VIR_DOMAIN_NOSTATE,
libvirt.VIR_DOMAIN_CRASHED,
libvirt.VIR_DOMAIN_LAST]:
return G.SENSOR_CONTROL.POWER_STATUS.UNKNOWN
else:
logger.warning("%s does not exist." % self.vm_name)
return G.SENSOR_CONTROL.POWER_STATUS.UNKNOWN
"""
Machine control functions
"""
    def machine_create(self, xml_config, paused=False):
        """
        Creates a new Xen VM from the specified config file.

        @param xml_config: libvirt domain XML describing the machine.
        @param paused: if True, the new domain is left in a suspended
                       state instead of running.
        """
        logger.debug("Creating %s... (Paused=%s)" % (self.vm_name, paused))
        DOM = self._connect()
        # Is there a machine already created?
        if DOM is not None:
            logger.error("Tried to created %s, but a VM already exists." % self.vm_name)
        else:
            # Create our machine
            if paused:
                # KVM supports starting directly in the paused state;
                # other hypervisors must be started and then suspended,
                # which leaves a brief window where the guest runs.
                if self.MACHINE_TYPE == G.MACHINE_TYPES.KVM:
                    DOM = self._libvirt_conn.createXML(xml_config,
                                                       libvirt.VIR_DOMAIN_START_PAUSED)
                else:
                    DOM = self._libvirt_conn.createXML(xml_config, 0)
                    DOM.suspend()
            else:
                logger.debug("Creating VM unpaused.")
                DOM = self._libvirt_conn.createXML(xml_config, 0)
        self._disconnect()
def machine_pause(self):
"""
Pause a machine
"""
logger.debug("Pausing %s..." % self.vm_name)
DOM = self._connect()
if DOM is not | |
of optimal hard
threshold for singular values. (Gavish & Donoho,
IEEE Transactions on Information Theory 60.8 (2014): 5040-5053.)
This only takes effect if auto_nuisance is True.
nureg_zscore: boolean, default: True
A flag to tell the algorithm whether data is z-scored before
estimating the number of nuisance regressor components necessary to
        account for spatial noise correlation. It also determines whether
the residual noise is z-scored before estimating the nuisance
regressors from residual.
This only takes effect if auto_nuisance is True.
nureg_method: string, naming a method from sklearn.decomposition.
'PCA', 'ICA', 'FA' or 'SPCA' are currently supported. Default: 'PCA'
The method to estimate the shared component in noise across voxels.
This only takes effect if auto_nuisance is True.
baseline_single: boolean. Default: False
A time course of constant 1 will be included to the nuisance
regressor for each participant. If baseline_single is set to False,
one such regressor is included for each fMRI run, but at the end of
fitting, a single component in beta0\_ will be computed as the average
of the weight maps corresponding to these regressors. This might
cause underestimation of noise variance.
If baseline_single is True, only one regressor of constant 1 will be
used for the whole dataset. This might be desirable if you
believe the average image intensity might not scale with the
same proportion for different voxels across scan. In other words,
it is possible that some part of the brain is more vulnerable to
change in baseline intensity due to facts such as
field inhomogeneity. Setting baseline_single to True will force the
nuisance regressors automatically estimated from residuals to
capture this. However, when each task condition only occurs in one
run and when the design matrix in each run sums together close to
a flat line, this option can cause the estimated similarity to be
        extremely high between conditions occurring in the same run.
SNR_prior: string. Default: 'exp'
The type of prior for pseudo-SNR.
If set to 'exp', truncated exponential distribution with scale
parameter of 1 is imposed on pseudo-SNR.
If set to 'lognorm', a truncated log normal prior is imposed.
In this case, the standard deviation of log(SNR) is set
by the parameter logS_range.
If set to 'unif', a uniform prior in [0,1] is imposed.
In all these cases, SNR is numerically
marginalized on a grid of parameters. So the parameter SNR_bins
determines how accurate the numerical integration is. The more
number of bins are used, the more accurate the numerical
integration becomes.
In all the cases, the grids used for pseudo-SNR do not really
set an upper bound for SNR, because the real SNR is determined
by both pseudo-SNR and U, the shared covariance structure.
logS_range: float. Default: 1.0
The reasonable range of the spread of SNR in log scale.
This parameter only takes effect if SNR_prior is set to 'lognorm'.
It is effectively the `s` parameter of `scipy.stats.lognorm`,
or the standard deviation of the distribution in log scale.
logS_range specifies how variable you believe the SNRs
to vary across voxels in log scale.
This range should not be set too large, otherwise the fitting
may encounter numerical issue.
If it is set too small, the estimated SNR will turn to be too
close to each other and the estimated similarity matrix might
overfit to voxels of low SNR.
If you increase logS_range, it is recommended to increase
SNR_bins accordingly, otherwise the pseudo-SNR values evaluated might
be too sparse, causing the posterior pseudo-SNR estimations
to be clustered around the bins.
SNR_bins: integer. Default: 21
The number of bins used to numerically marginalize the pseudo-SNR
parameter. In general, you should try to choose a large number
to the degree that decreasing SNR_bins does not change the result
of fitting result. However, very large number of bins also causes
slower computation and larger memory consumption.
For SNR_prior='lognorm', the default value 21 is based on
the default value of logS_range=1.0 and bin width of 0.3 on log scale.
But it is also a reasonable choice for the other two options
for SNR_prior.
rho_bins: integer. Default: 20
The number of bins to divide the region of (-1, 1) for rho.
This only takes effect for fitting the marginalized version.
If set to 20, discrete numbers of {-0.95, -0.85, ..., 0.95} will
be used to numerically integrate rho from -1 to 1.
    optimizer: str or callable. Default: 'L-BFGS-B'
        The optimizer to use for minimizing the cost function, in any
        form that scipy.optimize.minimize can accept.
        'L-BFGS-B' is used as the default. Users can try other strings
        corresponding to optimizers provided by scipy.optimize.minimize,
        such as 'BFGS' or 'CG', or pass a custom optimizer.
Note that BRSA fits a lot of parameters. So a chosen optimizer
should accept gradient (Jacobian) of the cost function. Otherwise
        the fitting is likely to be unbearably slow. We do not calculate
Hessian of the objective function. So an optimizer which requires
Hessian cannot be used.
minimize_options: dictionary.
Default: {'gtol': 1e-4, 'disp': False,
'maxiter': 20}
This is the dictionary passed as the options argument to
        scipy.optimize.minimize which minimizes the cost function during
fitting. Notice that the minimization is performed for up to
n_iter times, with the nuisance regressor re-estimated each time.
So within each of the n_iter steps of fitting,
        scipy.optimize.minimize does not need to fully converge. The key
'maxiter' in this dictionary determines the maximum number of
iteration done by scipy.optimize.minimize within each of the n_iter
steps of fitting.
tol: float. Default: 1e-4.
Tolerance parameter passed to scipy.optimize.minimize. It is also
used for determining convergence of the alternating fitting
procedure.
random_state : RandomState or an int seed. Default: None
A random number generator instance to define the state of
the random permutations generator whenever the module
needs to generate random number (e.g., initial parameter
of the Cholesky factor).
anneal_speed: float. Default: 10
Annealing is introduced in fitting of the Cholesky
decomposition of the shared covariance matrix. The amount
of perturbation decays exponentially. This parameter sets
the ratio of the maximum number of iteration to the
time constant of the exponential.
anneal_speed=10 means by n_iter/10 iterations,
the amount of perturbation is reduced by 2.713 times.
Attributes
----------
U_ : numpy array, shape=[condition,condition].
The shared covariance matrix
L_ : numpy array, shape=[condition,condition].
The Cholesky factor of the shared covariance matrix
(lower-triangular matrix).
C_: numpy array, shape=[condition,condition].
The correlation matrix derived from the shared covariance matrix.
This is the estimated similarity matrix between neural patterns
to your task conditions. Notice that it is recommended that
you also check U\_, which is the covariance matrix underlying
this correlation matrix. In cases there is almost no response
to your task conditions, the diagonal values of U\_ would become
very small and C\_ might contain many correlation coefficients
close to 1 or -1. This might not reflect true strong correlation
or strong negative correlation, but a result of lack of
task-related neural activity, design matrix that does not match
true neural response, or not enough data.
It is also recommended to check nSNR\_ after mapping it back to
the brain. A "reasonable" map should at least have higher values
        in gray matter than in white matter.
nSNR_ : list of numpy arrays, shape=[voxels,] for each subject in the list.
        The pseudo-SNR of all voxels. If SNR_prior='lognorm',
the geometric mean of nSNR\_ would be approximately 1.
If SNR_prior='unif', all nSNR\_ would be in the range of (0,1).
If SNR_prior='exp' (default), the range of values would vary
depending on the data and SNR_bins, but many should have low
values with few voxels with high values.
Note that this attribute can not be interpreted as true SNR,
but the relative ratios between voxels indicate the contribution
of each voxel to the representational similarity structure.
sigma_ : list of numpy arrays, shape=[voxels,] for each subject.
The estimated standard deviation of the | |
<reponame>tied/jira-groovy-scripts-2<filename>script-risk-register/MonteCarloAfterMitigation.py
import sys, getopt
import numpy as np
import datetime
import locale
import platform
import time
import getpass
from random import randrange
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib import pylab
from pylab import *
from jira import JIRA
# Prompt for Jira credentials up front; the password is read without echo.
username = input("Please enter your Jira username: ")
# prompt for Jira password
password = getpass.getpass(prompt="Please enter your Jira password: ")
# Note that you'll need to have numpy, matplotlib, pylab, and jira packages installed (all are available on pip)
# Currency formatting: Windows accepts the empty (default) locale string,
# while other platforms need an explicit locale name.
if platform.system() == 'Windows':
    locale.setlocale( locale.LC_ALL, '' )
else:
    locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
def mybar(ax, x1, x2, y2):
    """Placeholder for drawing a shaded bar on *ax*; currently disabled.

    The imshow-based shading implementation is kept below (commented out)
    so it can be re-enabled for the spend-curve figure if desired.
    """
    ## Xbars = [[0., .3], [.7, .4]]
    ## left, right = x1, x2
    ## bottom, top = 0.0, y2
    ## ax.imshow(Xbars, interpolation='bicubic', cmap=cm.Blues,
    ##           extent=(left, right, bottom, top), alpha=1)
    return
# Matplotlib tick formatter: render a date number as e.g. "Jun, 2014".
def format_date(x, pos=None):
    """Format matplotlib date number *x* as 'Mon, Year' for axis labels."""
    # pos is required by matplotlib's FuncFormatter protocol but unused.
    return pylab.num2date(x).strftime('%b, %Y')
# Find the total cost at a specified confidence level "breakfraction".
def percentage(data, breakfraction):
    """Return the cost in *data* at confidence level *breakfraction*.

    @param data: list of total costs, one per Monte Carlo iteration.
    @param breakfraction: confidence level in [0, 1].
    @return: the cost such that a fraction *breakfraction* of iterations
             cost no more than it.
    @raise ValueError: if *data* is empty.
    """
    if not data:
        raise ValueError("percentage() requires a non-empty data list")
    # Sort a copy so the caller's list order is left untouched
    # (the original sorted in place as a side effect).
    ordered = sorted(ordered_input) if False else sorted(data)
    # Clamp the index so breakfraction == 1.0 selects the last element
    # instead of indexing one past the end (IndexError in the original).
    breaknumber = min(int(breakfraction * len(ordered)), len(ordered) - 1)
    return ordered[breaknumber]
def xstr(s):
    """Stringify *s* for report output, mapping None to the empty string."""
    return '' if s is None else str(s)
def montecarlorisk(num_trials,annual_escalation,subsystem,output_file):
## define output location; if variable output_file is true then output goes to test.txt in working directory
fhold = sys.stdout
if output_file:
f = open('./test.txt', 'w')
sys.stdout = f
#########################################################################################
###################### Some basic values ###############################
#########################################################################################
total_contingency = 81700.0 # total contingency in K$
nyears = 9 ## number of years with construction activity
date_start = "2014-06-01"
date_end = "2022-10-01"
date_commissioning_start = "2020-10-01"
date_base_year = "2013"
date_year_start = "2014"
date_year_end = "2022"
annual_esc = 1.0 + annual_escalation # convert annual fractional escalation to factor
yer = ['2013','2014','2015','2016','2017','2018','2019','2020','2021','2022']
final_totals_distribution = []
#cost_lowest = np.zeros(1000)
#cost_expected = np.zeros(1000)
#cost_highest = np.zeros(1000)
subsystem = subsystem.upper()
if subsystem == 'DOE':
fundingstring = " AND component in ('Camera', 'DOE Funded Commissioning', 'DOE Funded Operations') "
projectname = "LSST DOE"
elif subsystem =='NSF':
fundingstring = " AND component not in ('Camera', 'DOE Funded Commissioning', 'DOE Funded Operations', 'Other', 'Operations') "
projectname = "LSST NSF"
elif subsystem == 'ALL':
fundingstring = ""
projectname = "LSST"
elif subsystem == 'DM':
fundingstring = " AND component = 'Data Management' "
projectname = 'DM'
elif subsystem == 'TS':
fundingstring = " AND component = 'Telescope & Site' "
projectname = 'Telescope & Site'
elif subsystem == 'CAM':
fundingstring = " AND component = 'Camera' "
projectname = 'CAM'
elif subsystem == 'SE':
fundingstring = " AND component = 'Systems Engineering' "
projectname = 'Systems Engineering'
elif subsystem == 'PMO':
fundingstring = " AND component = 'Project Management Office' "
projectname = 'Project Management'
elif subsystem == 'EPO':
fundingstring = " AND component = 'Education and Public Outreach' "
projectname = 'EPO'
elif subsystem == 'NSF_P6':
fundingstring = " AND component in ('Telescope & Site', 'Systems Engineering', 'Project Management Office') "
projectname = "LSST"
##############################################################################
################### Simple escalation model
##############################################################################
escalate = array(10)
sum = 0.0
escalate = {} # a dictionary
escalate[date_base_year] = 1.0
for jj in range(nyears):
escalate[yer[jj+1]] = escalate[yer[jj]] * annual_esc
sum += escalate[yer[jj+1]]
escalate['dist_sum'] = sum/nyears
server = "https://jira.lsstcorp.org"
auth_inf = (username,password)
try:
jira = JIRA(server=server,basic_auth=auth_inf)
except:
print("ERROR: Jira authentication failed. Have you provided the correct username and password?")
return
query = "project=RM AND issuetype='RM-Risk' AND status='Active Risk/Opportunity' AND cf[13601] is EMPTY" + fundingstring + "ORDER BY cf[13108]"
fields="components,summary,customfield_14807,customfield_14804,customfield_14803,customfield_13107,customfield_13108,customfield_13110,customfield_13111,description"
print(('\n\r Query to database \n\r\n\r'+ query +'\n\r'))
issues = jira.search_issues(query,maxResults=None,fields=fields)
nrisks = len(issues)
rows=[]
mean_prob_lookup = {'0%-1%':0.005,
'0%-5%':0.025,
'5%-10%':0.075,
'10%-25%':0.17,
'25%-50%':0.37,
'50%-75%':0.63,
'75%-100%':0.88}
rows=[]
for i in range(len(issues)):
rows.append({'riskid':int(''.join([i for i in issues[i].key if i.isdigit()])),
'projectsystem':xstr(issues[i].fields.components[0].name),
'mitigated_probability':xstr(issues[i].fields.customfield_14807),
'mitigated_expense_expected':issues[i].fields.customfield_14804,
'mitigated_schedule_cost_expected':issues[i].fields.customfield_14803,
'meanprobability':mean_prob_lookup[issues[i].fields.customfield_14807],
'total_cost':0.0,
'obligationmodel':xstr(issues[i].fields.customfield_13107),
'triggerdate':(datetime.datetime.strptime(issues[i].fields.customfield_13108,'%Y-%m-%d').date() if issues[i].fields.customfield_13108 else datetime.date(2000,1,1)),
'randomtrigger':(int(issues[i].fields.customfield_13110) if issues[i].fields.customfield_13110 else 0),
'risktitle':xstr(issues[i].fields.summary),
'riskdescription':xstr(issues[i].fields.description),
'randomperiod':xstr(issues[i].fields.customfield_13111) })
# setup lists
nyears=[1 for i in range(nrisks)]
riskheader = [' ' for i in range(3000)]
riskid=[] # issue.key
projectsystem=[] # issue.fields.components
mitigated_probability=[] # issue.fields.customfield_14807
mitigated_expense_expected=[] # issue.fields.customfield_14804
mitigated_schedule_cost_expected=[] # issue.fields.customfield_14803
meanprobability=[] # calculate from cf 14807
total_cost=[] # issue.fields.customfield_14803 + issue.customfield_14804
obligationmodel=[] # issue.fields.customfield_13107
triggerdate=[] # issue.fields.customfield_13108
randomtrigger=[] # issue.fields.customfield_13110 and issue.customfield_13111
risktitle=[] # issue.fields.summary
riskdescription = [] # issue.fields.description
randomperiod = []
## Rule 0 - Accept all risks, simple passthrough
## print "\n\r Rule 1 - Accept only risks that have total cost of more than $1M \n\r"
## print "\n\r Rule 2 - Accept only risks that have expected exposure of more that $200K \n\r"
## print "\n\r Rule 3 - Accept risks that pass Rule 1 OR Rule 2 \n\r"
## Store the database values into arrays
print('\n\r Summary of risks ordered by triggerdate \n\r\n\r')
for ii in range(nrisks):
lasttotalcost = (float(rows[ii]['mitigated_expense_expected'])+float(rows[ii]['mitigated_schedule_cost_expected']))
##############################################################################
################### Use simple model of escalation to convert to as-spent dollars
##############################################################################
if rows[ii]['obligationmodel'] == "trigger" :
yr = rows[ii]['triggerdate'].year
yr = max(int(date_year_start),int(yr))
yr = min(int(date_year_end),int(yr))
lasttotalcost = lasttotalcost * escalate[str(yr)]
else:
lasttotalcost = lasttotalcost * escalate['dist_sum']
##############################################################################
if lasttotalcost >= 0.00:
## print("\n\r Rule 0 - Accept all risks, simple passthrough \n\r")
## Rule 1 - Accept only risks that have total cost of more than $1M
## if lasttotalcost >= 1000.00:
## Rule 2 - Accept only risks that have expected exposure of more that $200K
## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0:
## Rule 3 - Accept risks that pass Rule 1 OR Rule 2
## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0 or lasttotalcost >= 1000.00:
riskid.append(rows[ii]['riskid'])
projectsystem.append(rows[ii]['projectsystem'])
mitigated_probability.append(rows[ii]['mitigated_probability'])
mitigated_expense_expected.append(rows[ii]['mitigated_expense_expected'])
mitigated_schedule_cost_expected.append(rows[ii]['mitigated_schedule_cost_expected'])
meanprobability.append(float(rows[ii]['meanprobability']))
obligationmodel.append(rows[ii]['obligationmodel'])
triggerdate.append(rows[ii]['triggerdate'])
randomtrigger.append(rows[ii]['randomtrigger'])
risktitle.append(rows[ii]['risktitle'])
riskdescription.append(rows[ii]['riskdescription'])
total_cost.append(lasttotalcost)
randomperiod.append(rows[ii]['randomperiod'])
## Print formatted output
print('{:>30} RM-{:4} {:>10} {:>22} {:>5} [{:>8.2f} {:>8.2f}] {:>8.2f} {:40} {:80}'.format(
rows[ii]['projectsystem'],
str(rows[ii]['riskid']),
str(rows[ii]['triggerdate']),
#rows[ii]['obligationmodel'][0:4],
rows[ii]['obligationmodel'],
#rows[ii]['randomtrigger'] % 1000,
rows[ii]['randomtrigger'],
lasttotalcost,
rows[ii]['meanprobability'],
float(rows[ii]['meanprobability'])*lasttotalcost,
str(rows[ii]['risktitle']),
str(rows[ii]['riskdescription']),
))
nrisks = len(riskid)
## Print risks ordered by riskid
print(('\n\r Summary of {:>3} risks ordered by riskid \n\r\n\r'.format(str(nrisks))))
hold_riskid,hold_projectsystem,hold_risktitle = (list(t) for t in zip(*sorted(zip(riskid,projectsystem,risktitle))))
for ii in range(nrisks):
print('{:>30} RM-{:3} {:40}'.format( hold_projectsystem[ii],str(hold_riskid[ii]),hold_risktitle[ii]))
## Print risk description ordered by totalcost
print(('\n\r Summary of {:>3} risks ordered by totalcost \n\r\n\r'.format(str(nrisks))))
hold_total_cost,hold_riskdescription,hold_projectsystem,hold_riskid,hold_meanprobability = (list(t) for t in zip(*sorted(zip(total_cost,riskdescription,projectsystem,riskid,meanprobability), reverse=True)))
for ii in range(nrisks):
print('{:>30} RM-{:3} ${:8,.7}K [{:<4}] {:<100}'.format( hold_projectsystem[ii],str(hold_riskid[ii]),hold_total_cost[ii],hold_meanprobability[ii],hold_riskdescription[ii]))
## Figure 4
## Interaction loop over risks. Also, plot fig 4 with the risk spend curve
max_hold = 0.0
fig4 = plt.figure(4)
ax1 = fig4.add_subplot(111)
###################################################################
############ Begin main Monte Carlo iteration loop ################
###################################################################
for ii in range(num_trials):
delta_this_iteration = []
triggerdate_this_iteration = []
projectsystem_this_iteration = []
riskid_this_iteration = []
###################################################################
############ Random loop over each risk ################
###################################################################
##
## Each risk has a specified date of possible occurence. A risk can occur at a specified trigger date;
# at some random time; or a risk may occur more than once over a specified range of dates.
## Trigger case
for jj in range(nrisks):
if obligationmodel[jj] == "Trigger date":
choice=np.random.uniform(0.0,1.0,1)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(triggerdate[jj])
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
## Random case
elif obligationmodel[jj] == "Random occurrence(s)":
nrandom = randomtrigger[jj]
#print("random risk; nrandom = "+str(nrandom))
#periodcode = randomtrigger[jj] / 1000
#print("random risk periodcode = "+str(periodcode))
periodcode = 3
if randomperiod[jj] == 'Construction only':
periodcode = 1
elif randomperiod[jj] == 'Commissioning only':
periodcode = 2
elif randomperiod[jj] == 'Both Construction and Commissioning':
periodcode = 3
date1 = date_start
date2 = date_commissioning_start
if periodcode == 1: # random during construction only
date1 = date_start
date2 = date_commissioning_start
elif periodcode == 2: # random during commissioning only
date1 = date_commissioning_start
date2 = date_end
elif periodcode == 3: # random throughout project
date1 = date_start
date2 = date_end
for kk in range(nrandom):
stime = time.mktime(time.strptime(date1, '%Y-%m-%d'))
etime = time.mktime(time.strptime(date2, '%Y-%m-%d'))
ptime = stime + np.random.uniform(etime - stime)
randomdate = datetime.date.fromtimestamp(int(ptime))
#print(randomdate)
choice = np.random.uniform(0.0,1.0)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])/float(nrandom)
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(randomdate)
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
## Distributed case
elif obligationmodel[jj] == "Distributed occurrence":
if ii == 0: # only on first pass through will triggerdate always have the proper value
#print ii,jj,triggerdate[jj],triggerdate[jj].year
ny = max(triggerdate[jj].year - 2014,1) # risk is distributed over this many years but must be at least 1
nyears[jj] = min(ny,8) # must store the corect values of nyears for each distributed risk
for kk in range(nyears[jj]):
year = 2015 + kk #kk starts at zero. Don't include short period in 2014
choice=np.random.uniform(0.0,1.0,1)
if choice <= meanprobability[jj] :
addit = float(total_cost[jj])/float(nyears[jj])
else:
addit = float(0.0)
delta_this_iteration.append(addit)
triggerdate_this_iteration.append(datetime.date(year,randrange(1,12),1)) # random month in year, always assign the first day of the month
projectsystem_this_iteration.append(projectsystem[jj])
riskid_this_iteration.append(int(riskid[jj]))
else:
sys.exit(" obligationmode not defined for risk "+str(projectsystem[jj]) + str(riskid[jj])+" " +str(jj))
###################################################################
############ End short random loop over risk ################
###################################################################
# Since random and distributed risks have been added the lists are no longer in date order.
# Need to resort the two arrays by effective trigger dates using: list1, list2 = (list(t) for t in zip(*sorted(zip(list1, list2)))) - YIKES
#print(riskid_this_iteration)
triggerdate_this_iteration, delta_this_iteration,projectsystem_this_iteration, riskid_this_iteration = (list(t) for t in zip(*sorted(zip(triggerdate_this_iteration,delta_this_iteration,projectsystem_this_iteration,riskid_this_iteration))))
#print(type(riskid_this_iteration),riskid_this_iteration)
#print(" ")
#print(delta)
# Compute the running sum
xx_this_iteration = np.cumsum(delta_this_iteration)
len_xx = len(xx_this_iteration)
###################################################################
############# Some diagnostic output #############################
###################################################################
nprintout = 5 # number of simulations with diagnostic output
diagnostic_steps = num_trials / nprintout
if ii % diagnostic_steps == 0:
print(('\n\r\n\r\n\r Diagnostic output for iteration '+str(ii)+' \n\r'))
for mm in range(len_xx):
header = riskheader[riskid_this_iteration[mm]]
line = [header,projectsystem_this_iteration[mm],riskid_this_iteration[mm],str(triggerdate_this_iteration[mm]),delta_this_iteration[mm],xx_this_iteration[mm]]
print('{:>6}{:>30} RM-{:3} {:>15} {:12.1f} {:12.1f}'.format(*line))
#print(line)
# Store the grand totals
# reserve the storage arrays on the first iteration
if ii == 0:
totals = np.zeros(len_xx)
totals2 = np.zeros(len_xx)
#print len(xx),len_xx,len(totals),len(totals2)
totals += xx_this_iteration
totals2 += xx_this_iteration * xx_this_iteration
final_totals_distribution.append(xx_this_iteration[len_xx - 1]*0.001) # Convert from K$ to M$
## The step method plots the spend curve, plot only every 50th iteration line
if ii%50 == 0:
#print len(triggerdate),len(xx)
#print(triggerdate)
#print(" ")
#print(xx)
pylab.step(triggerdate_this_iteration,total_contingency - xx_this_iteration,where='post') # plot the spend curve using step
max_hold | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2014-05-01 16:00:07
# @Last Modified by: <NAME>
# @Last Modified time: 2015-03-14 22:40:14
import numpy as np
from scipy.signal import butter, lfilter
from scipy import interpolate, fftpack
# from sklearn import preprocessing
# from sklearn.decomposition import FastICA
"""
Signal Processing helper methods
"""
def bandpass(data, fps, lowcut, highcut):
    """Band-pass filter *data* between lowcut and highcut (Hz).

    2D input is filtered column by column and modified IN PLACE (the same
    array is also returned); 1D input is filtered into a new array.
    Each column is median-padded with 10*fps samples per side before
    filtering to suppress the filter's edge transients, then the padding
    is stripped off again.

    NOTE(review): for 2D input, column 0 is skipped (range starts at 1) --
    presumably it holds timestamps rather than samples; confirm against
    callers before relying on this.
    """
    shape = data.shape
    if len(shape) > 1:
        for k in range(1,shape[1]):
            npad = int(10*fps)
            data_pad = np.lib.pad(data[:,k], npad, 'median')
            data_pad = butter_bandpass_filter(data_pad, lowcut, highcut, fps, 5)
            data[:,k] = data_pad[npad:-npad]
    else:
        npad = int(10*fps)
        data_pad = np.lib.pad(data, npad, 'median')
        data_pad = butter_bandpass_filter(data_pad, lowcut, highcut, fps, 5)
        data = data_pad[npad:-npad]
    return data
def downsample(data, mult):
    """Given 1D data, return the binned average over bins of *mult* samples.

    Trailing samples that do not fill a complete bin are discarded.

    @param data: 1D array-like of samples.
    @param mult: bin width (number of samples averaged per output point).
    @return: numpy array of len(data) // mult bin means.
    """
    overhang = len(data) % mult
    if overhang:
        data = data[:-overhang]
    # Integer division: plain '/' yields a float under Python 3 and makes
    # np.reshape raise a TypeError.
    data = np.reshape(data, (len(data) // mult, mult))
    return np.average(data, 1)
def butter_bandpass(lowcut, highcut, fs, order=6):
    """Design a Butterworth band-pass filter.

    Cutoffs are given in Hz and normalized to the Nyquist frequency,
    as scipy.signal.butter expects.

    Returns the (b, a) transfer-function coefficients.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='bandpass')
def butter_lowpass(highcut, fs, order=6):
    """Design a Butterworth low-pass filter.

    The cutoff is given in Hz and normalized to the Nyquist frequency.
    Returns the (b, a) transfer-function coefficients.
    """
    normalized_cut = highcut / (0.5 * fs)
    return butter(order, normalized_cut, btype='low')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=6):
    """Band-pass *data* using a filter designed by butter_bandpass()."""
    coeff_b, coeff_a = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeff_b, coeff_a, data)
def butter_low_filter(data, highcut, fs, order=6):
    """Low-pass *data* using a filter designed by butter_lowpass()."""
    coeff_b, coeff_a = butter_lowpass(highcut, fs, order=order)
    return lfilter(coeff_b, coeff_a, data)
def compute_fft(time, data, Fs):
    """FFT of *data* along axis 0.

    @param time: unused; kept for interface compatibility with callers
                 that pass sample timestamps.
    @param data: 1D or 2D array of samples (rows are time).
    @param Fs: sampling frequency in Hz.
    @return: (freqs, magnitude, phase). freqs is 60 * linspace(0, Fs, L)
             -- i.e. expressed in beats-per-minute -- and magnitude/phase
             cover the full two-sided spectrum.
    """
    L = data.shape[0]
    # No zero-padding: the transform length equals the signal length.
    # (The original computed one-sided 'fft'/'phase' slices and an unused
    # 'dim' here, but never returned them -- dead code removed.)
    raw = fftpack.fft(data, L, axis=0)
    return 60. * np.linspace(0.0, Fs, L), np.abs(raw), np.angle(raw)
# perform spectracl subtraction to remove noise and return de-noised mag and reconstructed signal with ideal filter between 45 and 180 bpm
# this is not currently used, but may be useful for future improvements
def spectral_subtract(noise_mag, signal_mag, signal_phase, nfft, freqs):
    """Spectral-subtraction de-noising (kept for future improvements).

    Subtracts the noise magnitude spectrum from the signal's, clips
    over-subtracted bins at zero, and reconstructs a time-domain signal
    using the original phase.

    @return: (denoised magnitude spectrum, reconstructed signal).
    """
    denoised = signal_mag - noise_mag
    # Magnitudes cannot be negative; zero out over-subtracted bins.
    denoised[denoised < 0.] = 0.
    # Re-attach the original phase and invert back to the time domain.
    rebuilt = np.abs(fftpack.ifft(denoised * np.exp(1j * signal_phase), nfft))
    return denoised, rebuilt
def detect_beats(x, bpm):
    """Detect beats in pulse data based on their amplitude.

    Slides a window of roughly one expected beat period over the signal,
    picking the dominant peak inside each window via detect_peaks().

    Parameters
    ----------
    x : 1D array_like
        data.
    bpm : number
        approximate beat period in frames (num frames between beats);
        peaks are searched within windows of about this length.

    Returns
    -------
    ind : 1D array_like
        indeces of the peaks in `x`.
    beat_frames : 1D array_like
        per-frame array where the span between two consecutive beats is
        filled with that interval's length in frames.
    """
    ind = []
    beat_frames = np.zeros(len(x))
    last_beat = None
    beat_start = 0
    beat_end = bpm
    last_frame = len(x) - 1
    slide = True
    minval = np.min(x)
    maxval = np.max(x)
    # Two candidate peaks closer in height than 5% of the signal's range
    # are treated as "equal"; the earlier one wins below.
    small_diff = (maxval - minval) * .05
    while slide:
        if beat_end == last_frame:
            slide = False
        peaks = detect_peaks(x[beat_start:beat_end])
        if len(peaks) == 0:
            # No peak in this window: widen it by half a beat and retry.
            beat_end += int(bpm/2)
        else:
            if len(peaks) == 1:
                i = peaks[0]
            else:
                idx = np.argsort(x[beat_start:beat_end][peaks]) # peaks by height
                # If the two tallest peaks are about the same height,
                # prefer whichever occurs earlier in the window.
                if x[beat_start:beat_end][peaks][idx[-1]] - x[beat_start:beat_end][peaks][idx[-2]] < small_diff:
                    if idx[-2] < idx[-1]:
                        i = peaks[idx[-2]]
                    else:
                        i = peaks[idx[-1]]
                else:
                    i = peaks[idx[-1]]
            # Convert the window-relative peak index to an absolute index.
            i += beat_start
            ind.append(i)
            if last_beat == None:
                last_beat = i
            else:
                # Record the inter-beat interval over the frames it spans.
                frames = i - last_beat
                beat_frames[last_beat:i] = frames
                last_beat = i
            beat_end = int(i + bpm/2) # start looking for the next beat a half cycle from this peak
            beat_start = beat_end
            beat_end = beat_start + bpm
        if beat_start >= last_frame - int(bpm * .25): # if within a quarter beat from the end, stop
            slide = False
        elif beat_end > last_frame:
            beat_end = last_frame
    return np.array(ind), beat_frames
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None, pdf_fig=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
>>> from detect_peaks import detect_peaks
>>> x = np.random.randn(100)
>>> x[60:81] = np.nan
>>> # detect all peaks and plot data
>>> ind = detect_peaks(x, show=True)
>>> print(ind)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # set minimum peak height = 0 and minimum peak distance = 20
>>> detect_peaks(x, mph=0, mpd=20, show=True)
>>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
>>> # set minimum peak distance = 2
>>> detect_peaks(x, mpd=2, show=True)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # detection of valleys instead of peaks
>>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
>>> x = [0, 1, 1, 0, 1, 1, 0]
>>> # detect both edges
>>> detect_peaks(x, edge='both', show=True)
>>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
>>> # set threshold = 2
>>> detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
plot_peaks(x, | |
<filename>.venv/lib/python3.8/site-packages/jeepney/io/trio.py<gh_stars>0
import array
from contextlib import contextmanager
import errno
from itertools import count
import logging
from typing import Optional
try:
from contextlib import asynccontextmanager # Python 3.7
except ImportError:
from async_generator import asynccontextmanager # Backport for Python 3.6
from outcome import Value, Error
import trio
from trio.abc import Channel
from jeepney.auth import Authenticator, BEGIN
from jeepney.bus import get_bus
from jeepney.fds import FileDescriptor, fds_buf_size
from jeepney.low_level import Parser, MessageType, Message
from jeepney.wrappers import ProxyBase, unwrap_msg
from jeepney.bus_messages import message_bus
from .common import (
MessageFilters, FilterHandle, ReplyMatcher, RouterClosed, check_replyable,
)
log = logging.getLogger(__name__)
__all__ = [
'open_dbus_connection',
'open_dbus_router',
'Proxy',
]
# The function below is copied from trio, which is under the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
@contextmanager
def _translate_socket_errors_to_stream_errors():
    """Translate raw socket ``OSError``s into trio's stream exception types.

    EBADF (Unix) / ENOTSOCK (Windows) indicate a socket that was already
    closed; anything else is reported as a broken connection.
    """
    try:
        yield
    except OSError as err:
        closed_errnos = (errno.EBADF, errno.ENOTSOCK)
        if err.errno in closed_errnos:
            raise trio.ClosedResourceError("this socket was already closed") from None
        raise trio.BrokenResourceError(
            "socket connection broken: {}".format(err)
        ) from err
class DBusConnection(Channel):
    """A plain D-Bus connection with no matching of replies.

    This doesn't run any separate tasks: sending and receiving are done in
    the task that calls those methods. It's suitable for implementing servers:
    several worker tasks can receive requests and send replies.
    For a typical client pattern, see :class:`DBusRouter`.
    Implements trio's channel interface for Message objects.
    """
    def __init__(self, socket, enable_fds=False):
        # `socket` is a trio socket object (not a stream wrapper); see
        # open_dbus_connection(), which passes sock.socket after auth.
        self.socket = socket
        self.enable_fds = enable_fds
        self.parser = Parser()
        # D-Bus serial numbers for outgoing messages, starting at 1
        self.outgoing_serial = count(start=1)
        # Bus-assigned unique name; filled in after the Hello() exchange
        self.unique_name = None
        # Separate locks so one task may send while another receives
        self.send_lock = trio.Lock()
        self.recv_lock = trio.Lock()
        # Unsent tail of a message whose send was cancelled mid-way
        self._leftover_to_send = None  # type: Optional[memoryview]
    async def send(self, message: Message, *, serial=None):
        """Serialise and send a :class:`~.Message` object

        If *serial* is None, the next outgoing serial number is used.
        """
        async with self.send_lock:
            if serial is None:
                serial = next(self.outgoing_serial)
            # serialise() collects any file descriptors into `fds`
            fds = array.array('i') if self.enable_fds else None
            data = message.serialise(serial, fds=fds)
            await self._send_data(data, fds)
    # _send_data is copied & modified from trio's SocketStream.send_all() .
    # See above for the MIT license.
    async def _send_data(self, data: bytes, fds):
        # Send one serialised message; FDs (if any) ride along with the
        # first chunk of bytes via SCM_RIGHTS.
        if self.socket.did_shutdown_SHUT_WR:
            raise trio.ClosedResourceError("can't send data after sending EOF")
        with _translate_socket_errors_to_stream_errors():
            if self._leftover_to_send:
                # A previous message was partly sent - finish sending it now.
                await self._send_remainder(self._leftover_to_send)
            with memoryview(data) as data:
                if fds:
                    sent = await self.socket.sendmsg([data], [(
                        trio.socket.SOL_SOCKET, trio.socket.SCM_RIGHTS, fds
                    )])
                else:
                    sent = await self.socket.send(data)
                await self._send_remainder(data, sent)
    async def _send_remainder(self, data: memoryview, already_sent=0):
        # Loop until all of `data` has gone out; send() may accept less
        # than it was given.
        try:
            while already_sent < len(data):
                with data[already_sent:] as remaining:
                    sent = await self.socket.send(remaining)
                already_sent += sent
            self._leftover_to_send = None
        except trio.Cancelled:
            # Sending cancelled mid-message. Keep track of the remaining data
            # so it can be sent before the next message, otherwise the next
            # message won't be recognised.
            self._leftover_to_send = data[already_sent:]
            raise
    async def receive(self) -> Message:
        """Return the next available message from the connection"""
        async with self.recv_lock:
            while True:
                msg = self.parser.get_next_message()
                if msg is not None:
                    return msg
                # Once data is read, it must be given to the parser with no
                # checkpoints (where the task could be cancelled).
                b, fds = await self._read_data()
                if not b:
                    raise trio.EndOfChannel("Socket closed at the other end")
                self.parser.add_data(b, fds)
    async def _read_data(self):
        # Read one chunk of bytes (and file descriptors, when enabled)
        # from the socket.  Returns (bytes, fds-list).
        if self.enable_fds:
            nbytes = self.parser.bytes_desired()
            with _translate_socket_errors_to_stream_errors():
                data, ancdata, flags, _ = await self.socket.recvmsg(
                    nbytes, fds_buf_size()
                )
            # MSG_CTRUNC means the ancillary (FD) data was truncated; we
            # can't recover the lost FDs, so close the connection.
            if flags & getattr(trio.socket, 'MSG_CTRUNC', 0):
                self._close()
                raise RuntimeError("Unable to receive all file descriptors")
            return data, FileDescriptor.from_ancdata(ancdata)
        else:  # not self.enable_fds
            with _translate_socket_errors_to_stream_errors():
                data = await self.socket.recv(4096)
            return data, []
    def _close(self):
        # Close the socket and discard any partially-sent message.
        self.socket.close()
        self._leftover_to_send = None
    # Our closing is currently sync, but AsyncResource objects must have aclose
    async def aclose(self):
        """Close the D-Bus connection"""
        self._close()
    @asynccontextmanager
    async def router(self):
        """Temporarily wrap this connection as a :class:`DBusRouter`

        To be used like::

            async with conn.router() as req:
                reply = await req.send_and_get_reply(msg)

        While the router is running, you shouldn't use :meth:`receive`.
        Once the router is closed, you can use the plain connection again.
        """
        async with trio.open_nursery() as nursery:
            router = DBusRouter(self)
            await router.start(nursery)
            try:
                yield router
            finally:
                await router.aclose()
async def open_dbus_connection(bus='SESSION', *, enable_fds=False) -> DBusConnection:
    """Open a plain D-Bus connection and perform the initial handshake.

    Connects to the given bus ('SESSION', 'SYSTEM', or an address),
    runs the SASL authentication exchange, and sends the mandatory
    *Hello* message to obtain our unique name.

    :return: :class:`DBusConnection`
    """
    address = get_bus(bus)
    sock: trio.SocketStream = await trio.open_unix_socket(address)

    # SASL authentication handshake
    authenticator = Authenticator(enable_fds=enable_fds)
    for request in authenticator:
        await sock.send_all(request)
        authenticator.feed(await sock.receive_some())
    await sock.send_all(BEGIN)

    connection = DBusConnection(sock.socket, enable_fds=enable_fds)

    # Say *Hello* to the message bus - this must be the first message, and the
    # reply gives us our unique name.
    async with connection.router() as router:
        hello_reply = await router.send_and_get_reply(message_bus.Hello())
        connection.unique_name = hello_reply.body[0]

    return connection
class TrioFilterHandle(FilterHandle):
    """Filter handle whose queue is the receive end of a trio memory channel.

    Holds the send end as well, so closing the handle also closes the
    channel from the sending side.
    """

    def __init__(self, filters: MessageFilters, rule, send_chn, recv_chn):
        super().__init__(filters, rule, recv_chn)
        self.send_channel = send_chn

    @property
    def receive_channel(self):
        """The receive end of the filter's memory channel."""
        return self.queue

    async def aclose(self):
        """Remove the filter and close the channel's sending side."""
        self.close()
        await self.send_channel.aclose()

    async def __aenter__(self):
        return self.queue

    async def __aexit__(self, exc_type, exc, tb):
        await self.aclose()
class Future:
    """A minimal one-shot future for trio, backed by :class:`trio.Event`.

    Exactly one of :meth:`set_result` / :meth:`set_exception` should be
    called; :meth:`get` then returns the value or raises the exception.
    """

    def __init__(self):
        self._done = trio.Event()
        self._result = None

    def set_result(self, result):
        """Store a successful result and wake any waiting task."""
        self._result = Value(result)
        self._done.set()

    def set_exception(self, exc):
        """Store an exception and wake any waiting task."""
        self._result = Error(exc)
        self._done.set()

    async def get(self):
        """Wait for completion, then return the result or raise."""
        await self._done.wait()
        return self._result.unwrap()
class DBusRouter:
    """A client D-Bus connection which can wait for replies.

    This runs a separate receiver task and dispatches received messages.
    Create and run one via :meth:`DBusConnection.router`.
    """
    # NOTE(review): _nursery_mgr appears unused in this class — confirm
    # before removing.
    _nursery_mgr = None
    # Cancel scope of the receiver task; None while the router is stopped.
    _rcv_cancel_scope = None
    def __init__(self, conn: DBusConnection):
        self._conn = conn
        self._replies = ReplyMatcher()    # serial -> waiting Future
        self._filters = MessageFilters()  # rule -> channel subscriptions
    @property
    def unique_name(self):
        # The bus-assigned unique name of the underlying connection.
        return self._conn.unique_name
    async def send(self, message, *, serial=None):
        """Send a message, don't wait for a reply
        """
        await self._conn.send(message, serial=serial)
    async def send_and_get_reply(self, message) -> Message:
        """Send a method call message and wait for the reply

        Returns the reply message (method return or error message type).
        """
        check_replyable(message)
        if self._rcv_cancel_scope is None:
            raise RouterClosed("This DBusRouter has stopped")
        # Register the reply future *before* sending, so the receiver task
        # can't miss a fast reply.
        serial = next(self._conn.outgoing_serial)
        with self._replies.catch(serial, Future()) as reply_fut:
            await self.send(message, serial=serial)
            return (await reply_fut.get())
    def filter(self, rule, *, channel: Optional[trio.MemorySendChannel]=None, bufsize=1):
        """Create a filter for incoming messages

        Usage::

            async with router.filter(rule) as receive_channel:
                matching_msg = await receive_channel.receive()

            # OR:
            send_chan, recv_chan = trio.open_memory_channel(1)
            async with router.filter(rule, channel=send_chan):
                matching_msg = await recv_chan.receive()

        If the channel fills up, further matching messages are silently
        dropped (see :meth:`_dispatch`).

        The sending end of the channel is closed when leaving the ``async with``
        block, whether or not it was passed in.

        :param jeepney.MatchRule rule: Catch messages matching this rule
        :param trio.MemorySendChannel channel: Send matching messages here
        :param int bufsize: If no channel is passed in, create one with this size
        """
        if channel is None:
            channel, recv_channel = trio.open_memory_channel(bufsize)
        else:
            recv_channel = None
        return TrioFilterHandle(self._filters, rule, channel, recv_channel)
    # Task management -------------------------------------------
    async def start(self, nursery: trio.Nursery):
        # Launch the receiver task inside the given nursery; its cancel
        # scope is kept so aclose() can stop it later.
        if self._rcv_cancel_scope is not None:
            raise RuntimeError("DBusRouter receiver task is already running")
        self._rcv_cancel_scope = await nursery.start(self._receiver)
    async def aclose(self):
        """Stop the sender & receiver tasks"""
        # It doesn't matter if we receive a partial message - the connection
        # should ensure that whatever is received is fed to the parser.
        if self._rcv_cancel_scope is not None:
            self._rcv_cancel_scope.cancel()
            self._rcv_cancel_scope = None
        # Ensure trio checkpoint
        await trio.sleep(0)
    # Code to run in receiver task ------------------------------------
    def _dispatch(self, msg: Message):
        """Handle one received message"""
        # Method returns/errors go to the task waiting on the serial;
        # everything else is offered to the registered filters.
        if self._replies.dispatch(msg):
            return
        for filter in self._filters.matches(msg):
            try:
                filter.send_channel.send_nowait(msg)
            except trio.WouldBlock:
                # Filter channel is full: drop the message rather than block
                # the receiver loop.
                pass
    async def _receiver(self, task_status=trio.TASK_STATUS_IGNORED):
        """Receiver loop - runs in a separate task"""
        with trio.CancelScope() as cscope:
            # is_running is a plain status flag, set for the lifetime of
            # this task.
            self.is_running = True
            task_status.started(cscope)
            try:
                while True:
                    msg = await self._conn.receive()
                    self._dispatch(msg)
            finally:
                self.is_running = False
                # Send errors to any tasks still waiting for a message.
                self._replies.drop_all()
                # Closing a memory channel can't block, but it only has an
                # async close method, so we need to shield it from cancellation.
                with trio.move_on_after(3) as cleanup_scope:
                    for filter in self._filters.filters.values():
                        cleanup_scope.shield = True
                        await filter.send_channel.aclose()
class Proxy(ProxyBase):
"""A trio proxy for calling D-Bus methods
You can call methods on the proxy object, such as ``await bus_proxy.Hello()``
to make a method call over D-Bus and wait for a reply. It will either
return a tuple of returned data, or raise | |
# Copyright (c) 2012-2016, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Todo:
# - Allow persistence of discovered servers.
# - The control point should wait at least the amount of time specified in the
# MX header for responses to arrive from devices.
# - Date/datetime
# - Store all properties
# - SSDP.discover(st): Allow to discover only certain service types
# - .find() method on most classes.
# - async discover (if possible).
# - Read parameter types and verify them when doing a call.
# - Marshall return values to the correct databases.
# - Handle SOAP error:
# <?xml version="1.0"?>
# <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <s:Fault>
# <faultcode>s:Client</faultcode>
# <faultstring>UPnPError</faultstring>
# <detail>
# <UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
# <errorCode xmlns="">714</errorCode>
# <errorDescription xmlns="">No such entry in array</errorDescription>
# </UPnPError>
# </detail>
# </s:Fault>
# </s:Body>
# </s:Envelope>
# - Test params and responses with XML entities in them "<", "&", etc.
# - AllowedValueRange
# <allowedValueRange>
# <minimum>minimum value</minimum>
# <maximum>maximum value</maximum>
# <step>increment value</step>
# </allowedValueRange>
# - Name params as 'NewFoo', or not (See spec)?
"""
This module provides an UPnP Control Point (client), and provides an easy
interface to discover and communicate with UPnP servers. It implements SSDP
(Simple Service Discovery Protocol), SCPD (Simple Control Point Definition) and
a minimal SOAP (Simple Object Access Protocol) implementation.
The usual flow for working with UPnP servers is:
- Discover UPnP servers using SSDP.
SSDP is a simple HTTP-over-UDP protocol. An M-SEARCH HTTP request is broad-
casted over the network and any UPnP servers should respond with an HTTP
response. This response includes an URL to an XML file containing information
about the server. The SSDP.discover() method returns a list of Server
instances. If you already know the URL of the XML file, you can skip this
step and instantiate a Server instance directly.
- Inspect Server capabilities using SCPD.
The XML file returned by UPnP servers during discovery is read and information
about the server and the services it offers is stored in a Server instance. The
Server.services property contains a list of Service instances supported by that
server.
- Inspect Services capabilities using SCPD.
Each Server may contain more than one Services. For each Service, a separate
XML file exists. The Service class reads that XML file and determines which
actions a service supports. The Service.actions property contains a list of
Action instances supported by that service.
- Inspect an Action using SCPD.
An Action instance may be inspected to determine which arguments need to be
passed into it and what it returns. Information on the type and possible
values of each argument can also be queried.
- Call an Action using SOAP.
An Action instance may then be called using the Action.call(arguments) method.
The Action class will verify the correctness of arguments, possibly
converting them. A SOAP call is then made to the UPnP server and the results
are returned.
Classes:
* SSDP: Discover UPnP servers using the SSDP class.
* Server: Connect to an UPnP server and retrieve information/capabilities using the Server class.
* Service: Query a Server class instance for the various services it supports.
* Action: Query a Service class instance for the various actions it supports and call them.
Various convenience methods are provided at almost all levels. For instance,
the find_action() methods can directly find a method (by name) in an UPnP
server/service. The call() method can be used at most levels to directly call
an action.
The following example discovers all UPnP servers on the local network and then
dumps all their services and actions:
------------------------------------------------------------------------------
import upnpclient
ssdp = upnpclient.SSDP()
servers = ssdp.discover()
for server in servers:
print "%s: %s" % (server.friendly_name, server.model_description)
for service in server.services:
print " %s" % (service.service_type)
for action in service.actions:
print " %s" % (action.name)
for arg_name, arg_def in action.argsdef_in:
valid = ', '.join(arg_def['allowed_values']) or '*'
print " in: %s (%s): %s" % (arg_name, arg_def['datatype'], valid)
for arg_name, arg_def in action.argsdef_out:
valid = ', '.join(arg_def['allowed_values']) or '*'
print " out: %s (%s): %s" % (arg_name, arg_def['datatype'], valid)
------------------------------------------------------------------------------
"""
import logging
import socket
import struct
import urllib2
import xml.dom.minidom
import sys
from urlparse import urljoin
def _XMLGetNodeText(node):
"""
Return text contents of an XML node.
"""
text = []
for childNode in node.childNodes:
if childNode.nodeType == node.TEXT_NODE:
text.append(childNode.data)
return(''.join(text))
def _XMLFindNodeText(node, tag_name):
    """
    Find the first XML node matching `tag_name` and return its text contents.
    If no node is found, return empty string. Use for non-required nodes.
    """
    matches = node.getElementsByTagName(tag_name)
    if not matches:
        return ''
    return _XMLGetNodeText(matches[0])
def _getLogger(name):
"""
Retrieve a logger instance. Checks if a handler is defined so we avoid the
'No handlers could be found' message.
"""
logger = logging.getLogger(name)
if not logging.root.handlers:
logger.disabled = 1
return(logger)
class UPNPError(Exception):
    """
    Exception class for UPnP errors.
    """
    pass
class SSDP(object):
    """
    Simple Service Discovery Protocol. The SSDP class allows for discovery of
    UPnP devices by broadcasting on the local network. It does so by sending an
    HTTP M-SEARCH command over multicast UDP. The `discover()` method does the
    actual discovering. It returns a list of `upnp.Server` class instances of
    servers that responded. After discovery, these servers can also be accessed
    through the `servers` property.

    Example:

    >>> ssdp = SSDP(1)
    >>> servers = ssdp.discover()
    >>> print servers
    [<Server 'SpeedTouch 546 192.168.3.11 UPnP/1.0 (0612BH95K)'>, <Server 'Linux/2.6.35-31-generic, UPnP/1.0, Free UPnP Entertainment Service/0.655'>]
    """
    def __init__(self, wait_time=2, listen_port=12333):
        """
        Create a new SSDP class. `wait_time` determines how long to wait for
        responses from servers. `listen_port` determines the UDP port on which
        to send/receive replies.
        """
        self.listen_port = listen_port
        self.wait_time = wait_time
        self._log = _getLogger('SSDP')
    def discover_raw(self):
        """
        Discover UPnP devices on the network via UDP multicast. Returns a list
        of dictionaries, each of which contains the HTTPMU reply headers.
        """
        msg = \
            'M-SEARCH * HTTP/1.1\r\n' \
            'HOST:172.16.31.10:1900\r\n' \
            'MAN:"ssdp:discover"\r\n' \
            'MX:2\r\n' \
            'ST:upnp:rootdevice\r\n' \
            '\r\n'
        # Send discovery broadcast message
        self._log.debug('M-SEARCH broadcast discovery')
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.settimeout(self.wait_time)
        s.sendto(msg, ('172.16.31.10', 1900))
        # Collect replies until the socket times out
        ssdp_replies = []
        try:
            while True:
                self._log.debug('Waiting for replies...')
                data, addr = s.recvfrom(65507)
                # Parse "Key: Value" response lines into a dict with
                # lower-cased keys.
                ssdp_reply_headers = {}
                for line in data.splitlines():
                    if ':' in line:
                        key, value = line.split(':', 1)
                        ssdp_reply_headers[key.strip().lower()] = value.strip()
                self._log.info('Response from %s:%i %s' % (addr[0], addr[1], ssdp_reply_headers['server']))
                self._log.info('%s:%i at %s' % (addr[0], addr[1], ssdp_reply_headers['location']))
                if ssdp_reply_headers not in ssdp_replies:
                    # Prevent multiple responses from showing up multiple
                    # times.
                    ssdp_replies.append(ssdp_reply_headers)
        except socket.timeout:
            pass
        s.close()
        return ssdp_replies
    def discover(self):
        """
        Convenience method to discover UPnP devices on the network. Returns a
        list of `upnp.Server` instances. Any invalid servers are silently
        ignored. If you do not want this, use the `SSDP.discover_raw` method.
        """
        servers = []
        for ssdp_reply in self.discover_raw():
            try:
                upnp_server = Server(ssdp_reply['location'], ssdp_reply['server'])
                servers.append(upnp_server)
            # FIX: 'except Exception, e' was Python-2-only syntax; 'as' is
            # valid in Python 2.6+ and Python 3.
            except Exception as e:
                # Invalid/unreachable servers are logged and skipped.
                self._log.error('Error \'%s\' for %s' % (e, ssdp_reply['server']))
        return servers
class Server(object):
"""
UPNP Server represention.
This class represents an UPnP server. `location` is an URL to a control XML
file, per UPnP standard section 2.1 ('Device Description'). This MUST match
the URL as given in the 'Location' header when using discovery (SSDP).
`server_name` is a name for the server, which may be obtained using the
SSDP class or may be made up by the caller.
| |
<gh_stars>0
import numpy as np
import pandas as pd
from interflow.reader import *
def convert_kwh_bbtu(value: float) -> float:
    """Convert energy from kilowatt-hours to billion BTU.

    :param value: value in kilowatt-hours of energy
    :type value: float
    :return: value in bbtu
    """
    KWH_TO_BBTU = 0.000003412140
    return value * KWH_TO_BBTU
def convert_mwh_bbtu(value: float) -> float:
    """Convert energy from megawatt-hours to billion BTU.

    :param value: value in megawatt-hours of energy
    :type value: float
    :return: value in bbtu
    """
    # NOTE(review): this factor (0.003412) carries fewer significant digits
    # than the per-kWh factor in convert_kwh_bbtu (3.412140e-6); confirm
    # whether the truncation is intentional before changing it.
    MWH_TO_BBTU = 0.003412
    return value * MWH_TO_BBTU
def prep_water_use_2015(variables=None, all_variables=False) -> pd.DataFrame:
    """prepares 2015 water use data from USGS. Includes modifications such as replacing non-numeric values,
    reducing available variables in output dataframe, renaming variables appropriately,
    and returning a dataframe of specified variables.

    :param variables: None if no specific variables required in addition to FIPS code, state name,
                      and county name. Default is None, otherwise a list of additional
                      variables to include in returned dataframe.
    :type variables: list

    :param all_variables: Include all available variables in returned dataframe. Default is False.
    :type all_variables: bool

    :return: DataFrame of a water withdrawal and consumption values for 2015
             at the county level
    """
    # read in 2015 USGS data
    df = get_water_use_2015_data()

    # USGS codes missing data as "--"; treat it as zero
    df.replace("--", 0, inplace=True)

    # required variables from the full dataset
    variables_list = ['FIPS', 'STATE', 'COUNTY', 'TP-TotPop',
                      'PS-WGWFr', 'PS-WSWFr', 'PS-WGWSa', 'PS-WSWSa',
                      'DO-PSDel', 'DO-WGWFr', 'DO-WSWFr',
                      'IN-WGWFr', 'IN-WSWFr', 'IN-WGWSa', 'IN-WSWSa',
                      'MI-WGWFr', 'MI-WSWFr', 'MI-WGWSa', 'MI-WSWSa',
                      'IC-WGWFr', 'IC-WSWFr', 'IC-RecWW',
                      'IG-WGWFr', 'IG-WSWFr', 'IG-RecWW',
                      'LI-WGWFr', 'LI-WSWFr',
                      'AQ-WGWFr', 'AQ-WGWSa', 'AQ-WSWFr', 'AQ-WSWSa',
                      'IR-WGWFr', 'IR-WSWFr', 'IR-RecWW', 'IG-CUsFr', 'IC-CUsFr',
                      'IR-CUsFr', 'PS-Wtotl', 'PT-WGWFr', 'PT-WGWSa', 'PT-WSWFr',
                      'PT-WSWSa', 'PT-RecWW', 'PT-PSDel']

    # convert all columns that should be numerical to floats
    # (the first six columns are identifier/text columns)
    for col in df.columns[6:]:
        df[col] = df[col].astype(float)

    # reduce dataframe to the required variables
    df = df[variables_list]

    # add leading zeroes so FIPS code is always a 5-character string
    df['FIPS'] = df['FIPS'].apply(lambda x: '{0:0>5}'.format(x))

    # remove territories not included in sample analysis
    df = df[~df.STATE.isin(['PR', 'VI'])]

    # rename identification variables
    df = df.rename(columns={'STATE': 'State', 'COUNTY': 'County'})

    # set crop irrigation values equal to total irrigation values if there
    # are no separate crop (or golf) values reported for the county
    df['IC-WGWFr'] = np.where(((df['IC-WGWFr'] == 0) & (df['IR-WGWFr'] > 0) & (df['IG-WGWFr'] == 0)),
                              df['IR-WGWFr'],
                              df['IC-WGWFr'])

    df['IC-WSWFr'] = np.where(((df['IC-WSWFr'] == 0) & (df['IR-WSWFr'] > 0) & (df['IG-WSWFr'] == 0)),
                              df['IR-WSWFr'],
                              df['IC-WSWFr'])

    # BUG FIX: the original filled IC-RecWW gaps with IR-WGWFr (fresh
    # groundwater), a copy-paste error; recycled-wastewater gaps are filled
    # from IR-RecWW, matching the analogous state-level fill in
    # calc_irrigation_consumption().
    df['IC-RecWW'] = np.where(((df['IC-RecWW'] == 0) & (df['IR-RecWW'] > 0) & (df['IG-RecWW'] == 0)),
                              df['IR-RecWW'],
                              df['IC-RecWW'])

    # return the requested subset of columns
    if variables is not None:
        return df[variables]
    if all_variables:
        return df
    return df[['FIPS', 'State', 'County']]
def calc_irrigation_consumption() -> pd.DataFrame:
    """
    Takes 2015 USGS water flow data and calculates consumption fractions for crop irrigation and golf irrigation based
    on consumptive use in those sub-sectors. Additionally, water withdrawal values for crop irrigation are filled in
    with general irrigation values for counties with missing crop irrigation data.

    Only the identifier columns and the six crop/golf consumption-fraction
    columns are kept in the returned frame; the raw withdrawal values used
    in the calculation are dropped.

    :return: Dataframe of 2015 water flow values and irrigation sub-sector consumption
             fractions
    """
    # read in prepared 2015 USGS water data
    df = prep_water_use_2015(variables=['FIPS', 'State', 'County', 'IR-WGWFr', 'IR-WSWFr', 'IR-RecWW', 'IR-CUsFr',
                                        'IC-WGWFr', 'IC-WSWFr', 'IC-RecWW', 'IC-CUsFr',
                                        'IG-WGWFr', 'IG-WSWFr', 'IG-RecWW', 'IG-CUsFr'])
    # calculate fresh surface water consumption fractions for all irrigation to fill missing crop irrigation cells
    # (consumptive use divided by total withdrawals; guarded against divide-by-zero)
    df['IR_CU_FSW_frac'] = np.where((df['IR-WGWFr'] + df['IR-WSWFr'] + df['IR-RecWW']) > 0,
                                    df['IR-CUsFr'] / (df['IR-WGWFr'] + df['IR-WSWFr'] + df['IR-RecWW']),
                                    0)
    # calculate fresh surface water consumption fractions for crop irrigation where data is available
    df['IC_CU_FSW_frac'] = np.where((df['IC-WGWFr'] + df['IC-WSWFr'] + df['IC-RecWW']) > 0,
                                    df['IC-CUsFr'] / (df['IC-WGWFr'] + df['IC-WSWFr'] + df['IC-RecWW']),
                                    0)
    # calculate fresh surface water consumption fractions for golf irrigation where data is available
    df['IG_CU_FSW_frac'] = np.where((df['IG-WGWFr'] + df['IG-WSWFr'] + df['IG-RecWW']) > 0,
                                    df['IG-CUsFr'] / (df['IG-WGWFr'] + df['IG-WSWFr'] + df['IG-RecWW']),
                                    0)
    # replacing consumption fractions for counties with >100% consumption with 100%
    df['IR_CU_FSW_frac'] = np.where(df['IR_CU_FSW_frac'] > 1, 1, df['IR_CU_FSW_frac'])  # general irrigation
    df['IC_CU_FSW_frac'] = np.where(df['IC_CU_FSW_frac'] > 1, 1, df['IC_CU_FSW_frac'])  # crop irrigation
    df['IG_CU_FSW_frac'] = np.where(df['IG_CU_FSW_frac'] > 1, 1, df['IG_CU_FSW_frac'])  # golf irrigation
    # set groundwater and reclaimed wastewater consumption fractions equal to fresh surface water
    df['IR_CU_FGW_frac'] = df['IR_CU_FSW_frac']  # general irrigation, groundwater
    df['IR_CU_RWW_frac'] = df['IR_CU_FSW_frac']  # general irrigation, reclaimed wastewater
    df['IC_CU_FGW_frac'] = df['IC_CU_FSW_frac']  # crop irrigation, groundwater
    df['IC_CU_RWW_frac'] = df['IC_CU_FSW_frac']  # crop irrigation, reclaimed wastewater
    df['IG_CU_FGW_frac'] = df['IG_CU_FSW_frac']  # golf irrigation, groundwater
    df['IG_CU_RWW_frac'] = df['IG_CU_FSW_frac']  # golf irrigation, reclaimed wastewater
    # list of states that do not have specific crop and golf irrigation values, just total irrigation
    state_irrigation_adj_list = ['AR', 'HI', 'LA', 'MS', 'MO', 'MT', 'NE', 'NJ', 'ND',
                                 'OK', 'SD', 'TX', 'WI', 'WY', 'PR', 'VI']
    # fills crop irrigation values with total irrigation withdrawal and consumption values for states in list
    for state in state_irrigation_adj_list:
        # withdrawals
        df['IC-WSWFr'] = np.where(df['State'] == state, df['IR-WSWFr'], df['IC-WSWFr'])  # fresh surface water
        df['IC-WGWFr'] = np.where(df['State'] == state, df['IR-WGWFr'], df['IC-WGWFr'])  # fresh groundwater
        df['IC-RecWW'] = np.where(df['State'] == state, df['IR-RecWW'], df['IC-RecWW'])  # reclaimed wastewater
        # consumption fractions
        df['IC_CU_FSW_frac'] = np.where(df['State'] == state, df['IR_CU_FSW_frac'],
                                        df['IC_CU_FSW_frac'])  # fresh surface
        df['IC_CU_FGW_frac'] = np.where(df['State'] == state, df['IR_CU_FGW_frac'],
                                        df['IC_CU_FGW_frac'])  # fresh ground
        df['IC_CU_RWW_frac'] = np.where(df['State'] == state, df['IR_CU_RWW_frac'],
                                        df['IC_CU_RWW_frac'])  # reclaimed
    # rename variables
    # NOTE: the consumption-fraction columns are renamed to the source-flow
    # part of the output naming scheme (ending '_withdrawal_mgd' /
    # '_import_mgd'); the '_to_CMP_..._fraction' suffix appended below turns
    # each into a full source-to-consumption flow-fraction name.
    variable_dict = {'FIPS': 'FIPS',
                     'State': 'State',
                     'County': 'County',
                     'IC_CU_FSW_frac': 'AGR_crop_fresh_surfacewater_withdrawal_mgd',
                     'IC_CU_FGW_frac': 'AGR_crop_fresh_groundwater_withdrawal_mgd',
                     'IC_CU_RWW_frac': 'AGR_crop_reclaimed_wastewater_import_mgd',
                     'IG_CU_FSW_frac': 'AGR_golf_fresh_surfacewater_withdrawal_mgd',
                     'IG_CU_FGW_frac': 'AGR_golf_fresh_groundwater_withdrawal_mgd',
                     'IG_CU_RWW_frac': 'AGR_golf_reclaimed_wastewater_import_mgd'}
    variable_list = list(variable_dict.keys())
    df = df[variable_list]
    df = df.rename(columns=variable_dict)
    # create a list of sector water withdrawal variable name starters
    flow_list = ['AGR_crop_fresh_surfacewater_withdrawal_mgd', 'AGR_crop_fresh_groundwater_withdrawal_mgd',
                 'AGR_crop_reclaimed_wastewater_import_mgd', 'AGR_golf_fresh_surfacewater_withdrawal_mgd',
                 'AGR_golf_fresh_groundwater_withdrawal_mgd', 'AGR_golf_reclaimed_wastewater_import_mgd']
    # create a consumption name adder to add on to variable names
    adder = '_to_CMP_total_total_total_total_mgd_fraction'
    # build full variable names
    for var in flow_list:
        df = df.rename(columns={var: var + adder})
    return df
def rename_water_data_2015(variables=None, all_variables=False) -> pd.DataFrame:
    """
    Takes USGS 2015 flow values and calculated consumption fractions and renames them for higher description.

    :param variables: optional list of column names to return. When None and
        all_variables is False, only the regional identifiers are returned.
    :type variables: list
    :param all_variables: when True (and variables is None) return every column.
    :type all_variables: bool
    :return: returns a DataFrame of 2015 water flows and consumption fractions for agriculture
    """
    # load the 2015 USGS flows, including irrigation consumption calculations
    df = prep_water_use_2015(all_variables=True)

    # build an {original_name: new_name} mapping from the rename dataset
    rename_data = get_water_use_rename_data()
    name_map = dict(zip(rename_data.original_name, rename_data.new_name))

    # apply the more descriptive column names
    df = df.rename(columns=name_map)

    # select the requested columns
    if variables is not None:
        return df[variables]
    if all_variables:
        return df
    # default: regional identifiers only
    return df[['FIPS', "State", "County"]]
def calc_population_county_weight(df: pd.DataFrame) -> pd.DataFrame:
    """calculates the percentage of state total population by county and merges to provided dataframe
    by variable 'State'. Used in splitting up state-level estimates to the county level.

    :parameter df: dataframe of state-level values to combine with county population weights. Should only
        include State column as the regional identifier and include state-level values.
    :type df: Pandas DataFrame

    :return: DataFrame of water consumption fractions for various sectors by county
    """
    # county-level population from the (renamed) USGS 2015 dataset
    county_pop = rename_water_data_2015(all_variables=True)
    county_pop = county_pop[['FIPS', 'State', 'County', 'population']]

    # total population per state
    state_totals = county_pop.groupby("State", as_index=False).sum()
    state_totals = state_totals.rename(columns={"population": "state_pop_sum"})

    # attach each county's state total, then compute its share of that total
    county_pop = pd.merge(county_pop, state_totals, how='left', on='State')
    county_pop['pop_weight'] = county_pop['population'] / county_pop['state_pop_sum']
    county_pop = county_pop[['FIPS', 'State', 'County', 'pop_weight']]

    # broadcast the state-level input values onto each county row
    return pd.merge(df, county_pop, how="left", on="State")
def prep_water_use_1995(variables=None, all_variables=False) -> pd.DataFrame:
"""prepping 1995 water use data from USGS by replacing missing values, fixing FIPS codes,
and reducing to needed variables.
:param variables: None if no specific variables required in addition to FIPS code.
Default is None, otherwise a list of additional variables to include in
returned dataframe.
:type variables: list
:param all_variables: Include all available variables in returned dataframe. Default is False.
:type all_variables: bool
:return: DataFrame of water values for 1995 at the county level
"""
# read in 1995 | |
# Source repository: timgates42/bokeh
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import warnings
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import (
JSON,
Any,
Bool,
ColumnData,
Dict,
Enum,
Instance,
Int,
List,
PandasDataFrame,
PandasGroupBy,
Seq,
String,
)
from ..model import Model
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_array
from ..util.warnings import BokehUserWarning
from .callbacks import Callback, CustomJS
from .filters import Filter
from .selections import Selection, SelectionPolicy, UnionRenderers
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AjaxDataSource',
'CDSView',
'ColumnarDataSource',
'ColumnDataSource',
'DataSource',
'GeoJSONDataSource',
'ServerSentDataSource',
'WebSource',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class DataSource(Model):
    ''' A base class for data source types.

    Provides the selection state (``selected``) and the legacy selection
    ``callback`` shared by all concrete data source classes.

    '''
    # readonly: the Selection object is mutated in place, never replaced.
    selected = Instance(Selection, default=lambda: Selection(), readonly=True, help="""
    A Selection that indicates selected indices on this ``DataSource``.
    """)
    callback = Instance(Callback, help="""
    A callback to run in the browser whenever the selection is changed.
    .. note:
        This property is left for backwards compatibility, but may be deprecated
        in the future. Prefer ``source.selected.js_on_change(...)`` for new code.
    """)
@abstract
class ColumnarDataSource(DataSource):
    ''' A base class for data source types, which can be mapped onto
    a columnar format.

    '''
    # Default policy unions selections across all renderers sharing this source.
    selection_policy = Instance(SelectionPolicy, default=lambda: UnionRenderers(), help="""
    An instance of a ``SelectionPolicy`` that determines how selections are set.
    """)
class ColumnDataSource(ColumnarDataSource):
    ''' Maps names of columns to sequences or arrays.
    The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
    plots, data tables, etc. will be driven by a ``ColumnDataSource``.
    If the ``ColumnDataSource`` initializer is called with a single argument that
    can be any of the following:
    * A Python ``dict`` that maps string names to sequences of values, e.g.
      lists, arrays, etc.
      .. code-block:: python
          data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
          source = ColumnDataSource(data)
    .. note::
        ``ColumnDataSource`` only creates a shallow copy of ``data``. Use e.g.
        ``ColumnDataSource(copy.deepcopy(data))`` if initializing from another
        ``ColumnDataSource.data`` object that you want to keep independent.
    * A Pandas ``DataFrame`` object
      .. code-block:: python
          source = ColumnDataSource(df)
      In this case the CDS will have columns corresponding to the columns of
      the ``DataFrame``. If the ``DataFrame`` columns have multiple levels,
      they will be flattened using an underscore (e.g. level_0_col_level_1_col).
      The index of the ``DataFrame`` will be flattened to an ``Index`` of tuples
      if it's a ``MultiIndex``, and then reset using ``reset_index``. The result
      will be a column with the same name if the index was named, or
      level_0_name_level_1_name if it was a named ``MultiIndex``. If the
      ``Index`` did not have a name or the ``MultiIndex`` name could not be
      flattened/determined, the ``reset_index`` function will name the index column
      ``index``, or ``level_0`` if the name ``index`` is not available.
    * A Pandas ``GroupBy`` object
      .. code-block:: python
          group = df.groupby(('colA', 'ColB'))
      In this case the CDS will have columns corresponding to the result of
      calling ``group.describe()``. The ``describe`` method generates columns
      for statistical measures such as ``mean`` and ``count`` for all the
      non-grouped original columns. The CDS columns are formed by joining
      original column names with the computed measure. For example, if a
      ``DataFrame`` has columns ``'year'`` and ``'mpg'``. Then passing
      ``df.groupby('year')`` to a CDS will result in columns such as
      ``'mpg_mean'``
      If the ``GroupBy.describe`` result has a named index column, then
      CDS will also have a column with this name. However, if the index name
      (or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
      a column generically named ``index`` for the index.
      Note this capability to adapt ``GroupBy`` objects may only work with
      Pandas ``>=0.20.0``.
    .. note::
        There is an implicit assumption that all the columns in a given
        ``ColumnDataSource`` all have the same length at all times. For this
        reason, it is usually preferable to update the ``.data`` property
        of a data source "all at once".
    '''
    # Values assigned to ``data`` are coerced/validated on the way in:
    #   - DataFrame and GroupBy inputs are converted to plain column dicts via
    #     the ``_data_from_df`` / ``_data_from_groupby`` helpers below
    #   - mismatched column lengths emit a BokehUserWarning (not an error)
    data = ColumnData(String, Seq(Any), help="""
    Mapping of column names to sequences of data. The columns can be, e.g,
    Python lists or tuples, NumPy arrays, etc.
    The .data attribute can also be set from Pandas DataFrames or GroupBy
    objects. In these cases, the behaviour is identical to passing the objects
    to the ``ColumnDataSource`` initializer.
    """).accepts(
        PandasDataFrame, lambda x: ColumnDataSource._data_from_df(x)
    ).accepts(
        PandasGroupBy, lambda x: ColumnDataSource._data_from_groupby(x)
    ).asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
              lambda obj, name, data: warnings.warn(
                  "ColumnDataSource's columns must be of the same length. " +
                  "Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
``pandas.DataFrame``, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
raw_data = self._data_from_groupby(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super().__init__(**kw)
self.data.update(raw_data)
@property
def column_names(self):
''' A list of the column names in this data source.
'''
return list(self.data)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
_df = df.copy()
# Flatten columns
if isinstance(df.columns, pd.MultiIndex):
try:
_df.columns = ['_'.join(col) for col in _df.columns.values]
except TypeError:
raise TypeError('Could not flatten MultiIndex columns. '
'use string column names or flatten manually')
# Transform columns CategoricalIndex in list
if isinstance(df.columns, pd.CategoricalIndex):
_df.columns = df.columns.tolist()
# Flatten index
index_name = ColumnDataSource._df_index_name(df)
if index_name == 'index':
_df.index = pd.Index(_df.index.values)
else:
_df.index = pd.Index(_df.index.values, name=index_name)
_df.reset_index(inplace=True)
tmp_data = {c: v.values for c, v in _df.items()}
new_data = {}
for k, v in tmp_data.items():
new_data[k] = v
return new_data
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
suitable for creating a ``ColumnDataSource``.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@staticmethod
def _df_index_name(df):
''' Return the Bokeh-appropriate column name for a ``DataFrame`` index
If there is no named index, then `"index" is returned.
If there is a single named index, then ``df.index.name`` is returned.
If there is a multi-index, and the index names are all strings, then
the names are joined with '_' and the result is returned, e.g. for a
multi-index ``['ind1', 'ind2']`` the result will be "ind1_ind2".
Otherwise if any index name is not a string, the fallback name "index"
is returned.
Args:
df (DataFrame) : the ``DataFrame`` to find an index name for
Returns:
str
'''
if df.index.name:
return df.index.name
elif df.index.names:
try:
return "_".join(df.index.names)
except TypeError:
return "index"
else:
return "index"
    @classmethod
    def from_df(cls, data):
        ''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
        suitable for creating a ``ColumnDataSource``.

        Args:
            data (DataFrame) : data to convert

        Returns:
            dict[str, np.array]

        '''
        # public alias: delegates to the private conversion helper
        return cls._data_from_df(data)
    @classmethod
    def from_groupby(cls, data):
        ''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
        suitable for creating a ``ColumnDataSource``.

        The data generated is the result of running ``describe``
        on the group.

        Args:
            data (Groupby) : data to convert

        Returns:
            dict[str, np.array]

        '''
        # public alias: run describe() here, then reuse the DataFrame path
        return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas ``DataFrame``.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the | |
import SCons.Script as scons
import sys
import os.path
import subprocess
import copy
import collections
import shutil
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import scons_common as common
class DependancyRegistry(object):
    """Class for maintaining build dependancies.

    Nodes may be scons node objects or strings. String nodes beginning with
    '@' are "pseudo-libraries" (see the README): they participate in
    dependancy ordering but are excluded from returned results.
    """
    def __init__(self):
        # self.__deps[x] is the set() of all things upon which x directly
        # depends.
        self.__deps = {}

    def add(self, node, *args):
        """Inidcate that node depends on everything in args."""
        if node not in self.__deps:
            self.__deps[node] = set()
        self.__deps[node] = self.__deps[node].union(args)

    def __get_incoming_for_subgraph(self, start_nodes, set_dd):
        """Given start_nodes, the 1st set of dependancies to a program, populate
        set_dd, a collections.defaultdict(set) instance, so that set_dd[x] is
        the set of things that depend upon x starting from start_nodes."""
        for n in start_nodes:
            if n in self.__deps:
                # recurse first, then record the direct edges into n's deps
                self.__get_incoming_for_subgraph(self.__deps[n], set_dd)
                for d in self.__deps[n]:
                    set_dd[d].add(n)

    def get_ordered_deps(self, *nodes):
        """Uses a modified version of Kahn's topological sort algorithm to
        return a correctly ordered list of dependancies starting with the
        libraries listed in nodes. Pseudo-library (those the begin with '@' --
        see the README) will be correctly resolved but will not be in the
        returned list."""
        # The set of starting nodes is just nodes, but we need to know how many
        # things depend on each node for this particular sub-graph (the subgraph
        # where we start with the nodes passed).
        depends_on = collections.defaultdict(set)
        self.__get_incoming_for_subgraph(nodes, depends_on)
        # As the algorithm progress this is always the list of nodes that have
        # no unprocess things depending on them. They can thus be inserted into
        # the resulting list and expanded.
        to_process = [n for n in nodes if n not in depends_on]
        result = []
        while len(to_process) > 0:
            n = to_process.pop()
            # Don't insert "pseduo-libs" into the set of returned dependancies.
            if not isinstance(n, str) or not n.startswith('@'):
                result.append(n)
            if n in self.__deps:
                for nd in self.__deps[n]:
                    depends_on[nd].remove(n)
                    if len(depends_on[nd]) == 0:
                        del depends_on[nd]
                        to_process.append(nd)
        if len(depends_on) != 0:
            # FIX: under Python 3 interpolating the map() object printed
            # "<map object at ...>"; materialize the names first. (On
            # Python 2 this prints the same list repr as before.)
            print('Error. The dependancy graph starting with '
                  '%s contains cycles!' % list(map(str, nodes)))
            sys.exit(1)
        return result
# If passed unit tests run via SconsCPPHelper.__RunSingleUnitTest are run under
# valgrind.
scons.AddOption('--valgrind', dest='valgrind', action='store_true',
                default=False, help = 'When given in combination with '
                '--test this runs unit tests under valgrind.')
# Compiles a version for profiling with gprof
# (adds -pg to both compile and link flags; see SconsCPPHelper.__init__).
scons.AddOption('--gprof', dest='gprof', action='store_true',
                default=False, help='Compile with gprof support. If '
                'given running the program will produce a gmon.out file '
                'on exit which can be used with gprof')
class SconsCPPHelper(object):
    """Main class for building libraries, programs, and unit tests. See the
    README for details."""
    def __init__(self):
        """Configure compiler/linker flags from command-line options, pick the
        build output directory, run the autoconf-style library checks, and
        register the unit-test builder."""
        self.env = common.env
        # registry tracking inter-library dependancies for link ordering
        self.__deps = DependancyRegistry()
        self.env['CCFLAGS'].extend(['-Wall'])
        # The no-deprecated-declarations flag is because C++11 deprecates some
        # standard libary class and methods (most notably auto_ptr). Until we
        # convert all our code we'll need this flag.
        self.env['CCFLAGS'].extend(['-Icpp', '-std=c++0x',
                                    '-Wno-deprecated-declarations'])
        if scons.GetOption('opt'):
            # Note: -msse2 only works on x86_64. If we ever try to run on other
            # architectures we'll have to disable this.
            self.env['CCFLAGS'].extend(
                ['-O2', '-DNDEBUG', '-ftree-vectorize', '-msse2',])
            self.build_dir = 'opt'
        else:
            # default is a debug build
            self.env['CCFLAGS'].extend(['-g'])
            self.build_dir = 'debug'
        if scons.GetOption('gprof'):
            self.env['CCFLAGS'].extend(['-pg'])
            self.env['LINKFLAGS'].extend(['-pg'])
            # output is the concatenation of the normal location (debug or opt)
            # with -profile so the opt-profile directory contains an optimized
            # build with profiling and the debug-profile dir contains debug
            # build with profiling.
            self.build_dir = self.build_dir + '-profile'
        # probe for platform-specific library names (e.g. boost -mt variants)
        self.__AutoConf()
        common.GenericTestBuilder(
            'cpp_test', 'BoostTest', self.__RunSingleUnitTest)
        # Scons has trouble keeping track of the current directory when you use
        # VariantDir (the way we put the output of a build in a debug, opt, etc.
        # subdirectory). If using variant dirs simple things like calling
        # os.getcwd() return different results on different runs depending on
        # wheather the variant dir has been created yet or not. Even worse,
        # scons built-ins like File() and Dir() return *incorrect* results in
        # nested dirs and in inconsistent ways dues to race conditions so that
        # one call might return cpp/common/knot-impl/debug (correct), while the
        # next might return cpp/commong/debug/knot-impl/debug (note the extra
        # "debug" in the path name). One thing that *does* seem to consistently
        # work is getting the srcnode() of the sconscript file *before* calling
        # the sconscript file. We therefore use that trick to maintain our own
        # current source and working directory in cur_src_dir and cur_var_dir.
        # As long as you use the SConscript() method of this class to call
        # subsidiary SConstruct files, those files can rely on these variables
        # to give accurate paths.
        # The '#' character means the root directory of the entire scons build.
        # This is the cpp subdirectory of that.
        self.cur_src_dir = os.path.join(str(scons.Dir('#')), 'cpp')
        self.cur_var_dir = os.path.join(self.cur_src_dir, self.build_dir)
def __getattr__(self, attr):
"""Scons provides lots of builder methods and we don't need to override
all of them but we would like to make them all available to users. If we
haven't explicitly overridden it and our environment (self.env) has the
method we'll expose it like it's a method of this class. That way our
environment configuration is inherited and we can always override the
functionliaty in the future without needing to change any of our build
files."""
if hasattr(self.env, attr):
return getattr(self.env, attr)
else:
raise AttributeError('"%s" is not a valid builder method.' % attr)
def __AutoConf(self):
"""This does the equivalent of GNU autoconf - it tries to make the build
platform independent.
Note that I've only done the minimum amount necessary to get things to
compile on Ubuntu and CentOS (and specific versions of those to boot).
If you want to compile on anything else you will likely need to update
this."""
context = scons.Configure(self.env)
self.__lib_names = {}
# Check for boost libraries with various names
boost_libs = ['boost_thread', 'boost_regex',
'boost_unit_test_framework']
for lib in boost_libs:
# Prefer the multi-threaded versions (ending in -mt) if available
if context.CheckLib(lib + '-mt', language = 'C++'):
self.__lib_names[lib] = lib + '-mt'
elif context.CheckLib(lib, language = 'C++'):
self.__lib_names[lib] = lib
else:
print 'Error. Library %s not available' % lib
scons.Exit(1)
self.env = context.Finish()
def __get_platform_libs(self, libs):
result = []
for lib in libs:
if str(lib).startswith('#'):
result.append(self.__get_platform_lib(lib[1:]))
else:
result.append(lib)
return result
def __get_platform_lib(self, lib):
"""Some libraries can have different names/paths on different systems.
This takes a "canonical" library name and returns the platform-specific
name. For example, boost_thread is call boost_thread on Ubuntu but
boost_thread-mt on CentOS."""
if lib not in self.__lib_names:
print "Error! Library %s not found." % lib
sys.exit(1)
else:
return self.__lib_names[lib]
def Library(self, name, sources, libs = [], *args, **kwargs):
"""Build a library from the given sources. name is the name to give the
library. libs is the set of libraries this library depends on. *args and
**kwargs are passed directly to the scons Library rule."""
new_lib = self.env.Library(target = name, source = sources,
*args, **kwargs)
self.__deps.add('@' + name, new_lib[0])
self.__deps.add(new_lib[0], *libs)
def Program(self, name, sources, libs = [], *args, **kwargs):
"""Build a program from the given sources. name is the name to give the
library. libs is the set of libraries this program depends on. *args and
**kwargs are passed directly to the scons Library rule."""
all_libs = self.__deps.get_ordered_deps(*libs)
# For some libraries we use a "canonical" name. This maps that to the
# platoform-specific name.
all_libs = self.__get_platform_libs(all_libs)
program = self.env.Program(name, sources,
LIBS = all_libs, *args, **kwargs)
return program
def Lemon(self, src):
"""Run lemon to compile a .y file into a .cc and a .h file. The genrated
files have the same base name as src but end with .cc or .h.
Args:
src: the name of the source file. Assumed to be in the same
directory as the SConscript file from which builder.Lemon() was
called.
Returns:
scons node object for the .cc file generated. This can then be used
as input to other build rules. This does not return the .h file as
that is typically not used in build rules. The single file is
returned in a list as that is scons convention.
The output files will be put into a variant subdirectory (e.g. ./debug
for a debug build). We could put | |
db.engine.execute(global_statistics_table.update(where, new))
else:
db.engine.execute(global_statistics_table.insert(new))
def get_global_statistics_for_day(travelled_distances_rows, time):
    """Build the global statistics record for one day.

    Counts distinct users with travelled-distance rows over the trailing
    7-day window ending the day after `time`, and aggregates total distance
    and the distance-weighted average CO2 over the supplied rows.
    """
    time_end = time + timedelta(days=1)
    time_start = time_end - timedelta(days=7)
    query = '''
        SELECT COUNT(DISTINCT user_id)
        FROM travelled_distances
        WHERE time < :time_end
        AND time >= :time_start
    '''
    user_id_count_row = db.engine.execute(text(query), time_start=time_start, time_end=time_end).fetchone()
    id_count = user_id_count_row["count"]

    distance_sum = 0
    total_co2 = 0
    for row in travelled_distances_rows:
        distance = row["total_distance"]
        # rows with no distance contribute nothing
        if distance:
            distance_sum += distance
            total_co2 += distance * row["average_co2"]

    # distance-weighted average; zero when there was no travel at all
    average_co2 = total_co2 / distance_sum if distance_sum else 0

    return {'time': time,
            'average_co2_usage': average_co2,
            'past_week_certificates_number': id_count,
            'total_distance': distance_sum}
def generate_csv(rows):
    """Generate semicolon-delimited, double-quoted CSV lines from rows.

    Yields a fixed header line followed by one line per row. Values are
    rendered with str(); None becomes the empty string.

    FIX: embedded double quotes are now escaped by doubling (RFC 4180 style),
    which the previous implementation admitted it did not handle. (The
    comment about Python's CSV library not handling unicode dates from
    Python 2; kept as a poor-man's generator since Flask streams this.)
    """
    yield '"device_id";"time";"longitude";"latitude";"accuracy";"activity_guess_1";"activity_guess_1_conf";"activity_guess_2";"activity_guess_2_conf";"activity_guess_3";"activity_guess_3_conf";"waypoint_id"\n'
    def to_str(x):
        if x is None:
            return ''
        # double any embedded quote so quoted fields remain parseable
        return str(x).replace('"', '""')
    for row in rows:
        yield ';'.join(['"%s"' % (to_str(x)) for x in row]) + '\n'
def get_distinct_device_ids(datetime_start, datetime_end):
    """Return result rows of distinct device_id values recorded in
    device_data within [datetime_start, datetime_end)."""
    query = text('''
        SELECT DISTINCT device_id
        FROM device_data
        WHERE time >= :date_start
        AND time < :date_end;
    ''')
    return db.engine.execute(
        query, date_start=str(datetime_start), date_end=str(datetime_end))
def get_filtered_device_data_points(user_id, datetime_start, datetime_end):
    """Get trace with activity stabilized and mass transit detected, fusing
    legs and raw device data.

    :param user_id: user whose trace to fetch
    :param datetime_start: inclusive lower bound on point time
    :param datetime_end: exclusive upper bound on point time
    :return: result rows (geojson, time, activity, line_type, line_name)
        ordered by time
    """
    dd = db.metadata.tables["device_data"]
    legs = db.metadata.tables["leg_modes"]
    # Adjacent legs both cover their join point, but only if considered close
    # enough, so retrieving each point only once for the filtered data flavor
    # requires some finessing...
    # The lag() window gives each leg its predecessor's start time (per user,
    # ordered by time_start); points at a shared boundary are then attributed
    # only to the later leg via the prev_end comparison in the join below.
    # NOTE(review): the lag is over time_start yet labelled "prev_end" --
    # confirm whether the previous leg's time_end was intended.
    legs = select(
        [ func.lag(legs.c.time_start) \
            .over(partition_by=legs.c.user_id, order_by=legs.c.time_start)\
            .label("prev_end"),
          legs.c.device_id,
          legs.c.time_start,
          legs.c.time_end,
          legs.c.activity,
          legs.c.line_type,
          legs.c.line_name],
        and_(
            legs.c.user_id == user_id,
            legs.c.activity != None,
            legs.c.time_start <= datetime_end,
            legs.c.time_end >= datetime_start)).alias("lagged")
    # Join raw points to the leg covering them; each point appears once.
    return db.engine.execute(select(
        [ func.ST_AsGeoJSON(dd.c.coordinate).label("geojson"),
          dd.c.time,
          legs.c.activity,
          legs.c.line_type,
          legs.c.line_name],
        and_(
            dd.c.time >= datetime_start,
            dd.c.time < datetime_end),
        legs.join(dd, and_(
            legs.c.device_id == dd.c.device_id,
            between(dd.c.time, legs.c.time_start, legs.c.time_end),
            or_(legs.c.prev_end == None, dd.c.time > legs.c.prev_end))),
        order_by=dd.c.time))
def get_filtered_device_data_points_OLD(user_id, datetime_start, datetime_end):
    """Get trace with activity stabilized and mass transit detected, from
    legacy device_data_filtered."""
    # Legacy table is keyed by user rather than device; half-open time range.
    query = '''
        SELECT time,
            ST_AsGeoJSON(coordinate) AS geojson,
            activity,
            line_type,
            line_name
        FROM device_data_filtered
        WHERE user_id = :user_id
        AND time >= :time_start
        AND time < :time_end
        ORDER BY time ASC
    '''
    return db.engine.execute(
        text(query),
        user_id=user_id,
        time_start=datetime_start,
        time_end=datetime_end)
def data_points_by_user_id_after(user_id, datetime_start, datetime_end):
    """Return raw device_data rows for all of a user's devices with
    datetime_start < time < datetime_end, ordered by time."""
    query = '''
        SELECT device_id,
            ST_AsGeoJSON(coordinate) AS geojson,
            activity_1, activity_1_conf,
            activity_2, activity_2_conf,
            activity_3, activity_3_conf,
            waypoint_id,
            time
        FROM device_data
        WHERE device_id IN (SELECT id FROM devices WHERE user_id = :user_id)
        AND time > :time_start
        AND time < :time_end
        ORDER BY time ASC
    '''
    return db.engine.execute(
        text(query),
        user_id=user_id,
        time_start=datetime_start,
        time_end=datetime_end)
def data_points_snapping(device_id, datetime_start, datetime_end):
    """Return device_data rows in [datetime_start, datetime_end), either for
    one device or -- when device_id is 0 -- for all devices."""
    qstart = '''
        SELECT id,
            ST_AsGeoJSON(coordinate) AS geojson,
            accuracy,
            activity_1, activity_1_conf,
            activity_2, activity_2_conf,
            activity_3, activity_3_conf,
            waypoint_id,
            time
        FROM device_data
    '''
    params = {'time_start': datetime_start, 'time_end': datetime_end}
    if device_id == 0:
        # device_id 0 is a sentinel meaning "all devices"
        qstring = qstart + '''
        WHERE time >= :time_start
        AND time < :time_end
        ORDER BY time ASC
    '''
    else:
        qstring = qstart + '''
        WHERE device_id = :device_id
        AND time >= :time_start
        AND time < :time_end
        ORDER BY time ASC
    '''
        params['device_id'] = device_id
    return db.engine.execute(text(qstring), **params)
def get_waypoint_id_from_coordinate(coordinate):
    """Return the identifier of the waypoint closest to a given coordinate.

    :param: coordinate (geography(Point,4326))
    :return: waypoint_id (bigint), or None when no road is within 100 m or
        the coordinate is malformed
    """
    try:
        # nearest road within 100 m, then the nearest of its waypoints
        row = db.engine.execute(text("""
            SELECT id
            FROM roads_waypoints
            JOIN waypoints
            ON waypoint_id = waypoints.id
            LEFT JOIN LATERAL (
                SELECT osm_id
                FROM roads
                WHERE ST_DWithin(roads.geo, :coordinate, 100)
                ORDER BY ST_Distance(roads.geo, :coordinate) ASC
                LIMIT 1
            ) AS road ON true
            WHERE road_id = road.osm_id
            ORDER BY ST_Distance(waypoints.geo, :coordinate) ASC
            LIMIT 1 ;"""), coordinate=coordinate).first()
        if not row:
            return None
        return int(row[0])
    except DataError as e:
        # FIX: Exception.message does not exist on Python 3 (the handler
        # itself raised AttributeError); use str(e) instead.
        print('Exception in get_waypoint_id_from_coordinate: ' + str(e))
        return None
def match_mass_transit_legs(device, tstart, tend, activity):
    """Find mass transit matches already recorded in an existing leg, or None
    if no leg matching start/end/activity."""
    legs = db.metadata.tables["legs"]
    modes = db.metadata.tables["modes"]
    criteria = and_(
        legs.c.device_id == device,
        legs.c.time_start == tstart,
        legs.c.time_end == tend,
        legs.c.activity == activity)
    # Bail out early when no such leg has been recorded yet.
    leg_recorded = db.engine.execute(select([exists().where(criteria)])).scalar()
    if not leg_recorded:
        return None
    # Return the detected modes attached to the matching leg(s).
    return db.engine.execute(select(
        [modes.c.source, modes.c.mode, modes.c.line],
        criteria,
        legs.join(modes))).fetchall()
def match_mass_transit_filtered(device, tstart, tend):
    """Find mass transit match from legacy filtered data. Returns None if
    no filtered data for this device beyond end of range; otherwise (line_type,
    line_name) pair."""
    # Find out if filtered data extends this far forward. Due to gaps, looking
    # at just the given range would lead to firing actual detectors for very
    # old data.
    has_later_data = db.engine.execute(select(
        [1],
        and_(
            device_data_filtered_table.c.time >= tend,
            devices_table.c.id == device),
        device_data_filtered_table.join(
            devices_table,
            device_data_filtered_table.c.user_id==devices_table.c.user_id),
        ).limit(1)).scalar()
    if has_later_data is None:
        return None
    # XXX Should rather ORDER BY (line_type, line_name), so the pair can't be
    # mismatched, but the result row record seems like a pain to unpack?
    return db.engine.execute(
        text("""
            SELECT
                mode() WITHIN GROUP (ORDER BY line_type) line_type,
                mode() WITHIN GROUP (ORDER BY line_name) line_name
            FROM device_data_filtered f JOIN devices d USING (user_id)
            WHERE d.id = :device AND time BETWEEN :tstart AND :tend"""),
        device=device, tstart=tstart, tend=tend).first()
def match_mass_transit_live(device, tstart, tend, tradius, dradius, nsamples):
    """Find mass transit vehicles near user during a trip leg.

    Arguments:
    device -- device_data.device_id
    tstart -- start timestamp of leg
    tend -- end timestamp of leg
    tradius -- slack allowed in seconds between device and vehicle data point
    dradius -- slack allowed in metres between device and vehicle data point
    nsamples -- match using given number of points at most

    Result columns:
    revsum -- each metre inside dradius counts toward the summed distance score
    hitrate -- fraction of times device and vehicle within dradius and tradius
    vehicle_ref, line_type, line_name -- as in mass_transit_data

    If no mass transit vehicle data exists prior to tstart, as would be the
    case when older data has been deleted, return None.

    Result rows are ordered best match first (hitrate desc, revsum desc).
    """
    # Find out if mass_transit_data extends this far back.
    # (Guards against running the expensive matching query below when the
    # vehicle history covering this leg has already been purged.)
    if None is db.engine.execute(select(
            [1], mass_transit_data_table.c.time <= tstart).limit(1)).scalar():
        return None
    # All the heavy lifting happens inside the database: sample the user's
    # trace, bound the vehicle data in time and space, then score each
    # vehicle by proximity to the sampled points.
    return db.engine.execute(
        text("""
-- find the relevant device data points
WITH fulltrace AS (
    SELECT coordinate, id, time, row_number() OVER (ORDER BY time) rn
    FROM device_data
    WHERE device_id = :device AND time >= :tstart AND time <= :tend),

-- sample from full trace to limit quadratic matching; uses integer sampling
-- interval so will end up with between [n/2, n] samples
trace AS (
    SELECT *
    FROM fulltrace
    WHERE rn % (
        SELECT ceil((1 + max(rn)) / (1.0 + :nsamples)) FROM fulltrace) = 0),

-- rough bbox margin meters to degrees of lon at lat, so overshoots on latitude
m2lon AS (
    SELECT :dradius
         / cos(pi() * max(abs(ST_Y(coordinate::geometry))) / 180)
         / 110574 AS x
    FROM trace),

-- bounding box for mass transit data, expand not symmetric in m but that's ok
bbox AS (
    SELECT ST_Expand(ST_Extent(coordinate::geometry), (SELECT x from m2lon)) x
    FROM trace),

-- bound the mass transit data in time and space
boxed AS (
    SELECT coordinate, vehicle_ref, line_type, line_name, time
    FROM mass_transit_data
    WHERE time > timestamp :tstart - interval ':tradius seconds'
        AND time < timestamp :tend + interval ':tradius seconds'
        AND coordinate::geometry @ (SELECT x FROM bbox)),

-- Join device points with linestrings of the trace of each vehicle around that
-- time. The line_name changes randomly on some vehicles, pick most frequent.
linetraces AS (
    SELECT
        d.id,
        d.coordinate,
        m.vehicle_ref,
        ST_MakeLine(m.coordinate::geometry ORDER BY m.time) line_trace,
        mode() WITHIN GROUP (ORDER BY m.line_type) line_type,
        mode() WITHIN GROUP (ORDER BY m.line_name) line_name
    FROM boxed m JOIN trace d
        ON abs(extract(epoch from (m.time - d.time))) <= :tradius
    GROUP BY d.id, d.coordinate, m.vehicle_ref),

-- Score matches by the distance inward of the match radius.
nearest AS (
    SELECT
        id,
        vehicle_ref,
        line_type,
        line_name,
        :dradius - ST_Distance(line_trace, coordinate) revdist
    FROM linetraces
    WHERE ST_Distance(line_trace, coordinate) <= :dradius)

-- Sum scores and count matches over user location trace. Some vehicles'
-- line_name and other fields flip randomly, pick most frequent.
SELECT
    sum(revdist) revsum,
    1.0 * count(*) / (SELECT count(*) FROM trace) hitrate,
    vehicle_ref,
    mode() WITHIN GROUP (ORDER BY line_type) line_type,
    mode() WITHIN GROUP (ORDER BY line_name) line_name
FROM nearest
GROUP BY vehicle_ref order by hitrate desc, revsum desc"""),
        device=device,
        tstart=tstart,
        tend=tend,
        tradius=tradius,
        dradius=dradius,
        nsamples=nsamples)
def hsl_alerts_insert(alerts):
    """Bulk-insert HSL alert rows; a no-op for an empty or None payload."""
    if not alerts:
        return
    db.engine.execute(hsl_alerts_table.insert(alerts))
def hsl_alerts_get_max():
"""
:return: max_alert_id, max_alert_end (max int, max timestamp)
"""
try:
max_alert_id = None
query = select([func.max(hsl_alerts_table.c.alert_id)])
row = db.engine.execute(query).first()
if row and row[0]:
max_alert_id = int(row[0])
max_alert_end = None
query = select([func.max(hsl_alerts_table.c.alert_end)])
row = db.engine.execute(query).first()
if row and row[0]:
max_alert_end = row[0]
return max_alert_id, max_alert_end
except DataError as e:
print('Exception in hsl_alerts_get_max: ' + | |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Cisco Systems Inc.
# @author: <NAME>, Cisco Systems Inc.
# @author: <NAME>, Cisco Systems Inc.
from sqlalchemy.orm import exc as s_exc
from testtools import matchers
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.plugins.cisco.common import cisco_constants
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
# Physical network names used by the VLAN allocation tests.
PHYS_NET = 'physnet1'
PHYS_NET_2 = 'physnet2'
# Initial VLAN id range synced into the allocation table in setUp().
VLAN_MIN = 10
VLAN_MAX = 19
VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]}
# Shifted ranges used to verify that re-syncing drops old allocation rows
# and creates rows for the new ranges (second physical network included).
UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 20, VLAN_MAX + 20)],
                       PHYS_NET_2: [(VLAN_MIN + 40, VLAN_MAX + 40)]}
# VXLAN (VNI) id ranges, analogous to the VLAN ranges above.
VXLAN_MIN = 5000
VXLAN_MAX = 5009
VXLAN_RANGES = [(VXLAN_MIN, VXLAN_MAX)]
UPDATED_VXLAN_RANGES = [(VXLAN_MIN + 20, VXLAN_MAX + 20)]
# Segment ranges used by profile overlap-detection tests in this module.
SEGMENT_RANGE = '200-220'
SEGMENT_RANGE_MIN_OVERLAP = '210-230'
SEGMENT_RANGE_MAX_OVERLAP = '190-209'
SEGMENT_RANGE_OVERLAP = '190-230'
# Fixed fake network ids; they only need to be unique strings.
TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz'
TEST_NETWORK_ID2 = 'abcdefghijklmnopqrstuvwxy2'
TEST_NETWORK_ID3 = 'abcdefghijklmnopqrstuvwxy3'
# Canned network/policy profile payloads for the profile and binding tests.
TEST_NETWORK_PROFILE = {'name': 'test_profile',
                        'segment_type': 'vlan',
                        'physical_network': 'physnet1',
                        'segment_range': '10-19'}
TEST_NETWORK_PROFILE_2 = {'name': 'test_profile_2',
                          'segment_type': 'vlan',
                          'physical_network': 'physnet1',
                          'segment_range': SEGMENT_RANGE}
TEST_NETWORK_PROFILE_VXLAN = {'name': 'test_profile',
                              'segment_type': 'overlay',
                              'sub_type': 'native_vxlan',
                              'segment_range': '5000-5009',
                              'multicast_ip_range': '172.16.31.10-192.168.3.11'}
TEST_POLICY_PROFILE = {'id': '4a417990-76fb-11e2-bcfd-0800200c9a66',
                       'name': 'test_policy_profile'}
TEST_NETWORK_PROFILE_MULTI_SEGMENT = {'name': 'test_profile',
                                      'segment_type': 'multi-segment'}
TEST_NETWORK_PROFILE_VLAN_TRUNK = {'name': 'test_profile',
                                   'segment_type': 'trunk',
                                   'sub_type': 'vlan'}
TEST_NETWORK_PROFILE_VXLAN_TRUNK = {'name': 'test_profile',
                                    'segment_type': 'trunk',
                                    'sub_type': 'overlay'}
def _create_test_network_profile_if_not_there(session,
                                              profile=TEST_NETWORK_PROFILE):
    """Return the network profile named in *profile*, creating it on demand."""
    query = session.query(n1kv_models_v2.NetworkProfile)
    try:
        return query.filter_by(name=profile['name']).one()
    except s_exc.NoResultFound:
        return n1kv_db_v2.create_network_profile(session, profile)
def _create_test_policy_profile_if_not_there(session,
                                             profile=TEST_POLICY_PROFILE):
    """Return the policy profile named in *profile*, creating it on demand."""
    query = session.query(n1kv_models_v2.PolicyProfile)
    try:
        return query.filter_by(name=profile['name']).one()
    except s_exc.NoResultFound:
        return n1kv_db_v2.create_policy_profile(profile)
class VlanAllocationsTest(base.BaseTestCase):
    """Exercise VLAN allocation syncing, reservation and release."""

    def setUp(self):
        super(VlanAllocationsTest, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        n1kv_db_v2.sync_vlan_allocations(self.session, VLAN_RANGES)
        self.addCleanup(db.clear_db)

    def _assert_vlan_missing(self, physical_network, vlan_id):
        # Looking up a VLAN id outside every synced range must raise.
        self.assertRaises(c_exc.VlanIDNotFound,
                          n1kv_db_v2.get_vlan_allocation,
                          self.session,
                          physical_network,
                          vlan_id)

    def _assert_vlan_unallocated(self, physical_network, vlan_id):
        # A synced but unreserved VLAN id must exist with allocated == False.
        self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
                                                        physical_network,
                                                        vlan_id).allocated)

    def test_sync_vlan_allocations_outside_segment_range(self):
        """Ids just outside the currently synced ranges are never present."""
        self._assert_vlan_missing(PHYS_NET, VLAN_MIN - 1)
        self._assert_vlan_missing(PHYS_NET, VLAN_MAX + 1)
        n1kv_db_v2.sync_vlan_allocations(self.session, UPDATED_VLAN_RANGES)
        self._assert_vlan_missing(PHYS_NET, VLAN_MIN + 20 - 1)
        self._assert_vlan_missing(PHYS_NET, VLAN_MAX + 20 + 1)
        self._assert_vlan_missing(PHYS_NET_2, VLAN_MIN + 40 - 1)
        self._assert_vlan_missing(PHYS_NET_2, VLAN_MAX + 40 + 1)
        # Sync back to the original ranges: the PHYS_NET_2 rows created by
        # UPDATED_VLAN_RANGES must be gone again.  (The original test
        # asserted VLAN_MIN + 20 twice; the duplicate is removed here.)
        n1kv_db_v2.sync_vlan_allocations(self.session, VLAN_RANGES)
        self._assert_vlan_missing(PHYS_NET_2, VLAN_MIN + 20)
        self._assert_vlan_missing(PHYS_NET_2, VLAN_MAX + 20)

    def test_sync_vlan_allocations_unallocated_vlans(self):
        """Boundary ids of each synced range exist and are unallocated."""
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MIN)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MIN + 1)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MAX - 1)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MAX)
        n1kv_db_v2.sync_vlan_allocations(self.session, UPDATED_VLAN_RANGES)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MIN + 20)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MIN + 20 + 1)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MAX + 20 - 1)
        self._assert_vlan_unallocated(PHYS_NET, VLAN_MAX + 20)
        self._assert_vlan_unallocated(PHYS_NET_2, VLAN_MIN + 40)
        self._assert_vlan_unallocated(PHYS_NET_2, VLAN_MIN + 40 + 1)
        self._assert_vlan_unallocated(PHYS_NET_2, VLAN_MAX + 40 - 1)
        self._assert_vlan_unallocated(PHYS_NET_2, VLAN_MAX + 40)

    def test_vlan_pool(self):
        """Reserving drains the pool; releasing makes ids reusable."""
        vlan_ids = set()
        p = _create_test_network_profile_if_not_there(self.session)
        # Drain the whole pool; every reserved id must lie inside the range.
        for x in xrange(VLAN_MIN, VLAN_MAX + 1):
            (physical_network, seg_type,
             vlan_id, m_ip) = n1kv_db_v2.reserve_vlan(self.session, p)
            self.assertEqual(physical_network, PHYS_NET)
            self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
            self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
            vlan_ids.add(vlan_id)
        # Pool is exhausted now; any further reservation must fail.
        self.assertRaises(n_exc.NoNetworkAvailable,
                          n1kv_db_v2.reserve_vlan,
                          self.session,
                          p)
        # Releasing a single id makes exactly one reservation possible again.
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(),
                                VLAN_RANGES)
        physical_network, seg_type, vlan_id, m_ip = (n1kv_db_v2.reserve_vlan(
            self.session, p))
        self.assertEqual(physical_network, PHYS_NET)
        self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
        self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
        vlan_ids.add(vlan_id)
        # Return everything to the pool.
        for vlan_id in vlan_ids:
            n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id,
                                    VLAN_RANGES)

    def test_specific_vlan_inside_pool(self):
        """Reserve/release a specific id from inside the configured range."""
        vlan_id = VLAN_MIN + 5
        self._assert_vlan_unallocated(PHYS_NET, vlan_id)
        n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
        self.assertTrue(n1kv_db_v2.get_vlan_allocation(self.session,
                                                       PHYS_NET,
                                                       vlan_id).allocated)
        # Reserving the same id twice must fail.
        self.assertRaises(n_exc.VlanIdInUse,
                          n1kv_db_v2.reserve_specific_vlan,
                          self.session,
                          PHYS_NET,
                          vlan_id)
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
        self._assert_vlan_unallocated(PHYS_NET, vlan_id)

    def test_specific_vlan_outside_pool(self):
        """An id outside the range is created on demand and removed again on
        release (not merely marked unallocated)."""
        vlan_id = VLAN_MAX + 5
        self._assert_vlan_missing(PHYS_NET, vlan_id)
        n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
        self.assertTrue(n1kv_db_v2.get_vlan_allocation(self.session, PHYS_NET,
                                                       vlan_id).allocated)
        self.assertRaises(n_exc.VlanIdInUse,
                          n1kv_db_v2.reserve_specific_vlan,
                          self.session,
                          PHYS_NET,
                          vlan_id)
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
        self._assert_vlan_missing(PHYS_NET, vlan_id)
class VxlanAllocationsTest(base.BaseTestCase,
                           n1kv_db_v2.NetworkProfile_db_mixin):
    """Exercise VXLAN allocation syncing, reservation and release."""

    def setUp(self):
        super(VxlanAllocationsTest, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        n1kv_db_v2.sync_vxlan_allocations(self.session, VXLAN_RANGES)
        self.addCleanup(db.clear_db)

    def test_sync_vxlan_allocations_outside_segment_range(self):
        """Ids just outside the synced range have no allocation rows."""
        lookup = n1kv_db_v2.get_vxlan_allocation
        for vxlan_id in (VXLAN_MIN - 1, VXLAN_MAX + 1):
            self.assertIsNone(lookup(self.session, vxlan_id))
        n1kv_db_v2.sync_vxlan_allocations(self.session, UPDATED_VXLAN_RANGES)
        for vxlan_id in (VXLAN_MIN + 20 - 1, VXLAN_MAX + 20 + 1):
            self.assertIsNone(lookup(self.session, vxlan_id))

    def test_sync_vxlan_allocations_unallocated_vxlans(self):
        """Boundary ids of the synced range exist and are unallocated."""
        lookup = n1kv_db_v2.get_vxlan_allocation
        for vxlan_id in (VXLAN_MIN, VXLAN_MIN + 1,
                         VXLAN_MAX - 1, VXLAN_MAX):
            self.assertFalse(lookup(self.session, vxlan_id).allocated)
        n1kv_db_v2.sync_vxlan_allocations(self.session, UPDATED_VXLAN_RANGES)
        for vxlan_id in (VXLAN_MIN + 20, VXLAN_MIN + 20 + 1,
                         VXLAN_MAX + 20 - 1, VXLAN_MAX + 20):
            self.assertFalse(lookup(self.session, vxlan_id).allocated)

    def test_vxlan_pool(self):
        """Drain the pool, hit exhaustion, then release everything."""
        vxlan_ids = set()
        profile = n1kv_db_v2.create_network_profile(self.session,
                                                    TEST_NETWORK_PROFILE_VXLAN)
        # Drain the whole pool; each reserved VNI must lie inside the range.
        for _ in xrange(VXLAN_MIN, VXLAN_MAX + 1):
            vxlan_id = n1kv_db_v2.reserve_vxlan(self.session, profile)[2]
            self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
            self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
            vxlan_ids.add(vxlan_id)
        # Pool is exhausted now.
        self.assertRaises(n_exc.NoNetworkAvailable,
                          n1kv_db_v2.reserve_vxlan,
                          self.session,
                          profile)
        # Releasing one VNI makes exactly one reservation possible again.
        n1kv_db_v2.release_vxlan(self.session, vxlan_ids.pop(), VXLAN_RANGES)
        vxlan_id = n1kv_db_v2.reserve_vxlan(self.session, profile)[2]
        self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
        self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
        vxlan_ids.add(vxlan_id)
        for vxlan_id in vxlan_ids:
            n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        n1kv_db_v2.delete_network_profile(self.session, profile.id)

    def test_specific_vxlan_inside_pool(self):
        """Reserve/release a specific VNI inside the configured range."""
        vxlan_id = VXLAN_MIN + 5
        lookup = n1kv_db_v2.get_vxlan_allocation
        self.assertFalse(lookup(self.session, vxlan_id).allocated)
        n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
        self.assertTrue(lookup(self.session, vxlan_id).allocated)
        self.assertRaises(c_exc.VxlanIdInUse,
                          n1kv_db_v2.reserve_specific_vxlan,
                          self.session,
                          vxlan_id)
        n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        self.assertFalse(lookup(self.session, vxlan_id).allocated)

    def test_specific_vxlan_outside_pool(self):
        """A specific VNI outside the range is created on demand and removed
        again on release."""
        vxlan_id = VXLAN_MAX + 5
        lookup = n1kv_db_v2.get_vxlan_allocation
        self.assertIsNone(lookup(self.session, vxlan_id))
        n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
        self.assertTrue(lookup(self.session, vxlan_id).allocated)
        self.assertRaises(c_exc.VxlanIdInUse,
                          n1kv_db_v2.reserve_specific_vxlan,
                          self.session,
                          vxlan_id)
        n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        self.assertIsNone(lookup(self.session, vxlan_id))
class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
    """Tests for network-to-segment binding bookkeeping in the N1KV DB layer:
    plain VLAN bindings, multi-segment bindings and trunk bindings."""

    def setUp(self):
        super(NetworkBindingsTest, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)

    def test_add_network_binding(self):
        """A plain VLAN binding can be added and read back field-by-field."""
        with self.network() as network:
            # Shadow the module-level constant with the real network id.
            TEST_NETWORK_ID = network['network']['id']
            # No binding exists until one is explicitly added.
            self.assertRaises(c_exc.NetworkBindingNotFound,
                              n1kv_db_v2.get_network_binding,
                              self.session,
                              TEST_NETWORK_ID)
            p = _create_test_network_profile_if_not_there(self.session)
            n1kv_db_v2.add_network_binding(
                self.session, TEST_NETWORK_ID, 'vlan',
                PHYS_NET, 1234, '0.0.0.0', p.id, None)
            binding = n1kv_db_v2.get_network_binding(
                self.session, TEST_NETWORK_ID)
            self.assertIsNotNone(binding)
            self.assertEqual(binding.network_id, TEST_NETWORK_ID)
            self.assertEqual(binding.network_type, 'vlan')
            self.assertEqual(binding.physical_network, PHYS_NET)
            self.assertEqual(binding.segmentation_id, 1234)

    def test_create_multi_segment_network(self):
        """A multi-segment binding has no physical network and id 0."""
        with self.network() as network:
            TEST_NETWORK_ID = network['network']['id']
            self.assertRaises(c_exc.NetworkBindingNotFound,
                              n1kv_db_v2.get_network_binding,
                              self.session,
                              TEST_NETWORK_ID)
            p = _create_test_network_profile_if_not_there(
                self.session,
                TEST_NETWORK_PROFILE_MULTI_SEGMENT)
            n1kv_db_v2.add_network_binding(
                self.session, TEST_NETWORK_ID, 'multi-segment',
                None, 0, '0.0.0.0', p.id, None)
            binding = n1kv_db_v2.get_network_binding(
                self.session, TEST_NETWORK_ID)
            self.assertIsNotNone(binding)
            self.assertEqual(binding.network_id, TEST_NETWORK_ID)
            self.assertEqual(binding.network_type, 'multi-segment')
            self.assertIsNone(binding.physical_network)
            self.assertEqual(binding.segmentation_id, 0)

    def test_add_multi_segment_binding(self):
        """Member-segment pairs of a multi-segment network can be added,
        queried and deleted again."""
        with self.network() as network:
            TEST_NETWORK_ID = network['network']['id']
            self.assertRaises(c_exc.NetworkBindingNotFound,
                              n1kv_db_v2.get_network_binding,
                              self.session,
                              TEST_NETWORK_ID)
            p = _create_test_network_profile_if_not_there(
                self.session,
                TEST_NETWORK_PROFILE_MULTI_SEGMENT)
            # The final argument is the list of member segment pairs.
            n1kv_db_v2.add_network_binding(
                self.session, TEST_NETWORK_ID, 'multi-segment',
                None, 0, '0.0.0.0', p.id,
                [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
            binding = n1kv_db_v2.get_network_binding(
                self.session, TEST_NETWORK_ID)
            self.assertIsNotNone(binding)
            self.assertEqual(binding.network_id, TEST_NETWORK_ID)
            self.assertEqual(binding.network_type, 'multi-segment')
            self.assertIsNone(binding.physical_network)
            self.assertEqual(binding.segmentation_id, 0)
            ms_binding = (n1kv_db_v2.get_multi_segment_network_binding(
                self.session, TEST_NETWORK_ID,
                (TEST_NETWORK_ID2, TEST_NETWORK_ID3)))
            self.assertIsNotNone(ms_binding)
            self.assertEqual(ms_binding.multi_segment_id, TEST_NETWORK_ID)
            self.assertEqual(ms_binding.segment1_id, TEST_NETWORK_ID2)
            self.assertEqual(ms_binding.segment2_id, TEST_NETWORK_ID3)
            ms_members = (n1kv_db_v2.get_multi_segment_members(
                self.session, TEST_NETWORK_ID))
            self.assertEqual(ms_members,
                             [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
            # Both member segments are reported as multi-segment members.
            self.assertTrue(n1kv_db_v2.is_multi_segment_member(
                self.session, TEST_NETWORK_ID2))
            self.assertTrue(n1kv_db_v2.is_multi_segment_member(
                self.session, TEST_NETWORK_ID3))
            # Deleting the pair leaves the network with no members.
            n1kv_db_v2.del_multi_segment_binding(
                self.session, TEST_NETWORK_ID,
                [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
            ms_members = (n1kv_db_v2.get_multi_segment_members(
                self.session, TEST_NETWORK_ID))
            self.assertEqual(ms_members, [])

    def test_create_vlan_trunk_network(self):
        """A VLAN trunk binding has no physical network and id 0."""
        with self.network() as network:
            TEST_NETWORK_ID = network['network']['id']
            self.assertRaises(c_exc.NetworkBindingNotFound,
                              n1kv_db_v2.get_network_binding,
                              self.session,
                              TEST_NETWORK_ID)
            p = _create_test_network_profile_if_not_there(
                self.session,
                TEST_NETWORK_PROFILE_VLAN_TRUNK)
            n1kv_db_v2.add_network_binding(
                self.session, TEST_NETWORK_ID, 'trunk',
                None, 0, '0.0.0.0', p.id, None)
            binding = n1kv_db_v2.get_network_binding(
                self.session, TEST_NETWORK_ID)
            self.assertIsNotNone(binding)
            self.assertEqual(binding.network_id, TEST_NETWORK_ID)
            self.assertEqual(binding.network_type, 'trunk')
            self.assertIsNone(binding.physical_network)
            self.assertEqual(binding.segmentation_id, 0)

    def test_create_vxlan_trunk_network(self):
        """A VXLAN trunk binding has no physical network and id 0."""
        with self.network() as network:
            TEST_NETWORK_ID = network['network']['id']
            self.assertRaises(c_exc.NetworkBindingNotFound,
                              n1kv_db_v2.get_network_binding,
                              self.session,
                              TEST_NETWORK_ID)
            p = _create_test_network_profile_if_not_there(
                self.session,
                TEST_NETWORK_PROFILE_VXLAN_TRUNK)
            n1kv_db_v2.add_network_binding(
                self.session, TEST_NETWORK_ID, 'trunk',
                None, 0, '0.0.0.0', p.id, None)
            binding = n1kv_db_v2.get_network_binding(
                self.session, TEST_NETWORK_ID)
            self.assertIsNotNone(binding)
            self.assertEqual(binding.network_id, TEST_NETWORK_ID)
            self.assertEqual(binding.network_type, 'trunk')
            self.assertIsNone(binding.physical_network)
            self.assertEqual(binding.segmentation_id, 0)

    def test_add_vlan_trunk_binding(self):
        """A VLAN member segment can be added to and removed from a trunk."""
        with self.network() as network1:
            with self.network() as network2:
                TEST_NETWORK_ID = network1['network']['id']
                self.assertRaises(c_exc.NetworkBindingNotFound,
                                  n1kv_db_v2.get_network_binding,
                                  self.session,
                                  TEST_NETWORK_ID)
                TEST_NETWORK_ID2 = network2['network']['id']
                self.assertRaises(c_exc.NetworkBindingNotFound,
                                  n1kv_db_v2.get_network_binding,
                                  self.session,
                                  TEST_NETWORK_ID2)
                # Bind the member network as a plain VLAN segment first.
                p_v = _create_test_network_profile_if_not_there(self.session)
                n1kv_db_v2.add_network_binding(
                    self.session, TEST_NETWORK_ID2, 'vlan',
                    PHYS_NET, 1234, '0.0.0.0', p_v.id, None)
                p = _create_test_network_profile_if_not_there(
                    self.session,
                    TEST_NETWORK_PROFILE_VLAN_TRUNK)
                # Trunk with one member segment (dot1q tag 0).
                n1kv_db_v2.add_network_binding(
                    self.session, TEST_NETWORK_ID, 'trunk',
                    None, 0, '0.0.0.0', p.id, [(TEST_NETWORK_ID2, 0)])
                binding = n1kv_db_v2.get_network_binding(
                    self.session, TEST_NETWORK_ID)
                self.assertIsNotNone(binding)
                self.assertEqual(binding.network_id, TEST_NETWORK_ID)
                self.assertEqual(binding.network_type, 'trunk')
                # NOTE(review): the trunk binding was added with
                # physical_network=None, yet PHYS_NET is expected here —
                # presumably the DB layer inherits it from the VLAN member;
                # confirm against n1kv_db_v2.add_network_binding.
                self.assertEqual(binding.physical_network, PHYS_NET)
                self.assertEqual(binding.segmentation_id, 0)
                t_binding = (n1kv_db_v2.get_trunk_network_binding(
                    self.session, TEST_NETWORK_ID,
                    (TEST_NETWORK_ID2, 0)))
                self.assertIsNotNone(t_binding)
                self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
                self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
                # The dot1q tag is stored as a string.
                self.assertEqual(t_binding.dot1qtag, '0')
                t_members = (n1kv_db_v2.get_trunk_members(
                    self.session, TEST_NETWORK_ID))
                self.assertEqual(t_members,
                                 [(TEST_NETWORK_ID2, '0')])
                self.assertTrue(n1kv_db_v2.is_trunk_member(
                    self.session, TEST_NETWORK_ID2))
                n1kv_db_v2.del_trunk_segment_binding(
                    self.session, TEST_NETWORK_ID,
                    [(TEST_NETWORK_ID2, '0')])
                t_members = (n1kv_db_v2.get_multi_segment_members(
                    self.session, TEST_NETWORK_ID))
                self.assertEqual(t_members, [])

    def test_add_vxlan_trunk_binding(self):
        """A VXLAN member segment can be added to and removed from a trunk."""
        with self.network() as network1:
            with self.network() as network2:
                TEST_NETWORK_ID = network1['network']['id']
                self.assertRaises(c_exc.NetworkBindingNotFound,
                                  n1kv_db_v2.get_network_binding,
                                  self.session,
                                  TEST_NETWORK_ID)
                TEST_NETWORK_ID2 = network2['network']['id']
                self.assertRaises(c_exc.NetworkBindingNotFound,
                                  n1kv_db_v2.get_network_binding,
                                  self.session,
                                  TEST_NETWORK_ID2)
                # NOTE(review): the overlay member is bound with the trunk
                # profile rather than a vxlan profile — looks like a reuse
                # shortcut; confirm it is intentional.
                p_v = _create_test_network_profile_if_not_there(
                    self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK)
                n1kv_db_v2.add_network_binding(
                    self.session, TEST_NETWORK_ID2, 'overlay',
                    None, 5100, '172.16.31.10', p_v.id, None)
                p = _create_test_network_profile_if_not_there(
                    self.session,
                    TEST_NETWORK_PROFILE_VXLAN_TRUNK)
                # Trunk with one member segment (dot1q tag 5).
                n1kv_db_v2.add_network_binding(
                    self.session, TEST_NETWORK_ID, 'trunk',
                    None, 0, '0.0.0.0', p.id,
                    [(TEST_NETWORK_ID2, 5)])
                binding = n1kv_db_v2.get_network_binding(
                    self.session, TEST_NETWORK_ID)
                self.assertIsNotNone(binding)
                self.assertEqual(binding.network_id, TEST_NETWORK_ID)
                self.assertEqual(binding.network_type, 'trunk')
                self.assertIsNone(binding.physical_network)
                self.assertEqual(binding.segmentation_id, 0)
                t_binding = (n1kv_db_v2.get_trunk_network_binding(
                    self.session, TEST_NETWORK_ID,
                    (TEST_NETWORK_ID2, '5')))
                self.assertIsNotNone(t_binding)
                self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
                self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
                self.assertEqual(t_binding.dot1qtag, '5')
                t_members = (n1kv_db_v2.get_trunk_members(
                    self.session, TEST_NETWORK_ID))
                self.assertEqual(t_members,
                                 [(TEST_NETWORK_ID2, '5')])
                self.assertTrue(n1kv_db_v2.is_trunk_member(
                    self.session, TEST_NETWORK_ID2))
                n1kv_db_v2.del_trunk_segment_binding(
                    self.session, TEST_NETWORK_ID,
                    [(TEST_NETWORK_ID2, '5')])
                t_members = (n1kv_db_v2.get_multi_segment_members(
                    self.session, TEST_NETWORK_ID))
                self.assertEqual(t_members, [])
class NetworkProfileTests(base.BaseTestCase,
n1kv_db_v2.NetworkProfile_db_mixin):
def setUp(self):
super(NetworkProfileTests, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_create_network_profile(self):
_db_profile = n1kv_db_v2.create_network_profile(self.session,
TEST_NETWORK_PROFILE)
self.assertIsNotNone(_db_profile)
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
filter_by(name=TEST_NETWORK_PROFILE['name']).one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
self.assertEqual(_db_profile.multicast_ip_index,
db_profile.multicast_ip_index)
self.assertEqual(_db_profile.multicast_ip_range,
db_profile.multicast_ip_range)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_create_multi_segment_network_profile(self):
_db_profile = (n1kv_db_v2.create_network_profile(
self.session, TEST_NETWORK_PROFILE_MULTI_SEGMENT))
self.assertIsNotNone(_db_profile)
db_profile = (
self.session.query(
n1kv_models_v2.NetworkProfile).filter_by(
name=TEST_NETWORK_PROFILE_MULTI_SEGMENT['name'])
.one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
| |
import numpy as np
import tensorflow as tf
import librosa
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
import sys
import json
import glob
from Input import Input
import Models.UnetAudioSeparator
import Models.UnetSpectrogramSeparator
import musdb
import museval
import Utils
def predict(track, model_config, load_model, results_dir=None):
    '''
    Function in accordance with the MUSDB evaluation API. Takes a MUSDB track
    object, computes the corresponding source estimates and, if results_dir is
    given, runs the museval evaluation script on them.
    The model has to be saved beforehand as a checkpoint whose path is given!
    :param track: Track object providing .audio (n_samples, n_channels) and .rate
    :param model_config: Model configuration dictionary
    :param load_model: Path of the trained model checkpoint to restore
    :param results_dir: Directory where SDR etc. values should be saved; None skips evaluation
    :return: Source estimates dictionary (source name -> audio array)
    '''
    # Determine input and output shapes, if we use U-net as separator
    disc_input_shape = [model_config["batch_size"], model_config["num_frames"], 0]  # Shape of discriminator input
    if model_config["network"] == "unet":
        separator_class = Models.UnetAudioSeparator.UnetAudioSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                                   output_type=model_config["output_type"],
                                                                   context=model_config["context"],
                                                                   mono=(model_config["num_channels"]==1 or model_config["mono_downmix"]),
                                                                   upsampling=model_config["upsampling"],
                                                                   num_sources=model_config["num_sources"],
                                                                   filter_size=model_config["filter_size"],
                                                                   merge_filter_size=model_config["merge_filter_size"])
    elif model_config["network"] == "unet_spectrogram":
        separator_class = Models.UnetSpectrogramSeparator.UnetSpectrogramSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                                       mono=(model_config["num_channels"]==1 or model_config["mono_downmix"]),
                                                                       num_sources=model_config["num_sources"])
    else:
        raise NotImplementedError

    # Padding of the separator determines how much context the input needs
    # around each output window.
    sep_input_shape, sep_output_shape = separator_class.get_padding(np.array(disc_input_shape))
    separator_func = separator_class.get_output

    # Batch size of 1 for inference
    sep_input_shape[0] = 1
    sep_output_shape[0] = 1

    mix_context, sources = Input.get_multitrack_placeholders(sep_output_shape, model_config["num_sources"], sep_input_shape, "input")

    print("Evaluating...", file=sys.stderr)

    # BUILD MODELS
    # Separator (inference mode, no reuse: graph was reset after last call)
    separator_sources = separator_func(mix_context, False, reuse=False)

    # Start session and initialise variables before restoring the checkpoint
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Load the pretrained model weights from the given checkpoint
    restorer = tf.train.Saver(None, write_version=tf.train.SaverDef.V2)
    print("Num of variables" + str(len(tf.global_variables())), file=sys.stderr)
    restorer.restore(sess, load_model)
    print('Pre-trained model restored for song prediction', file=sys.stderr)

    mix_audio, orig_sr, mix_channels = track.audio, track.rate, track.audio.shape[1]  # Audio has (n_samples, n_channels) shape
    separator_preds = predict_track(model_config, sess, mix_audio, orig_sr, sep_input_shape, sep_output_shape, separator_sources, mix_context)

    # Upsample predicted source audio back to the track's sample rate
    pred_audio = [Utils.resample(pred, model_config["expected_sr"], orig_sr) for pred in separator_preds]

    if model_config["mono_downmix"] and mix_channels > 1:  # Convert to multichannel if mixture input was multichannel by duplicating mono estimate
        pred_audio = [np.tile(pred, [1, mix_channels]) for pred in pred_audio]

    # Set estimates depending on estimation task (voice or multi-instrument separation)
    if model_config["task"] == "voice":  # [acc, vocals] order
        estimates = {
            'vocals' : pred_audio[1],
            'accompaniment' : pred_audio[0]
        }
    else:  # [bass, drums, other, vocals]
        estimates = {
            'bass' : pred_audio[0],
            'drums' : pred_audio[1],
            'other' : pred_audio[2],
            'vocals' : pred_audio[3]
        }

    # Evaluate using museval, if we are currently evaluating MUSDB
    if results_dir is not None:
        scores = museval.eval_mus_track(track, estimates, output_dir=results_dir)
        # print nicely formatted mean scores
        print(scores, file=sys.stderr)

    # Close session and clear the graph so repeated calls do not leak state
    sess.close()
    tf.reset_default_graph()

    return estimates
def predict_track(model_config, sess, mix_audio, mix_sr, sep_input_shape, sep_output_shape, separator_sources, mix_context):
    '''
    Outputs source estimates for a given input mixture signal mix_audio [n_frames, n_channels] and a given Tensorflow session and placeholders belonging to the prediction network.
    It iterates through the track, collecting segment-wise predictions to form the output.
    :param model_config: Model configuration dictionary
    :param sess: Tensorflow session used to run the network inference
    :param mix_audio: [n_frames, n_channels] audio signal (numpy array). Can have higher sampling rate or channels than the model supports, will be downsampled correspondingly.
    :param mix_sr: Sampling rate of mix_audio
    :param sep_input_shape: Input shape of separator ([batch_size, num_samples, num_channels])
    :param sep_output_shape: Output shape of separator ([batch_size, num_samples, num_channels])
    :param separator_sources: List of Tensorflow tensors that represent the output of the separator network
    :param mix_context: Input tensor of the network
    :return: List of [n_frames, n_channels] numpy arrays, one per source
    '''
    # Convert to mono / duplicate channels so the input matches the model.
    assert(len(mix_audio.shape) == 2)
    if model_config["mono_downmix"]:
        mix_audio = np.mean(mix_audio, axis=1, keepdims=True)
    elif mix_audio.shape[1] == 1 and model_config["num_channels"] > 1:
        # Duplicate channels if input is mono but model is stereo
        mix_audio = np.tile(mix_audio, [1, 2])
    mix_audio = Utils.resample(mix_audio, mix_sr, model_config["expected_sr"])

    # Preallocate source predictions (same shape as input mixture)
    source_time_frames = mix_audio.shape[0]
    source_preds = [np.zeros(mix_audio.shape, np.float32) for _ in range(model_config["num_sources"])]

    input_time_frames = sep_input_shape[1]
    output_time_frames = sep_output_shape[1]

    # Pad mixture across time at beginning and end so that the network can
    # make predictions at the beginning and end of the signal as well.
    pad_time_frames = (input_time_frames - output_time_frames) // 2
    mix_audio_padded = np.pad(mix_audio, [(pad_time_frames, pad_time_frames), (0, 0)], mode="constant", constant_values=0.0)

    # Iterate over the mixture, fetching the network prediction per segment.
    # NOTE: the previous range()-based loop reassigned its loop variable when
    # clamping the final window, so the remaining iterations redundantly
    # re-predicted the tail segment; this while-loop stops right after the
    # clamped final window is processed (identical outputs, fewer sess.run calls).
    source_pos = 0
    while source_pos < source_time_frames:
        # If this output patch would reach over the end of the source signal,
        # shift it back so we predict the very end of the output.
        if source_pos + output_time_frames > source_time_frames:
            source_pos = source_time_frames - output_time_frames

        # Prepare mixture excerpt by selecting the (padded) time interval.
        mix_part = mix_audio_padded[source_pos:source_pos + input_time_frames, :]
        mix_part = np.expand_dims(mix_part, axis=0)

        source_parts = sess.run(separator_sources, feed_dict={mix_context: mix_part})

        # Save predictions for each source at this segment position.
        for i in range(model_config["num_sources"]):
            source_preds[i][source_pos:source_pos + output_time_frames] = source_parts[i][0, :, :]

        source_pos += output_time_frames

    return source_preds
def produce_musdb_source_estimates(model_config, load_model, musdb_path, output_path, subsets=None,
                                   is_wav=False, setup_file=None):
    '''
    Predicts source estimates for MUSDB for a given model checkpoint and configuration, and evaluates them.
    :param model_config: Model configuration of the model to be evaluated
    :param load_model: Model checkpoint path
    :return:
    '''
    print("Evaluating trained model saved at " + str(load_model)+ " on MUSDB and saving source estimate audio to " + str(output_path), file=sys.stderr)
    mus = musdb.DB(root_dir=musdb_path, is_wav=is_wav, setup_file=setup_file)

    # Named wrapper so musdb can call predict() per track with our settings.
    def run_prediction(track):
        return predict(track, model_config, load_model, output_path)

    mus.run(run_prediction, estimates_dir=output_path, subsets=subsets)
def produce_source_estimates(model_config, load_model, input_path, output_path=None):
    '''
    For a given input mixture file, saves source predictions made by a given model.
    :param model_config: Model configuration
    :param load_model: Model checkpoint path
    :param input_path: Path to input mixture audio file
    :param output_path: Output directory where estimated sources should be saved. Defaults to the same folder as the input file, if not given
    :return: Dictionary of source estimates containing the source signals as numpy arrays
    '''
    print("Producing source estimates for input mixture file " + input_path, file=sys.stderr)
    audio, sr = Utils.load(input_path, sr=None, mono=False)

    # Minimal stand-in for a MUSDB track object, so the MUSDB-compatible
    # prediction function can be reused for an arbitrary audio file.
    class TrackLike(object):
        def __init__(self, audio, rate, shape):
            self.audio = audio
            self.rate = rate
            self.shape = shape

    sources_pred = predict(TrackLike(audio, sr, audio.shape), model_config, load_model)

    # Write one wav per estimated source next to the input (or output_path).
    input_folder, input_filename = os.path.split(input_path)
    if output_path is None:
        # By default, save next to the input file.
        output_path = input_folder
    if not os.path.exists(output_path):
        print("WARNING: Given output path " + output_path + " does not exist. Trying to create it...", file=sys.stderr)
        os.makedirs(output_path)
    assert(os.path.exists(output_path))
    for source_name, source_audio in sources_pred.items():
        librosa.output.write_wav(os.path.join(output_path, input_filename) + "_" + source_name + ".wav", source_audio, sr)
def compute_mean_metrics(json_folder, compute_averages=True):
    """
    Collects frame-wise SDR values from all museval-style JSON result files
    in a folder, grouped per target source.

    :param json_folder: Directory containing ``*.json`` evaluation files
    :param compute_averages: If True, return per-source summary statistics;
        otherwise return the raw per-frame SDR arrays
    :return: If ``compute_averages``: a list with one tuple per source of
        (median, median absolute deviation, mean, standard deviation),
        all computed ignoring NaN frames. Otherwise: a list of 1-D numpy
        arrays of frame SDR values, one per source.
    """
    files = glob.glob(os.path.join(json_folder, "*.json"))
    sdr_inst_list = None
    for path in files:
        with open(path, "r") as f:
            js = json.load(f)
        if sdr_inst_list is None:
            # One accumulator list per target source
            sdr_inst_list = [list() for _ in range(len(js["targets"]))]
        for i in range(len(js["targets"])):
            # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement.
            sdr_inst_list[i].extend([float(frame['metrics']["SDR"]) for frame in js["targets"][i]["frames"]])
    sdr_inst_list = [np.array(sdr) for sdr in sdr_inst_list]
    if compute_averages:
        # (median, MAD, mean, std) per source, NaN-aware
        return [(np.nanmedian(sdr), np.nanmedian(np.abs(sdr - np.nanmedian(sdr))), np.nanmean(sdr), np.nanstd(sdr)) for sdr in sdr_inst_list]
    else:
        return sdr_inst_list
def draw_violin_sdr(json_folder):
    """Plot the segment-wise SDR distributions of accompaniment and vocals
    (read from a folder of evaluation JSONs) as horizontal violin plots and
    save the figure to ``sdr_histogram.pdf``."""
    accompaniment, vocals = compute_mean_metrics(json_folder, compute_averages=False)
    # Drop NaN frames (SDR is undefined on silent segments)
    accompaniment = accompaniment[~np.isnan(accompaniment)]
    vocals = vocals[~np.isnan(vocals)]
    data = [accompaniment, vocals]
    inds = [1, 2]
    fig, ax = plt.subplots()
    ax.violinplot(data, showmeans=True, showmedians=False, showextrema=False, vert=False)
    # Mark each distribution's median with a dot
    ax.scatter(np.percentile(data, 50, axis=1), inds, marker="o", color="black")
    ax.set_title("Segment-wise SDR distribution")
    # Short vertical ticks at the min/max of each distribution, joined by a
    # dashed horizontal range line
    ax.vlines([np.min(accompaniment), np.min(vocals), np.max(accompaniment), np.max(vocals)], [0.8, 1.8, 0.8, 1.8], [1.2, 2.2, 1.2, 2.2], color="blue")
    ax.hlines(inds, [np.min(accompaniment), np.min(vocals)], [np.max(accompaniment), np.max(vocals)], color='black', linestyle='--', lw=1, alpha=0.5)
    ax.set_yticks([1, 2])
    ax.set_yticklabels(["Accompaniment", "Vocals"])
    fig.set_size_inches(8, 3.)
    fig.savefig("sdr_histogram.pdf", bbox_inches='tight')
def draw_spectrogram(example_wav="musb_005_angela thomas wade_audio_model_without_context_cut_28234samples_61002samples_93770samples_126538.wav"):
    """Plot a dB-scaled magnitude spectrogram of the given wav file and mark
    four fixed sample positions (the cut points encoded in the default file
    name) with short red lines at the top and bottom edges.

    :param example_wav: path of the audio file to visualise
    """
    y, sr = Utils.load(example_wav, sr=None)
    # STFT with 512-sample window and 256-sample hop
    spec = np.abs(librosa.stft(y, 512, 256, 512))
    norm_spec = librosa.power_to_db(spec**2)
    # Cut positions in samples, converted to STFT frame indices (hop = 256)
    black_time_frames = np.array([28234, 61002, 93770, 126538]) / 256.0
    fig, ax = plt.subplots()
    img = ax.imshow(norm_spec)
    # Red markers at the top (rows 0-10) and bottom (rows 246-256) edges
    plt.vlines(black_time_frames, [0, 0, 0, 0], [10, 10, 10, 10], colors="red", lw=2, alpha=0.5)
    plt.vlines(black_time_frames, [256, 256, 256, 256], [246, 246, 246, 246], colors="red", lw=2, alpha=0.5)
    # Colorbar of matching height attached to the right of the image
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    plt.colorbar(img, cax=cax)
    ax.xaxis.set_label_position("bottom")
    #ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 256.0 / sr))
    #ax.xaxis.set_major_formatter(ticks_x)
    # Label the x axis in seconds: one tick every sr/256 frames
    ax.xaxis.set_major_locator(ticker.FixedLocator(([i * sr / 256. for i in range(len(y)//sr + 1)])))
    ax.xaxis.set_major_formatter(ticker.FixedFormatter(([str(i) for i in range(len(y)//sr + 1)])))
    # Label the y axis in kHz: a tick every 2000 Hz worth of frequency bins
    # (bin index = f / (sr/2) * 256)
    ax.yaxis.set_major_locator(ticker.FixedLocator(([float(i) * 2000.0 / (sr/2.0) * 256. for i in range(6)])))
    ax.yaxis.set_major_formatter(ticker.FixedFormatter([str(i*2) for i in range(6)]))
    ax.set_xlabel("t (s)")
    ax.set_ylabel('f (KHz)')
| |
Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS - 20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: " + last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
    """Return ``title`` followed by a space, padded with '=' characters up to
    ``bar_length`` columns (left unpadded when the title is already wider)."""
    return (title + " ").ljust(bar_length, "=")
##############################################################################
# Help system

# (section title, [(keystroke/command, description), ...]) pairs rendered
# page by page in do_draw_help()
help_struct = [
    ('Log Scrolling shortcuts',
     [("Up / Down / PgUp / PgDn", "scroll thru history"),
      ("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"),
      # BUGFIX: a space was missing between "logs" and "(jump to newest)"
      ("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs (jump to newest)"),
      ("Left / Right", "scroll long lines left/right"),
      ("Home / End", "scroll to start/end of long lines")]),
    ("Query History shortcuts",
     [("Ctrl+N / Ctrl+Left", "previous query"),
      ("Ctrl+P / Ctrl+Right", "next query")]),
    ("General Commands (type ':' to enter command mode)",
     [(":quit or :exit", "exit the program"),
      (":meter (show|hide)", "display the microphone level"),
      (":keycode (show|hide)", "display typed key codes (mainly debugging)"),
      (":history (# lines)", "set size of visible history buffer"),
      (":clear", "flush the logs")]),
    ("Log Manipulation Commands",
     [(":filter 'STR'", "adds a log filter (optional quotes)"),
      (":filter remove 'STR'", "removes a log filter"),
      (":filter (clear|reset)", "reset filters"),
      (":filter (show|list)", "display current filters"),
      (":find 'STR'", "show logs containing 'str'"),
      (":log level (DEBUG|INFO|ERROR)", "set logging level"),
      (":log bus (on|off)", "control logging of messagebus messages")]),
    ("Skill Debugging Commands",
     [(":skills", "list installed Skills"),
      (":api SKILL", "show Skill's public API"),
      (":activate SKILL", "activate Skill, e.g. 'activate skill-wiki'"),
      (":deactivate SKILL", "deactivate Skill"),
      (":keep SKILL", "deactivate all Skills except the indicated Skill")])]

# Width of the longest keystroke/command column, used to align descriptions
help_longest = 0
for s in help_struct:
    for ent in s[1]:
        help_longest = max(help_longest, len(ent[0]))

# Rows reserved for the help-screen header, and header plus footer
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
    """Return how many screens are needed to show the whole help text:
    3 rows per section (title, separator, trailing blank) plus one row per
    entry, divided by the usable terminal height."""
    total = sum(3 + len(entries) for _, entries in help_struct)
    return ceil(total / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
    """Render one page of the help screen onto the curses window ``scr``.
    The page shown is selected by the global ``subscreen`` (0-based)."""
    def render_header():
        # Centered title plus a full-width '=' separator
        scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
        scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
    def render_help(txt, y_pos, i, first_line, last_line, clr):
        # Draw logical line i only when it falls inside the visible window
        # [first_line, last_line); return the next free screen row
        if i >= first_line and i < last_line:
            scr.addstr(y_pos, 0, txt, clr)
            y_pos += 1
        return y_pos
    def render_footer(page, total):
        text = "Page {} of {} [ Any key to continue ]".format(page, total)
        scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
    scr.erase()
    render_header()
    y = HEADER_SIZE
    page = subscreen + 1
    # Find first and last taking into account the header and footer
    first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
    last = first + (curses.LINES - HEADER_FOOTER_SIZE)
    i = 0  # logical line counter across the whole help text
    for section in help_struct:
        # Section title followed by a separator line
        y = render_help(section[0], y, i, first, last, CLR_HEADING)
        i += 1
        y = render_help("=" * (curses.COLS - 1), y, i, first, last,
                        CLR_HEADING)
        i += 1
        for line in section[1]:
            # Word-wrap the description into a column aligned just past the
            # longest command name (help_longest)
            words = line[1].split()
            ln = line[0].ljust(help_longest + 1)
            for w in words:
                if len(ln) + 1 + len(w) < curses.COLS:
                    ln += " " + w
                else:
                    y = render_help(ln, y, i, first, last, CLR_CMDLINE)
                    ln = " ".ljust(help_longest + 2) + w
            y = render_help(ln, y, i, first, last, CLR_CMDLINE)
            i += 1
        # Blank line between sections
        y = render_help(" ", y, i, first, last, CLR_CMDLINE)
        i += 1
        if i > last:
            break
    render_footer(page, num_help_pages())
    # Curses doesn't actually update the display until refresh() is called
    scr.refresh()
def show_help():
    """Switch the UI to the help screen, starting at its first page."""
    global screen_mode, subscreen
    if screen_mode == SCR_HELP:
        return
    screen_mode = SCR_HELP
    subscreen = 0
    set_screen_dirty()
def show_next_help():
    """Advance the help screen to its next page; after the last page,
    return to the main screen."""
    global screen_mode, subscreen
    if screen_mode != SCR_HELP:
        return
    subscreen += 1
    # Past the last page: fall back to the main screen
    if subscreen >= num_help_pages():
        screen_mode = SCR_MAIN
    set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
    """Show list of loaded Skills in as many columns as necessary.

    Args:
        skills: dict mapping skill name -> info dict; only the 'active'
            flag is read here (active skills are drawn in another color).
    """
    global scr
    global screen_mode
    if not scr:
        return
    screen_mode = SCR_SKILLS
    row = 2
    column = 0
    def prepare_page():
        # Clear the window, redraw the fixed title bar and reset the text
        # cursor to the top-left of the list area
        global scr
        nonlocal row
        nonlocal column
        scr.erase()
        scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE)
        scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
        row = 2
        column = 0
    prepare_page()
    col_width = 0
    skill_names = sorted(skills.keys())
    for skill in skill_names:
        # Active skills in one color pair, inactive ones in another
        if skills[skill]['active']:
            color = curses.color_pair(4)
        else:
            color = curses.color_pair(2)
        scr.addstr(row, column, " {}".format(skill), color)
        row += 1
        col_width = max(col_width, len(skill))
        if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
            # Last column of the screen is full: wait for a key press and
            # start a fresh page
            column = 0
            scr.addstr(curses.LINES - 1, 0,
                       center(23) + "Press any key to continue", CLR_HEADING)
            scr.refresh()
            wait_for_any_key()
            prepare_page()
        elif row == curses.LINES - 2:
            # Reached bottom of screen, start at top and move output to a
            # New column
            row = 2
            column += col_width + 2
            col_width = 0
            if column > curses.COLS - 20:
                # End of screen
                break
    scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
               CLR_HEADING)
    scr.refresh()
def show_skill_api(skill, data):
    """Show available help on Skill's API.

    Args:
        skill: name of the skill whose API is displayed
        data: dict mapping method name -> info dict with a 'type' entry and
            an optional multi-line 'help' text
    """
    global scr
    global screen_mode
    if not scr:
        return
    screen_mode = SCR_SKILLS
    row = 2
    column = 0
    def prepare_page():
        # Clear the window, redraw the title bar and reset the cursor
        global scr
        nonlocal row
        nonlocal column
        scr.erase()
        scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill),
                   CLR_CMDLINE)
        scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
        row = 2
        column = 4
    prepare_page()
    for key in data:
        color = curses.color_pair(4)
        # Method name and its type on a heading line
        scr.addstr(row, column, "{} ({})".format(key, data[key]['type']),
                   CLR_HEADING)
        row += 2
        if 'help' in data[key]:
            # Indented help text, one screen row per help line
            help_text = data[key]['help'].split('\n')
            for line in help_text:
                scr.addstr(row, column + 2, line, color)
                row += 1
            row += 2
        else:
            row += 1
        if row == curses.LINES - 5:
            # Screen is full: wait for a key press and start a fresh page
            scr.addstr(curses.LINES - 1, 0,
                       center(23) + "Press any key to continue", CLR_HEADING)
            scr.refresh()
            wait_for_any_key()
            prepare_page()
        elif row == curses.LINES - 5:
            # Reached bottom of screen, start at top and move output to a
            # New column
            # NOTE(review): this branch is unreachable (same condition as
            # the branch above) and never changes 'column' — verify intent
            row = 2
    scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
               CLR_HEADING)
    scr.refresh()
def center(str_len):
    """Return the run of spaces that horizontally centers a string of
    length ``str_len`` in the current terminal width."""
    pad = (curses.COLS - str_len) // 2
    return " " * pad
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def wait_for_any_key():
    """Block until key is pressed.
    This works around curses.error that can occur on old versions of ncurses.
    """
    while True:
        try:
            scr.get_wch() # blocks
        except curses.error:
            # Loop if get_wch throws error
            # (retry shortly instead of propagating the spurious error)
            time.sleep(0.05)
        else:
            break
def handle_cmd(cmd):
    """Process one command entered in ':' command mode.

    Returns 1 when the user asked to exit/quit (signals the main loop to
    stop); otherwise returns None.
    """
    global show_meter
    global screen_mode
    global log_filters
    global cy_chat_area
    global find_str
    global show_last_key
    if "show" in cmd and "log" in cmd:
        pass
    elif "help" in cmd:
        show_help()
    elif "exit" in cmd or "quit" in cmd:
        return 1
    elif "keycode" in cmd:
        # debugging keyboard
        if "hide" in cmd or "off" in cmd:
            show_last_key = False
        elif "show" in cmd or "on" in cmd:
            show_last_key = True
    elif "meter" in cmd:
        # microphone level meter
        if "hide" in cmd or "off" in cmd:
            show_meter = False
        elif "show" in cmd or "on" in cmd:
            show_meter = True
    elif "find" in cmd:
        # remember the search string and re-filter the visible log
        find_str = _get_cmd_param(cmd, "find")
        rebuild_filtered_log()
    elif "filter" in cmd:
        if "show" in cmd or "list" in cmd:
            # display active filters
            add_log_message("Filters: " + str(log_filters))
            return
        if "reset" in cmd or "clear" in cmd:
            log_filters = list(default_log_filters)
        else:
            # extract last word(s)
            param = _get_cmd_param(cmd, "filter")
            if param:
                if "remove" in cmd and param in log_filters:
                    log_filters.remove(param)
                else:
                    log_filters.append(param)
        rebuild_filtered_log()
        add_log_message("Filters: " + str(log_filters))
| |
<filename>fonctions a copier.py
"A copier"
# BUGFIX: this statement was "1 = """...""" ", which is a SyntaxError
# (cannot assign to a literal). Renamed to a valid identifier; the snippet
# text itself is kept verbatim.
SNIPPET_1 = """
robot = IA(ter.position_vert,type.vert,im.vert_sprinter,im.vert_fighter,im.vert_tank,im.attak_vert,im.explosion_vert,"rouge","vert")
global groupe_lignes, groupe_chute
groupe_lignes = []
line = False
for i in range(60, (len(ter.contenu)-6)*30 +15, 15 ):
robot.rect.bottom = i
for j in range(6, (len(ter.contenu[0])-3)*30, 1 ):
robot.rect.left = j
collide = False
for bloc in ter.grp_bloc:
if robot.rect.colliderect(bloc.rect):
collide = True
if line == True:
groupe_lignes.append((point_depart, point_arrivee))
line = False
break
if collide == False:
robot.rect.top += 1
for bloc in ter.grp_bloc:
if robot.rect.colliderect(bloc.rect):
collide = True
robot.rect.top -= 1
if collide == True:
if line == False:
line = True
point_depart = robot.rect.center
elif line == True:
point_arrivee = robot.rect.center
elif collide == False and line == True:
groupe_lignes.append((point_depart, point_arrivee))
line = False
for bloc_piege in ter.grp_bloc_pieg:
line = False
robot.rect.bottom = bloc_piege.rect.bottom
robot.rect.right = bloc_piege.rect.left
i = 1
while i:
i += 1
robot.rect.left += 1
if robot.test_collision() == None:
print("on a marché sur un piege !")
robot.rect.top += 1
if robot.test_collision() == True:
robot.rect.top -= 1
if line == False:
point_depart = robot.rect.center
line = True
else:
point_arrivee = robot.rect.center
else:
robot.rect.top -= 1
if line == True:
groupe_lignes.append((point_depart, point_arrivee))
line = False
break
if i > 30 + robot.rect.width and line == False:
i = 0
ecran.blit(im.fond, (0,0))
ter.grp_bloc.draw(ecran)
for bloc in ter.grp_bloc_effet:
ecran.blit(bloc.image.subsurface(0,0,30,30), bloc.rect.topleft)
for pers in ter.grp_pers:
ecran.blit(pers.image[pers.direction][pers.index_img], pers.rect.topleft)
couleur = (55 + 200 * abs( pers.vie/pers.full_vie -1 ), 255 - 200 * abs( pers.vie/pers.full_vie -1 ), 50)
pygame.draw.rect(ecran, couleur, (pers.rect.left, pers.rect.top-10, pers.rect.width*pers.vie/pers.full_vie, 5))
for ligne in groupe_lignes:
pygame.draw.line(ecran, (0,0,0), ligne[0], ligne[1])
pygame.display.flip()
pygame.time.wait(3000)
print("groupe_lignes = [", end = "")
for ligne in groupe_lignes:
print("",ligne,",", end = "")
print("]")
groupe_chute = []
for i in 1, -1:
for ligne in groupe_lignes:
robot.rect.center = ligne[0]
robot.rect.centerx -= i
collide = False
for bloc in ter.grp_bloc:
if robot.rect.colliderect(bloc):
collide = True
if collide == False:
for ligne2 in groupe_lignes:
if ligne[0][0] - ligne2[1][0] == i:
if -15 <= ligne[0][1] - ligne2[0][1] < 0:
groupe_chute.append([ligne, ligne2])
elif ligne[1][0] - ligne2[0][0] == i:
if -15 <= ligne[0][1] - ligne2[0][1] < 0:
groupe_chute.append([ligne, ligne2])
for chute1 in groupe_chute:
restart = True
while restart:
restart = False
for chute2 in groupe_chute:
if chute1 != chute2:
if chute1[-1] == chute2[0]:
for i in range(1,len(chute2)):
chute1.append(chute2[i])
groupe_chute.remove(chute2)
restart = True
elif chute1[0] == chute2[-1]:
for i in range(1,len(chute2)):
chute1.insert(0,chute2[-i-1])
groupe_chute.remove(chute2)
restart = True
elif chute1[0] == chute2[0]:
for i in range(1,len(chute2)):
chute1.insert(0,chute2[i])
groupe_chute.remove(chute2)
restart = True
# utile ?
elif chute1[-1] == chute2[-1]:
for i in range(1,len(chute2)):
chute1.append(chute2[-i-1])
groupe_chute.remove(chute2)
restart = True
for line1 in groupe_lignes:
ajout = True
for lines in groupe_chute:
for line2 in lines:
if line1 == line2:
ajout = False
if ajout == True:
groupe_chute.append([line1])
ecran.blit(im.fond, (0,0))
ter.grp_bloc.draw(ecran)
for bloc in ter.grp_bloc_effet:
ecran.blit(bloc.image.subsurface(0,0,30,30), bloc.rect.topleft)
for lines in groupe_chute:
for line in lines:
pygame.draw.line(ecran,(0,0,0),line[0], line[1])
pygame.display.flip()
pygame.time.wait(1000)
print("groupe_chute = [", end = "")
for lines in groupe_chute:
print("[", end = "")
for line in lines:
print("",line,",", end = "")
print("],", end = "")
print("]")
"""
# BUGFIX: was "2 = """...""" " (SyntaxError: cannot assign to a literal);
# renamed to a valid identifier, snippet text kept verbatim.
SNIPPET_2 = """ecran.blit(im.fond, (0,0))
for groupe in (ter.grp_bloc, ter.grp_btn_bloc, ter.grp_btn_play):
groupe.draw(ecran)
for bloc in ter.grp_bloc_effet:
ecran.blit(bloc.image.subsurface(0,0,30,30), (bloc.rect.left, bloc.rect.top))
for liste in self.act_listes:
pygame.draw.rect(ecran, (0,0,200), ((liste[-1][0]*30, liste[-1][1]*30), (30,30)))
pygame.display.update()
pygame.time.wait(100)"""
# BUGFIX: was "3 = """...""" " (SyntaxError: cannot assign to a literal);
# renamed to a valid identifier, snippet text kept verbatim.
SNIPPET_3 = """ecran.blit(im.fond, (0,0))
for groupe in (ter.grp_bloc, ter.grp_btn_bloc, ter.grp_btn_play):
groupe.draw(ecran)
for bloc in ter.grp_bloc_effet:
ecran.blit(bloc.image.subsurface(0,0,30,30), (bloc.rect.left, bloc.rect.top))
for point in self.chemin:
pygame.draw.rect(ecran, (0,0,200), ((point[0]*30, point[1]*30), (30,30)))
pygame.display.update()
#pygame.time.wait(100)"""
# BUGFIX: was "4 = """...""" " (SyntaxError: cannot assign to a literal);
# renamed to a valid identifier, snippet text kept verbatim.
SNIPPET_4 = """
def create_cadre(self,longueur, hauteur,name):
vide = self.vide.copy()
image = vide.subsurface(0,0,longueur,hauteur)
image.blit(self.cadre.subsurface(0,0,4,4),(0,0))
image.blit(self.cadre.subsurface(0,5,4,4),(0,hauteur-4))
image.blit(self.cadre.subsurface(5,0,4,4),(longueur-4,0))
image.blit(self.cadre.subsurface(5,5,4,4),(longueur-4,hauteur-4))
for i in range(longueur-8):
for j in range(hauteur-8):
image.blit(self.cadre.subsurface(4,4,1,1),(i+4,j+4))
for i in range(longueur-8):
image.blit(self.cadre.subsurface(4,0,1,4),(i+4,0))
image.blit(self.cadre.subsurface(4,5,1,4),(i+4,hauteur-4))
for j in range(hauteur-8):
image.blit(self.cadre.subsurface(0,4,4,1),(0,j+4))
image.blit(self.cadre.subsurface(5,4,4,1),(longueur-4,j+4))
pygame.image.save(image,"Images/Cadres/"+name+".png")
return image
"""
class Missile(pygame.sprite.Sprite):
    """Homing missile (fired by Sprinters).

    Performs a breadth-first path search on the 30x30-pixel tile grid
    toward the nearest living opponent; explodes on contact with
    characters, other missiles or wall blocks.
    """
    def __init__(self, centerx, centery, lanceur):
        pygame.sprite.Sprite.__init__(self)
        # Four 24x24 animation frames, one per facing direction
        self.image = [im.missile.subsurface(index, 0, 24, 24) for index in range(0, 96, 24)]
        self.lanceur = lanceur
        self.rect = pygame.Rect(0, 0, 24, 24)
        # Snap the spawn point to the centre of its tile
        self.rect.center = (arrondir(centerx)+15, arrondir(centery)+15)
        self.vie = 6
        self.index_img_explosion = 0
        self.degel = 0
        self.x = int(self.rect.centerx/30)
        self.y = int(self.rect.centery/30)
        self.dx = self.dy = 0
        # BUGFIX: these four attributes were all bound to the *same* list
        # object (a = b = c = d = []); give each its own list.
        self.act_listes = []
        self.anc_listes = []
        self.new_listes = []
        self.chemin = []
        self.search_cible()
        self.index_img = int(self.dx/2 + 0.5) + 2 if self.dx else int(self.dy/2 + 0.5)
    #__________________________________________
    def move(self):
        """Advance one animation/physics step."""
        if not self.vie > 0:
            # Dead: play the explosion animation, then drop the sprite
            self.index_img_explosion += 1
            if not self.index_img_explosion < 24.5:
                ter.grp_missile.remove(self)
        else:
            if self.degel:
                # Frozen: just count the freeze timer down
                self.degel -= 1
            else:
                # BUGFIX: test for a missing target *before* dereferencing
                # it (the original read self.cible.mort first and could
                # raise AttributeError when the target was cleared).
                if not self.cible or self.cible.mort:
                    self.search_cible()
                    if not self.cible:
                        self.vie = 0
                if self.vie > 0:
                    # Re-plan the path only when centred on a tile
                    if self.besoin_tourner(self.rect.centerx, self.rect.centery):
                        self.recherche_deplacement_vers_cible()
                    self.rect.centerx += self.dx*5
                    self.rect.centery += self.dy*5
                    # Hit characters or other missiles
                    for groupe in (ter.grp_pers, ter.grp_missile):
                        for objet in groupe:
                            if self.rect.colliderect(objet.rect) and objet != self.lanceur and objet != self and objet.vie > 0:
                                objet.vie -= 15
                                self.vie = 0
                                if groupe == ter.grp_pers:
                                    objet.temps_recharg_anim_degat = 20
                    # Hit wall blocks
                    for bloc in ter.grp_bloc:
                        if self.rect.colliderect(bloc.rect):
                            self.vie = 0
            self.index_img = int(self.dx/2 + 0.5) + 2 if self.dx else int(self.dy/2 + 0.5)
    def recherche_deplacement_vers_cible(self):
        """Breadth-first search on the tile grid for a path to the target;
        sets self.chemin and the per-step direction (self.dx, self.dy)."""
        self.x = int(self.rect.centerx/30)
        self.y = int(self.rect.centery/30)
        but = (int(self.cible.rect.centerx/30), int(self.cible.rect.centery/30))
        self.act_listes = [[(self.x, self.y)]]
        self.anc_listes = []
        self.chemin = []
        self.new_listes = []
        if but == (self.x, self.y):
            self.chemin = [(self.x, self.y), (self.x, self.y)]
        while self.chemin == []:
            for liste1 in self.act_listes:
                # Orthogonal moves first...
                deplacements = []
                for i in (1,0), (0,1), (-1,0), (0,-1):
                    if self.deplacement_possible( (liste1[-1][0]+i[0], liste1[-1][1]+i[1]) ):
                        deplacements.append(i)
                # ...then diagonals, allowed only when both adjacent
                # orthogonal tiles are also free
                deplacements_diagonale = []
                for i in deplacements:
                    for j in deplacements:
                        if math.sqrt((i[0]+j[0])**2) == 1:
                            if self.deplacement_possible( (liste1[-1][0]+i[0]+j[0], liste1[-1][1]+i[1]+j[1]) ):
                                ajout = True
                                for vecteur in deplacements_diagonale:
                                    if (i[0]+j[0], i[1]+j[1]) == vecteur:
                                        ajout = False
                                if ajout: deplacements_diagonale.append( (i[0]+j[0], i[1]+j[1]) )
                for i in deplacements_diagonale:
                    deplacements.append(i)
                # Extend every current path by each possible move
                for i in deplacements:
                    point = ( (liste1[-1][0]+i[0], liste1[-1][1]+i[1]) )
                    temp = []
                    for point2 in liste1:
                        temp.append(point2)
                    temp.append(point)
                    if point == but:
                        self.chemin = temp
                    self.new_listes.append(temp)
            self.anc_listes = []
            for i in self.act_listes:
                self.anc_listes.append(i)
            self.act_listes = []
            for i in self.new_listes:
                self.act_listes.append(i)
            self.new_listes = []
            if self.act_listes == []:
                # No reachable path: stay in place and self-destruct
                self.chemin = [(self.x, self.y), (self.x, self.y)]
                self.vie = 0
        # (debug snippet 2 can be pasted here)
        self.dx = self.chemin[1][0]-self.x
        self.dy = self.chemin[1][1]-self.y
    # (debug snippet 3 can be pasted here)
    def deplacement_possible(self, point):
        """Return True when the tile at grid coordinate ``point`` is free
        and no already-explored path ends there."""
        ajout = False
        case = ter.contenu[point[1]+1][point[0]+1]
        if case in ("vide", "axel", "jump"):
            ajout = True
        for listes in self.act_listes, self.anc_listes:
            for liste in listes:
                if liste[-1] == point:
                    ajout = False
        return ajout
    def search_cible(self):
        """Pick the living opponent with the shortest path as the target and
        (re-)register the missile in the missile group."""
        candidats = []  # (perso, path length); renamed from 'list' (builtin shadow)
        for pers in ter.grp_pers:
            if pers != self.lanceur and pers.vie > 0:
                self.cible = pers
                self.recherche_deplacement_vers_cible()
                # NOTE(review): a failed path search zeroes self.vie, which
                # also discards every later candidate — confirm intent
                if self.vie:
                    candidats.append((pers, len(self.chemin)))
        if candidats:
            if len(candidats) == 2:
                candidats[0][0] if candidats[0][1] < candidats[1][1] else candidats[1][0]
                self.cible = candidats[0][0] if candidats[0][1] < candidats[1][1] else candidats[1][0]
            else:
                self.cible = candidats[0][0]
        ter.grp_missile.add(self)
        self.vie = 6
    def besoin_tourner(self, x, y):
        # True when the missile sits exactly on a tile centre
        return True if x == int(x/30)*30+15 and y == int(y/30)*30+15 else False
"""
Classe qui definit le missiles a tete chercheuse envoyes par les Sprinters
"""
class Bombe(pygame.sprite.Sprite):
    """Bomb thrown by Fighters: flies on a parabolic arc and explodes on
    contact, dealing area damage that falls off linearly with distance."""
    def __init__(self, centre_x, centre_y, lanceur, direction):
        pygame.sprite.Sprite.__init__(self)
        # Launch point, kept to compute the parabolic trajectory
        self.coord_lancement =[centre_x,centre_y]
        self.rect = pygame.Rect(centre_x - 10, centre_y - 10, 20, 20)
        self.image = im.bombe
        self.lanceur = lanceur
        self.index_img = self.count = self.index_img_explosion = 0
        # Steps flown so far (-1 so the first move() starts at 0)
        self.count_traj = -1
        self.direction = direction
        self.vie = 1
        self.degel = 0
        ter.grp_bombe.add(self)
    def move(self):
        """Advance one step: animate the explosion, stay frozen, or fly."""
        if self.vie <= 0:
            # Exploded: play the explosion animation, then drop the sprite
            self.index_img_explosion += 0.3
            if not self.index_img_explosion < 8:
                ter.grp_bombe.remove(self)
        elif self.degel:
            # Frozen: count the freeze timer down
            self.degel -= 1
        else:
            # Parabolic height (-t^2 + 80t)/60 above the launch y, moving
            # 8 px per step horizontally in the launch direction
            self.count_traj += 1
            self.rect.centery = self.coord_lancement[1] - ( -self.count_traj**2 + 80*self.count_traj ) /60
            self.rect.centerx += self.direction*8
            explode = False
            # Contact with any living character, missile or other bomb
            # (except the thrower and itself) triggers the explosion
            for groupe in (ter.grp_pers, ter.grp_missile, ter.grp_bombe):
                for objet in groupe:
                    if self.rect.colliderect(objet.rect) and objet != self.lanceur and objet != self and objet.vie:
                        explode = True
                        break
            # So does hitting a wall block
            for bloc in ter.grp_bloc:
                if self.rect.colliderect(bloc.rect):
                    explode = True
            if explode:
                self.explosion()
    def explosion(self):
        """Deal area damage within a 210 px radius; damage decreases
        linearly from 40 at the centre to 0 at the edge. Other bombs in
        range explode in chain."""
        self.vie = 0
        for groupe in (ter.grp_pers, ter.grp_attak, ter.grp_missile, ter.grp_bombe):
            for objet in groupe:
                distance = math.sqrt((objet.rect.centerx - self.rect.centerx)**2 + (objet.rect.centery - self.rect.centery)**2)
                if distance < 210 and objet != self.lanceur and objet != self and objet.vie:
                    if groupe == ter.grp_bombe:
                        objet.explosion()
                    objet.vie -= (-distance*40) /210 +40
                    if groupe == ter.grp_pers:
                        objet.temps_recharg_anim_degat = 20
"""
Classe qui definit les bombes envoyees par les Fighters
"""
class Glace(pygame.sprite.Sprite):
def __init__(self, centerx, centery, lanceur):
pygame.sprite.Sprite.__init__(self)
self.rect = pygame.Rect(centerx-192,centery-192,384,384)
self.index_img = 0
self.lanceur = lanceur
for groupe in(ter.grp_pers, ter.grp_attak, ter.grp_missile, ter.grp_bombe):
for objet in groupe:
if objet != self.lanceur:
distance = math.sqrt( (self.rect.centerx - objet.rect.centerx)**2 + (self.rect.centery - objet.rect.centery)**2 )
if distance < 192 and objet.vie > 0 and objet.degel == | |
of Abel transform to be performed
Returns
-------
A : n × n numpy array
matrix of the Abel transform (forward or inverse)
"""
global _bs_prm, _bs, _trf_prm, _trf, _tri_prm, _tri
sigma = float(sigma) # (ensure FP format)
nbf = _nbf(n, sigma)
M = None
# Check whether basis for these parameters is already loaded
if _bs_prm == [n, sigma]:
if verbose:
print('Using memory-cached basis sets')
M, Mc = _bs
else: # try to load basis
if basis_dir is not None:
basis_file = 'basex_basis_{}_{}.npy'.format(n, sigma)
def full_path(file_name):
return os.path.join(basis_dir, file_name)
# Try to find a suitable existing basis set
if os.path.exists(full_path(basis_file)):
# have exactly needed
best_file = basis_file
else:
# Find the best (smallest among sufficient)
# and the largest (to extend if not sufficient)
best_file = None
best_n = sys.maxsize
largest_file = None
largest_n = 0
mask = re.compile(r'basex_basis_(\d+)_{}\.npy$'.format(sigma))
for f in listdir(basis_dir):
# filter BASEX basis files
match = mask.match(f)
if not match:
continue
# extract basis image size (sigma was fixed above)
f_n = int(match.group(1))
# must be large enough and smaller than previous best
if f_n >= n and f_n < best_n:
# remember as new best
best_file = f
best_n = f_n
# largest must be just larger than previous
if f_n > largest_n:
# remember as new largest
largest_file = f
largest_n = f_n
# If found, try to use it
if best_file:
if verbose:
print('Loading basis sets...')
# saved as a .npy file
try:
M, Mc, M_version = np.load(full_path(best_file))
# crop if loaded larger
if M.shape != (n, nbf):
M = M[:n, :nbf]
Mc = Mc[:n, :nbf]
if verbose:
print('(cropped from {})'.format(best_file))
except ValueError:
print('Cached basis file incompatible.')
if M is None: # generate the basis set
if verbose:
print('A suitable basis set was not found.',
'A new basis set will be generated.',
'This may take a few minutes.', sep='\n')
if basis_dir is not None:
print('But don\'t worry, '
'it will be saved to disk for future use.')
# Try to extend the largest available
try:
oldM, oldMc, M_version = np.load(full_path(largest_file))
if verbose:
print('(extending {})'.format(largest_file))
except:
oldM = None # (old Mc is not needed)
M, Mc = _bs_basex(n, sigma, oldM, verbose=verbose)
if basis_dir is not None:
np.save(full_path(basis_file),
(M, Mc, np.array(__version__)))
if verbose:
print('Basis set saved for later use to')
print(' {}'.format(basis_file))
_bs_prm = [n, sigma]
_bs = [M, Mc]
_trf_prm = None
_tri_prm = None
# Check whether transform matrices for these parameters
# are already created
if direction == 'forward' and _trf_prm == [reg, correction, dr]:
A = _trf
elif direction == 'inverse' and _tri_prm == [reg, correction, dr]:
A = _tri
else: # recalculate
if verbose:
print('Updating regularization...')
A = _get_A(*_bs, reg=reg, direction=direction)
if correction:
if verbose:
print('Calculating correction...')
cor = get_basex_correction(A, sigma, direction)
A = np.multiply(A, cor)
if direction == 'forward':
_trf_prm = [reg, correction, dr]
_trf = A
else: # 'inverse'
_tri_prm = [reg, correction, dr]
_tri = A
# apply intensity scaling, if needed
if dr != 1.0:
if direction == 'forward':
A *= dr
else: # 'inverse'
A /= dr
return A
def cache_cleanup(select='all'):
    """
    Utility function.

    Frees the memory caches created by ``get_bs_cached()``.
    This is usually pointless, but might be required after working
    with very large images, if more RAM is needed for further tasks.

    Parameters
    ----------
    select : str
        selects which caches to clean:

        ``all`` (default)
            everything, including basis;
        ``forward``
            forward transform;
        ``inverse``
            inverse transform.

    Returns
    -------
    None
    """
    global _bs_prm, _bs, _trf_prm, _trf, _tri_prm, _tri
    if select == 'all':
        _bs_prm, _bs = None, None
    if select in ('all', 'forward'):
        _trf_prm, _trf = None, None
    if select in ('all', 'inverse'):
        _tri_prm, _tri = None, None
def get_basex_correction(A, sigma, direction):
    """
    Internal function.

    Computes the radial intensity-correction profile for the BASEX
    transform matrix **A**. The raw BASEX basis produces systematic
    intensity artifacts (wrong overall intensity for **sigma** = 1,
    oscillations for other **sigma** values, fluctuations near *r* = 0),
    so the BASEX transform of a step function with a soft edge (to avoid
    ringing), aligned with the last basis function, is compared with the
    exact result, and their ratio is returned as the correction.

    Parameters
    ----------
    A : n × n numpy array
        matrix of the Abel transform
    sigma : float
        basis width parameter
    direction : str: ``'forward'`` or ``'inverse'``
        type of the Abel transform

    Returns
    -------
    cor : 1 × n numpy array
        intensity correction profile
    """
    n = A.shape[0]
    nbf = _nbf(n, sigma)
    r = np.arange(float(n))
    # Step edge centred on the last basis function, softened over ±sigma
    edge_center = (nbf - 0.5) * sigma
    halfwidth = sigma
    # Constant shelf stitched to two parabolic segments (soft edge);
    # this is more numerically stable at large r than a cubic smoothstep
    step = PiecewisePolynomial(r, [(0, edge_center - halfwidth, [1]),
                                   (edge_center - halfwidth, edge_center,
                                    [1, 0, -1/2], edge_center - halfwidth, halfwidth),
                                   (edge_center, edge_center + halfwidth,
                                    [0, 0, 1/2], edge_center + halfwidth, halfwidth)])
    # Correction = exact profile / BASEX-transformed profile
    if direction == 'forward':
        return step.abel / basex_core_transform(step.func, A)
    else:  # 'inverse'
        return step.func / basex_core_transform(step.abel, A)
# The analytical expression for the k-th basis-function projection
# involves a sum of k^2 terms, most of which are very small.
# Setting BASIS_SET_CUTOFF = c truncates this sum to ±cu terms
# around the maximum (u = x / sigma).
# The computation time is roughly proportional to this parameter,
# while the accuracy (at least for n, k < 10000) is as follows:
# cutoff relative error
# 4 < 2e-4
# 5 < 2e-6
# 6 < 7e-9
# 7 < 1e-11
# 8 < 6e-15
# 9 < 1e-15
# The last one reaches the 64-bit floating-point precision,
# so going beyond that is useless.
# See https://github.com/PyAbel/PyAbel/issues/230
BASIS_SET_CUTOFF = 9 # numerically exact
def _bs_basex(n=251, sigma=1.0, oldM=None, verbose=True):
"""
Generates horizontal basis sets for the BASEX method.
Parameters
----------
n : int
horizontal dimensions of the half-width image in pixels.
Must include the axial pixel.
See https://github.com/PyAbel/PyAbel/issues/34
sigma : float
width parameter for basis functions
oldM : numpy array
projected basis matrix for the same **sigma** but a smaller image size.
Can be supplied to avoid recalculating matrix elements
that are already available.
Returns
-------
M, Mc : n × nbf numpy array
Mc
is the reconstructed-image basis rho_k(r_i) (~Gaussians),
corresponds to Z^T in the article.
M
is the projected basis chi_k(x_i),
corresponds to X^T in the article.
"""
sigma = float(sigma) # (ensure FP type)
nbf = _nbf(n, sigma) # number of basis functions
if verbose:
print('Generating horizontal BASEX basis sets for '
'n = {}, sigma = {} (nbf = {}):'.format(n, sigma, nbf))
print('k = 0...', end='')
sys.stdout.flush()
# Precompute tables of ln Gamma(...) terms;
# notice that index i corresponds to argument i + 1 (and i + 1/2).
maxk2 = (nbf - 1)**2
# for Gamma(k^2 + 1) and Gamma(l + 1)
lngamma = gammaln(np.arange(maxk2 + 1) + 1)
# for Gamma(k^2 - l + 1) - Gamma(k^2 - l + 1/2)
Dlngamma = lngamma - gammaln(np.arange(maxk2 + 1) + 1/2)
# reduced coordinates u = x/sigma (or r/sigma) and their squares
U = np.arange(float(n)) / sigma
U2 = U * U
Mc = np.empty((n, nbf))
M = np.empty((n, nbf))
# (indexing is Mc[r, k], M[x, k])
old_n, old_nbf = 0, 0
# reuse old elements, if available
if oldM is not None:
old_n, old_nbf = oldM.shape
M[:old_n, :old_nbf] = oldM / sigma # (full M will be *= sigma later)
# Cases k = 0 and x = 0 (r = 0) are special, since general expressions
# are valid only if considered as limits; here they are computed
# separately, using expressions that | |
# coding: utf-8
"""
NEF_Emulator
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from evolved5g.swagger_client.api_client import ApiClient
class LocationFrontendApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_path_api_v1_frontend_location_post(self, body, **kwargs): # noqa: E501
"""Create Path # noqa: E501
Create new path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_path_api_v1_frontend_location_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathCreate body: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
return data
    def create_path_api_v1_frontend_location_post_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create Path  # noqa: E501

        Create new path via ``POST /api/v1/frontend/location/``.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_path_api_v1_frontend_location_post_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param PathCreate body: (required)
        :return: Path
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is passed
        :raises ValueError: if ``body`` is None
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() is snapshotted here, so `params` holds exactly
        # self, body, kwargs and all_params -- do not add locals above
        # this line or they will leak into `params`.
        params = locals()
        # Fold validated **kwargs into params, rejecting unknown options.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_path_api_v1_frontend_location_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_path_api_v1_frontend_location_post`")  # noqa: E501

        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}

        # The request body is the serialized PathCreate model.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['OAuth2PasswordBearer']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/frontend/location/', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Path',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_path_api_v1_frontend_location_id_delete(self, id, **kwargs): # noqa: E501
"""Delete Path # noqa: E501
Delete an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_path_api_v1_frontend_location_id_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
return data
    def delete_path_api_v1_frontend_location_id_delete_with_http_info(self, id, **kwargs):  # noqa: E501
        """Delete Path  # noqa: E501

        Delete a path via ``DELETE /api/v1/frontend/location/{id}``.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: (required)
        :return: Path
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is passed
        :raises ValueError: if ``id`` is None
        """
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() is snapshotted here, so `params` holds exactly
        # self, id, kwargs and all_params -- do not add locals above this line.
        params = locals()
        # Fold validated **kwargs into params, rejecting unknown options.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_path_api_v1_frontend_location_id_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_path_api_v1_frontend_location_id_delete`")  # noqa: E501

        collection_formats = {}

        # `id` is interpolated into the URL template below.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}

        # DELETE sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['OAuth2PasswordBearer']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/frontend/location/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Path',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_path_api_v1_frontend_location_id_get(self, id, **kwargs): # noqa: E501
"""Read Path # noqa: E501
Get path by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_path_api_v1_frontend_location_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
return data
    def read_path_api_v1_frontend_location_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
        """Read Path  # noqa: E501

        Get path by ID via ``GET /api/v1/frontend/location/{id}``.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_path_api_v1_frontend_location_id_get_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: (required)
        :return: Path
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is passed
        :raises ValueError: if ``id`` is None
        """
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() is snapshotted here, so `params` holds exactly
        # self, id, kwargs and all_params -- do not add locals above this line.
        params = locals()
        # Fold validated **kwargs into params, rejecting unknown options.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_path_api_v1_frontend_location_id_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `read_path_api_v1_frontend_location_id_get`")  # noqa: E501

        collection_formats = {}

        # `id` is interpolated into the URL template below.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}

        # GET sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['OAuth2PasswordBearer']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/frontend/location/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Path',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_paths_api_v1_frontend_location_get(self, **kwargs): # noqa: E501
"""Read Paths # noqa: E501
Retrieve paths. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_paths_api_v1_frontend_location_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip:
:param int limit:
:return: list[Path]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
return data
    def read_paths_api_v1_frontend_location_get_with_http_info(self, **kwargs):  # noqa: E501
        """Read Paths  # noqa: E501

        Retrieve (optionally paginated) paths via
        ``GET /api/v1/frontend/location/``.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_paths_api_v1_frontend_location_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int skip: number of records to skip (pagination offset)
        :param int limit: maximum number of records to return
        :return: list[Path]
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is passed
        """
        all_params = ['skip', 'limit']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() is snapshotted here, so `params` holds exactly
        # self, kwargs and all_params -- do not add locals above this line.
        params = locals()
        # Fold validated **kwargs into params, rejecting unknown options.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_paths_api_v1_frontend_location_get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}
        path_params = {}

        # Pagination options are sent as query parameters, only when given.
        query_params = []
        if 'skip' in params:
            query_params.append(('skip', params['skip']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501

        header_params = {}
        form_params = []
        local_var_files = {}

        # GET sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['OAuth2PasswordBearer']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/frontend/location/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Path]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_path_api_v1_frontend_location_id_put(self, body, id, **kwargs): # noqa: E501
"""Update Path # noqa: E501
Update an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass | |
<gh_stars>1-10
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
import numpy as np
def l2_loss_1Daction(q_actions, action_idx, expected_q, n_actions, b_weight):
    """
    Weighted L2 loss for a 1D action space, where a whole command
    (e.g. "go east") is a single discrete action.

    :param q_actions: Q-vector of a state for all actions
    :param action_idx: placeholder, the action chosen for the state,
        in a format of (tf.int32, [None])
    :param expected_q: placeholder, the expected reward gained from the step,
        in a format of (tf.float32, [None])
    :param n_actions: number of total actions
    :param b_weight: per-sample loss weight (e.g. for prioritized replay)
    :return: (mean weighted squared TD error, per-sample absolute TD error)
    """
    # Select the Q-value of the chosen action via a one-hot mask.
    chosen_mask = tf.one_hot(indices=action_idx, depth=n_actions)
    q_chosen = tf.reduce_sum(tf.multiply(q_actions, chosen_mask), axis=1)
    td_error = expected_q - q_chosen
    loss = tf.reduce_mean(b_weight * tf.square(td_error))
    return loss, tf.abs(td_error)
def l2_loss_2Daction(
        q_actions, action_idx, expected_q,
        n_actions, action_len, max_action_len, b_weight):
    """
    Weighted L2 loss for a 2D action space, where a command is a sequence
    of components (e.g. "go east" = "go" + "east"); the action's Q-value
    is the mean of its components' Q-values.

    :param q_actions: Q-matrix of a state for all action-components, e.g. tokens
    :param action_idx: placeholder, the action-components chosen for the state,
        in a format of (tf.int32, [None, None])
    :param expected_q: placeholder, the expected reward gained from the step,
        in a format of (tf.float32, [None])
    :param n_actions: number of action-components
    :param action_len: length of each action in a format of (tf.int32, [None])
    :param max_action_len: maximum length of action
    :param b_weight: per-sample loss weight
    :return: (mean weighted squared TD error, per-sample absolute TD error)
    """
    component_mask = tf.one_hot(indices=action_idx, depth=n_actions)
    # Zero out positions beyond each action's true length.
    length_mask = tf.sequence_mask(
        action_len, maxlen=max_action_len, dtype=tf.float32)
    q_valid = tf.multiply(q_actions, length_mask[:, :, tf.newaxis])
    q_selected = tf.multiply(q_valid, component_mask)
    q_total = tf.reduce_sum(q_selected, axis=[1, 2])
    # Average the component Q-values over the action length.
    q_mean = tf.div(q_total, tf.cast(action_len, tf.float32))
    td_error = expected_q - q_mean
    loss = tf.reduce_mean(b_weight * tf.square(td_error))
    return loss, tf.abs(td_error)
def encoder_lstm(src, src_len, src_embeddings, num_units, num_layers):
    """
    Encode a token sequence with a stacked LSTM and return the final states.

    :param src: placeholder, (tf.int32, [None, None])
    :param src_len: placeholder, (tf.float32, [None])
    :param src_embeddings: (tf.float32, [vocab_size, embedding_size])
    :param num_units: number of LSTM units per layer
    :param num_layers: number of LSTM layers
    :return: the final states of all layers after consuming `src`
    """
    cells = [tf.nn.rnn_cell.LSTMCell(num_units) for _ in range(num_layers)]
    stacked_cell = tf.nn.rnn_cell.MultiRNNCell(cells)
    embedded_src = tf.nn.embedding_lookup(src_embeddings, src)
    _, final_states = tf.nn.dynamic_rnn(
        stacked_cell, embedded_src, sequence_length=src_len,
        initial_state=None, dtype=tf.float32)
    return final_states
def encoder_cnn_prepare_input(src, src_embeddings, pos_embeddings):
    """
    Build the position-augmented embedding tensor fed into the CNN encoders
    (cf. Convolutional Neural Networks for Sentence Classification).

    :param src: placeholder, (tf.int32, [batch_size, src_len])
    :param src_embeddings: (tf.float32, [vocab_size, embedding_size])
    :param pos_embeddings: (tf.float32, [pos_emb_len, embedding_size])
    :return: (tf.float32, [batch_size, src_len, embedding_size, 1]) --
        token embeddings plus position embeddings with a trailing
        channel axis for conv2d
    """
    src_emb = tf.nn.embedding_lookup(src_embeddings, src)
    # Take the first src_len rows of the position table so positions line
    # up with the current batch's sequence length.
    pos_emb = tf.slice(pos_embeddings, [0, 0], [tf.shape(src_emb)[1], -1])
    src_pos_emb = src_emb + pos_emb
    src_emb_expanded = tf.expand_dims(src_pos_emb, axis=-1)  # channel dimension
    return src_emb_expanded
def encoder_cnn_base(input_tensor, filter_sizes, num_filters, embedding_size):
    """
    Apply a bank of left-padded (causal-style) convolutions over the
    embedded sequence and concatenate the per-filter-size feature maps.

    :param input_tensor: (tf.float32, [batch_size, src_len, embedding_size, 1])
    :param filter_sizes: list of ints, e.g. [3, 4, 5]
    :param num_filters: number of filters per filter size
    :param embedding_size: width of the embedding axis
    :return: (tf.float32, [batch_size, src_len, len(filter_sizes) * num_filters])
    """
    layer_outputs = []
    for i, fs in enumerate(filter_sizes):
        with tf.variable_scope("conv-avgpool-block-%s" % fs):
            # Pad fs - 1 rows on the left so the VALID convolution output
            # has the same length as the input sequence.
            src_paddings = tf.constant([[0, 0], [fs - 1, 0], [0, 0], [0, 0]])
            src_w_pad = tf.pad(input_tensor, paddings=src_paddings, mode="CONSTANT")
            # Convolution Layer; filters span the full embedding width.
            filter_shape = [fs, embedding_size, 1, num_filters]
            # Lambda initializers defer tensor creation until get_variable
            # actually needs it (required when shapes are Python lists).
            w = tf.get_variable(
                name="W",
                initializer=lambda: tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.get_variable(
                name="b",
                initializer=lambda: tf.constant(0.1, shape=[num_filters]))
            conv = tf.nn.conv2d(
                input=src_w_pad, filter=w, strides=[1, 1, 1, 1],
                padding="VALID", name="conv")
            # Apply nonlinearity
            h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
            layer_outputs.append(h)
    # Combine all the pooled features.
    # Squeeze the 3rd dim that is the col of conv result.
    inner_state = tf.squeeze(tf.concat(layer_outputs, axis=-1), axis=[2])
    return inner_state
def encoder_cnn_block(
        src, src_embeddings, pos_embeddings,
        filter_sizes, num_filters,
        embedding_size):
    """Convenience wrapper: embed `src` (with positions) then run the conv bank."""
    prepared = encoder_cnn_prepare_input(src, src_embeddings, pos_embeddings)
    return encoder_cnn_base(prepared, filter_sizes, num_filters, embedding_size)
def encoder_cnn(
        src, src_embeddings, pos_embeddings, filter_sizes, num_filters,
        embedding_size):
    """
    Encode a state with a CNN and max-over-time pooling, cf.
    Convolutional Neural Networks for Sentence Classification.

    :param src: placeholder, (tf.int32, [None, None])
    :param src_embeddings: (tf.float32, [vocab_size, embedding_size])
    :param pos_embeddings: position embedding, (tf.float32, [pos_emb_len, embedding_size])
    :param filter_sizes: list of ints, e.g. [3, 4, 5]
    :param num_filters: number of filters of each filter_size
    :param embedding_size: embedding size
    :return: (tf.float32, [batch_size, num_filters * len(filter_sizes)])
    """
    total_features = num_filters * len(filter_sizes)
    with tf.variable_scope("cnn_encoder"):
        features = encoder_cnn_block(
            src, src_embeddings, pos_embeddings, filter_sizes, num_filters,
            embedding_size)
        # Max over the time axis, then flatten to a fixed-width vector.
        max_pooled = tf.reduce_max(features, axis=1)
        inner_states = tf.reshape(max_pooled, [-1, total_features])
    return inner_states
def encoder_cnn_multilayers(
        src, src_embeddings, pos_embeddings, num_layers, filter_size, embedding_size):
    """
    Encode a state with a stack of CNN layers, each layer feeding the next.

    :param src: placeholder, (tf.int32, [None, None])
    :param src_embeddings: (tf.float32, [vocab_size, embedding_size])
    :param pos_embeddings: position embedding, (tf.float32, [pos_emb_len, embedding_size])
    :param num_layers: number of stacked CNN layers
    :param filter_size: the single filter width used by every layer
    :param embedding_size: embedding size; also each layer's output width,
        so layer outputs can be fed back in as inputs
    :return: output of the last layer,
        (tf.float32, [batch_size, src_len, embedding_size])
    """
    in_tn = encoder_cnn_prepare_input(src, src_embeddings, pos_embeddings)
    out_tns = []
    with tf.variable_scope("cnn_encoder_multilayers"):
        for layer in range(num_layers):
            with tf.variable_scope("cnn_encoder_layer_{}".format(layer)):
                h_cnn = encoder_cnn_base(
                    in_tn,
                    filter_sizes=[filter_size],
                    num_filters=embedding_size,
                    embedding_size=embedding_size)
                out_tns.append(h_cnn)
                # Restore the trailing channel axis so the next layer's
                # conv2d input is rank 4 again.
                in_tn = tf.expand_dims(h_cnn, axis=-1)
    return out_tns[-1]
def encoder_cnn_multichannels(
        src, src_len, src_embeddings, filter_sizes, num_filters,
        embedding_size, num_channels):
    """
    Encode a multi-channel token tensor with a conv + max-pool bank;
    the channel axis of `src` becomes the conv2d input-channel axis.

    :param src: ('int32', [None, None, None])
    :param src_len: ('float', [None, None])
    :param src_embeddings: (tf.float32, [vocab_size, embedding_size])
    :param filter_sizes: list of ints, e.g. [3, 4, 5]
    :param num_filters: number of filters of each filter_size
    :param embedding_size: embedding size
    :param num_channels: number of input channels in `src`
    :return: (tf.float32, [batch_size, num_filters * len(filter_sizes)])
    """
    src_emb = tf.nn.embedding_lookup(src_embeddings, src)
    src_emb_trans = tf.transpose(src_emb, perm=[0, 2, 3, 1])  # for NHWC
    max_src_len = tf.shape(src)[1]
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope("conv-maxpool-%s" % filter_size):
            # Convolution Layer
            filter_shape = [filter_size, embedding_size,
                            num_channels, num_filters]
            w = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1),
                            name="W")
            b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
            conv = tf.nn.conv2d(  # depthwise_conv2d is too slow
                src_emb_trans,
                w,
                strides=[1, 1, 1, 1],
                padding="VALID",
                name="conv")
            # Apply nonlinearity
            h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
            # Max-pooling over the outputs; mask out positions past each
            # sequence's valid convolution length first.
            src_mask = tf.sequence_mask(src_len - filter_size + 1,
                                        maxlen=max_src_len)
            # NOTE(review): src_mask is boolean (no dtype given) and has no
            # trailing filter axes, while `h` is float32 and rank 4 --
            # tf.multiply here looks like it would fail a dtype/shape check.
            # Confirm whether a tf.cast(..., tf.float32) plus broadcasting
            # axes (e.g. [:, :, tf.newaxis, tf.newaxis]) is missing.
            h = tf.multiply(h, src_mask)
            pooled = tf.reduce_max(h, axis=1)
            pooled_outputs.append(pooled)
    # Combine all the pooled features
    num_features_total = num_filters * len(filter_sizes)
    h_pool = tf.concat(pooled_outputs, axis=3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_features_total])
    inner_states = h_pool_flat
    return inner_states
def decoder_dense_classification(inner_states, n_actions):
    """
    Linear readout: project encoder states to one Q-value per action.

    :param inner_states: encoded state vectors, (tf.float32, [batch_size, d])
    :param n_actions: number of output actions
    :return: (tf.float32, [batch_size, n_actions])
    """
    return tf.layers.dense(inner_states, units=n_actions, use_bias=True)
def decoder_fix_len_lstm(
        inner_state, n_actions, tgt_embeddings, num_units, num_layers,
        sos_id, eos_id, max_action_len=10):
    """
    Greedy LSTM decoder producing up to `max_action_len` token Q-vectors.

    :param inner_state: encoder final state (per-layer LSTM state tuples);
        the decoder is initialized with it
    :param n_actions: size of the output projection (token vocabulary)
    :param tgt_embeddings: target-side embedding matrix
    :param num_units: LSTM units per layer
    :param num_layers: number of stacked LSTM layers
    :param sos_id: start-of-sequence token id used to prime decoding
    :param eos_id: token id that terminates decoding early
    :param max_action_len: hard cap on the decoded length
    :return: q_actions, (tf.float32, [batch_size, <=max_action_len, n_actions])
    """
    # Infer the batch size from the top layer's cell state.
    batch_size = tf.shape(inner_state[-1].c)[0]
    decoder_cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.LSTMCell(num_units) for _ in range(num_layers)])
    projection_layer = tf.layers.Dense(units=n_actions, use_bias=True)
    # Greedy decoding: the argmax token's embedding is fed back each step.
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        embedding=tgt_embeddings,
        start_tokens=tf.fill([batch_size], sos_id),
        end_token=eos_id)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        decoder_cell, helper, inner_state,
        output_layer=projection_layer)
    # output_state is unused; only the per-step logits are returned.
    outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, output_time_major=False, impute_finished=True,
        swap_memory=True, maximum_iterations=max_action_len)
    q_actions = outputs.rnn_output
    return q_actions
def decoder_fix_len_cnn(
        inner_state, tgt_embeddings, pos_embeddings, n_tokens, embedding_size,
        filter_sizes, num_filters, sos_id, max_action_len=10):
    """
    Greedy CNN decoder with attention over the encoder outputs; emits
    exactly `max_action_len` per-token Q-vectors.

    :param inner_state: encoder outputs attended over at every step,
        (tf.float32, [batch_size, src_len, d])
    :param tgt_embeddings: target-side embedding matrix
    :param pos_embeddings: position embedding table
    :param n_tokens: size of the output projection (token vocabulary)
    :param embedding_size: embedding size
    :param filter_sizes: conv filter widths for the decoder CNN
    :param num_filters: number of filters per width
    :param sos_id: start-of-sequence token id used to prime decoding
    :param max_action_len: number of decoding steps (always runs to the cap)
    :return: q_actions, (tf.float32, [batch_size, max_action_len, n_tokens])
    """
    # AUTO_REUSE lets the per-step re-encoding share one set of variables.
    with tf.variable_scope("cnn_decoder", reuse=tf.AUTO_REUSE):
        projection_layer = tf.layers.Dense(
            units=n_tokens, use_bias=True, name="dense_tokens")

        def decode_one(x, z):
            # get state of last token
            h = encoder_cnn_block(
                src=x, src_embeddings=tgt_embeddings,
                pos_embeddings=pos_embeddings,
                filter_sizes=filter_sizes, num_filters=num_filters,
                embedding_size=embedding_size)[:, -1, :]
            # compute attention weight
            a = tf.nn.softmax(tf.reduce_sum(
                tf.multiply(tf.expand_dims(h, axis=1), z), axis=-1))
            # compute attention vector
            c = tf.reduce_sum(tf.multiply(a[:, :, tf.newaxis], z), axis=1)
            # compute token distribution
            y = projection_layer(h + c)
            return y

        def cond(i, out_ta, in_tn):
            return tf.less(i, max_action_len)

        def body(i, out_ta, in_tn):
            # Re-encode the whole prefix, record the step's logits, and
            # append the greedy argmax token to the prefix.
            token_readout = decode_one(in_tn, inner_state)
            token_idx = tf.argmax(token_readout, axis=1, output_type=tf.int32)
            out_ta = out_ta.write(i, token_readout)
            in_tn = tf.concat([in_tn, token_idx[:, tf.newaxis]], axis=1)
            i = tf.add(i, 1)
            return [i, out_ta, in_tn]

        start_i = tf.constant(0, dtype=tf.int32)
        batch_size = tf.shape(inner_state)[0]
        # Every sequence starts from the SOS token.
        batch_sos = tf.tile([sos_id], [batch_size])
        input_tn = batch_sos[:, tf.newaxis]
        output_ta = tf.TensorArray(dtype=tf.float32, size=max_action_len)
        # The prefix grows each step, hence the relaxed shape invariants.
        _, output_ta, _ = tf.while_loop(
            cond, body,
            loop_vars=[start_i, output_ta, input_tn],
            shape_invariants=[start_i.get_shape(), tf.TensorShape(None),
                              tf.TensorShape([None, None])])
        # Stack to [time, batch, n_tokens], then swap to batch-major.
        q_actions = tf.transpose(output_ta.stack(), perm=[1, 0, 2])
        return q_actions
def decoder_fix_len_cnn_multilayers(
inner_state, tgt_embeddings, pos_embeddings, n_tokens, embedding_size,
num_layers, filter_size, sos_id, max_action_len=10):
with tf.variable_scope("cnn_decoder", reuse=tf.AUTO_REUSE):
projection_layer = tf.layers.Dense(
units=n_tokens, use_bias=True, name="dense_tokens")
def decode_one(x, z):
# get state of last token
h = encoder_cnn_multilayers(
src=x, src_embeddings=tgt_embeddings,
pos_embeddings=pos_embeddings,
num_layers=num_layers,
filter_size=filter_size,
embedding_size=embedding_size)[:, -1, :]
# compute attention weight
a = tf.nn.softmax(tf.reduce_sum(
tf.multiply(tf.expand_dims(h, axis=1), z), axis=-1))
# compute attention vector
c = tf.reduce_sum(tf.multiply(a[:, :, tf.newaxis], z), axis=1)
# compute token distribution
y = projection_layer(h + c)
return y
def cond(i, out_ta, in_tn):
return tf.less(i, max_action_len)
def body(i, out_ta, in_tn):
token_readout = decode_one(in_tn, inner_state)
token_idx = tf.argmax(token_readout, axis=1, output_type=tf.int32)
out_ta = out_ta.write(i, token_readout)
in_tn = tf.concat([in_tn, token_idx[:, tf.newaxis]], axis=1)
i = tf.add(i, 1)
return [i, out_ta, in_tn]
start_i = tf.constant(0, dtype=tf.int32)
batch_size = tf.shape(inner_state)[0]
batch_sos = tf.tile([sos_id], [batch_size])
input_tn = batch_sos[:, tf.newaxis]
output_ta = tf.TensorArray(dtype=tf.float32, size=max_action_len)
_, output_ta, _ = tf.while_loop(
cond, body,
loop_vars=[start_i, output_ta, input_tn],
shape_invariants=[start_i.get_shape(), tf.TensorShape(None),
tf.TensorShape([None, None])])
q_actions | |
# -*- coding: utf-8 -*-
"""
flask.json
~~~~~~~~~~
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
import codecs
import io
import uuid
from datetime import date
from datetime import datetime
from itsdangerous import json as _json
from jinja2 import Markup
from werkzeug.http import http_date
from .._compat import PY2
from .._compat import text_type
from ..globals import current_app
from ..globals import request
# dataclasses is stdlib from Python 3.7; on older interpreters without the
# backport, JSONEncoder simply skips dataclass support.
try:
    import dataclasses
except ImportError:
    dataclasses = None

# Figure out if simplejson escapes slashes.  This behavior was changed
# from one version to another without reason.
_slash_escape = "\\/" not in _json.dumps("/")

# Public API of this module.
__all__ = [
    "dump",
    "dumps",
    "load",
    "loads",
    "htmlsafe_dump",
    "htmlsafe_dumps",
    "JSONDecoder",
    "JSONEncoder",
    "jsonify",
]
def _wrap_reader_for_text(fp, encoding):
    """Return a text-mode view of *fp*: binary streams are wrapped in a
    decoding :class:`io.TextIOWrapper`, text streams pass through as-is."""
    # A zero-length read probes the stream type without consuming data.
    if isinstance(fp.read(0), bytes):
        return io.TextIOWrapper(io.BufferedReader(fp), encoding)
    return fp
def _wrap_writer_for_text(fp, encoding):
    """Return a text-mode view of *fp*: if writing ``str`` fails (binary
    stream), wrap it in an encoding :class:`io.TextIOWrapper`."""
    try:
        # Writing an empty string probes the mode without emitting bytes.
        fp.write("")
    except TypeError:
        return io.TextIOWrapper(fp, encoding)
    return fp
class JSONEncoder(_json.JSONEncoder):
    """Flask's default JSON encoder.

    Extends the base encoder with support for ``datetime`` and ``date``
    (serialized as RFC 822 / HTTP date strings), ``UUID``, dataclasses,
    and objects exposing ``__html__`` (e.g. ``Markup``).

    Override the :meth:`default` method to support more data types.
    """

    def default(self, o):
        """Return a serializable representation of ``o`` or defer to the
        base implementation (which raises :exc:`TypeError`).

        Example of supporting arbitrary iterators in a subclass::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        # Check order matters: datetime is a subclass of date.
        if isinstance(o, datetime):
            return http_date(o.utctimetuple())
        elif isinstance(o, date):
            return http_date(o.timetuple())
        elif isinstance(o, uuid.UUID):
            return str(o)
        elif dataclasses and dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        elif hasattr(o, "__html__"):
            return text_type(o.__html__())
        return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
    """Flask's default JSON decoder.

    Behaves exactly like the underlying simplejson decoder (see the
    :mod:`json` documentation); it exists so applications can swap in a
    custom decoder, and it is also used by :attr:`~flask.Request`.
    """
def _dump_arg_defaults(kwargs, app=None):
    """Inject default arguments for dump functions.

    Prefers the blueprint's encoder, then the app's; falls back to the
    module defaults when no app (context) is available.
    """
    if app is None:
        app = current_app

    if not app:
        # No app context: module-level defaults.
        kwargs.setdefault("sort_keys", True)
        kwargs.setdefault("cls", JSONEncoder)
        return

    bp = app.blueprints.get(request.blueprint) if request else None
    encoder_cls = bp.json_encoder if bp and bp.json_encoder else app.json_encoder
    kwargs.setdefault("cls", encoder_cls)
    if not app.config["JSON_AS_ASCII"]:
        kwargs.setdefault("ensure_ascii", False)
    kwargs.setdefault("sort_keys", app.config["JSON_SORT_KEYS"])
def _load_arg_defaults(kwargs, app=None):
    """Inject default arguments for load functions.

    Prefers the blueprint's decoder, then the app's; falls back to the
    module default when no app (context) is available.
    """
    if app is None:
        app = current_app

    if not app:
        kwargs.setdefault("cls", JSONDecoder)
        return

    bp = app.blueprints.get(request.blueprint) if request else None
    decoder_cls = bp.json_decoder if bp and bp.json_decoder else app.json_decoder
    kwargs.setdefault("cls", decoder_cls)
def detect_encoding(data):
    """Detect which UTF codec was used to encode the given bytes.

    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
    accepted.  Older documents allowed 8, 16, or 32; 16 and 32 can be big
    or little endian.  Some editors or libraries may prepend a BOM.

    :param data: Bytes in unknown UTF encoding.
    :return: UTF encoding name
    """
    sample = data[:4]

    # An explicit byte-order mark wins outright (UTF-32 BOMs are checked
    # before UTF-16 since the latter is a prefix of the former).
    if sample.startswith(codecs.BOM_UTF8):
        return "utf-8-sig"
    if b"\x00" not in sample:
        # No NUL bytes among the first characters: plain UTF-8.
        return "utf-8"
    if sample in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
        return "utf-32"
    if sample[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
        return "utf-16"

    # No BOM: infer width and endianness from where the zero bytes sit
    # within the first code unit(s).
    if len(sample) == 4:
        if sample.startswith(b"\x00\x00\x00"):
            return "utf-32-be"
        if sample[::2] == b"\x00\x00":
            return "utf-16-be"
        if sample.endswith(b"\x00\x00\x00"):
            return "utf-32-le"
        if sample[1::2] == b"\x00\x00":
            return "utf-16-le"
    if len(sample) == 2:
        return "utf-16-be" if sample.startswith(b"\x00") else "utf-16-le"
    return "utf-8"
def dumps(obj, app=None, **kwargs):
    """Serialize ``obj`` to a JSON-formatted string.  If there is an
    app context pushed, use the current app's configured encoder
    (:attr:`~flask.Flask.json_encoder`), or fall back to the default
    :class:`JSONEncoder`.

    Takes the same arguments as the built-in :func:`json.dumps`, and
    does some extra configuration based on the application.  If the
    simplejson package is installed, it is preferred.

    :param obj: Object to serialize to JSON.
    :param app: App instance used to configure the JSON encoder.
        Uses ``current_app`` if not given, and falls back to the default
        encoder when not in an app context.
    :param kwargs: Extra arguments passed to :func:`json.dumps`.

    .. versionchanged:: 1.0.3

        ``app`` can be passed directly, rather than requiring an app
        context for configuration.
    """
    _dump_arg_defaults(kwargs, app=app)
    # ``encoding`` is handled here, not by the underlying dumps.
    encoding = kwargs.pop("encoding", None)
    serialized = _json.dumps(obj, **kwargs)
    if encoding is not None and isinstance(serialized, text_type):
        serialized = serialized.encode(encoding)
    return serialized
def dump(obj, fp, app=None, **kwargs):
    """Like :func:`dumps` but writes into a file object."""
    _dump_arg_defaults(kwargs, app=app)
    encoding = kwargs.pop("encoding", None)
    # With an explicit encoding, ensure the target accepts text.
    target = fp if encoding is None else _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, target, **kwargs)
def loads(s, app=None, **kwargs):
    """Deserialize an object from a JSON-formatted string ``s``.  If
    there is an app context pushed, use the current app's configured
    decoder (:attr:`~flask.Flask.json_decoder`), or fall back to the
    default :class:`JSONDecoder`.

    Takes the same arguments as the built-in :func:`json.loads`, and
    does some extra configuration based on the application.  If the
    simplejson package is installed, it is preferred.

    :param s: JSON string to deserialize.
    :param app: App instance used to configure the JSON decoder.
        Uses ``current_app`` if not given, and falls back to the default
        decoder when not in an app context.
    :param kwargs: Extra arguments passed to :func:`json.loads`.

    .. versionchanged:: 1.0.3

        ``app`` can be passed directly, rather than requiring an app
        context for configuration.
    """
    _load_arg_defaults(kwargs, app=app)
    if isinstance(s, bytes):
        # Honor an explicit encoding; otherwise sniff the UTF flavor.
        explicit = kwargs.pop("encoding", None)
        s = s.decode(explicit if explicit is not None else detect_encoding(s))
    return _json.loads(s, **kwargs)
def load(fp, app=None, **kwargs):
    """Like :func:`loads` but reads from a file object."""
    _load_arg_defaults(kwargs, app=app)
    if not PY2:
        # Python 3 reads text; wrap binary streams with a decoder.
        encoding = kwargs.pop("encoding", None) or "utf-8"
        fp = _wrap_reader_for_text(fp, encoding)
    return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
    """Serialize ``obj`` to a JSON string that is safe to embed inside
    ``<script>`` tags.

    Accepts the same arguments as :func:`dumps`.  Available in templates
    through the ``|tojson`` filter, which also marks the result as safe.
    The characters ``<``, ``>``, ``&`` and ``'`` are escaped, which makes
    the output safe anywhere in HTML with the notable exception of
    double-quoted attributes — single quote your attributes (or HTML
    escape in addition) in that case.

    .. versionchanged:: 0.10
        This function's return value is now always safe for HTML usage,
        even outside of script tags or in XHTML, except inside
        double-quoted HTML attributes.  Use ``|tojson|forceescape`` if
        you cannot single-quote the attribute.
    """
    rv = dumps(obj, **kwargs)
    # Escape characters that could terminate a script block or break out
    # of a single-quoted attribute (same order as repeated .replace()).
    for ch, esc in ((u"<", u"\\u003c"), (u">", u"\\u003e"),
                    (u"&", u"\\u0026"), (u"'", u"\\u0027")):
        rv = rv.replace(ch, esc)
    if not _slash_escape:
        rv = rv.replace("\\/", "/")
    return rv
def htmlsafe_dump(obj, fp, **kwargs):
    """Serialize ``obj`` with :func:`htmlsafe_dumps` and write the result
    to the file object ``fp``."""
    rv = htmlsafe_dumps(obj, **kwargs)
    fp.write(text_type(rv))
def jsonify(*args, **kwargs):
"""This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@10.0.0.7",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
This function's response will be pretty printed if the
``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to True or the
Flask app is running in debug mode. Compressed (not pretty) formatting
currently means no indents and no spaces after separators.
.. versionadded:: 0.2
"""
indent = None
separators = (",", ":")
if current_app.config["JSONIFY_PRETTYPRINT_REGULAR"] or current_app.debug:
indent = 2
separators = (", ", ": ")
if args and kwargs:
raise TypeError("jsonify() behavior undefined when passed both args and kwargs")
elif len(args) == 1: # single args are passed directly to dumps()
| |
<reponame>sot/mica<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Generalized module for fetching and archiving obsid-organized
telemetry such as asp_l1 and obspar products.
"""
import os
import tempfile
from glob import glob
import re
import logging
import shutil
import numpy as np
import astropy.io.fits as pyfits
from pathlib import Path
import Ska.arc5gl
import Ska.DBI
from Chandra.Time import DateTime
import Ska.File
from astropy.table import Table
import mica
# borrowed from telem_archive
import csv
import gzip
def parse_obspar(file):
    """
    Yield the rows of an IRAF formatted obspar file as dictionaries.

    Each yielded dict has the keys ``name``, ``type``, ``hidden``,
    ``value``, ``def1``, ``def2`` and ``descr``.  The ``value`` entry is
    converted according to the IRAF type code ('i' -> int, 'r' -> float,
    's' -> str); empty numeric values become ``None``.  Dashes in
    parameter names are replaced with underscores.

    :param file: obspar file, plain text or gzipped
    :returns: row of obspar
    :rtype: dictionary generator
    """
    convert = {'i': int,
               'r': float,
               's': str}
    # Try gzip first and fall back to plain text (gzip raises an
    # IOError/OSError subclass on a bad magic number).  Context managers
    # guarantee the file handle is closed (the original leaked it).
    try:
        with gzip.open(file, 'rt', encoding='utf8', errors='ignore') as fh:
            lines = fh.readlines()
    except IOError:
        with open(file, 'rt', encoding='utf8', errors='ignore') as fh:
            lines = fh.readlines()
    obs_read = csv.DictReader(lines,
                              fieldnames=('name', 'type', 'hidden', 'value',
                                          'def1', 'def2', 'descr'),
                              dialect='excel')
    for row in obs_read:
        # Empty numeric fields cannot be converted; represent them as None.
        if ((row['value'] == '')
                and ((row['type'] == 'r') or (row['type'] == 'i'))):
            row['value'] = None
        else:
            row['value'] = convert[row['type']](row['value'])
        row['name'] = row['name'].replace('-', '_')
        yield row
def get_obspar(obsparfile):
    """Read an obspar file and return all of its parameters as one dict
    mapping parameter name to converted value."""
    return {row['name']: row['value'] for row in parse_obspar(obsparfile)}
class ProductVersionError(Exception):
    """Error related to determining the version of archive products.

    NOTE(review): the raise sites are outside this view — confirm exact
    semantics against ``get_ver_num`` and the update pipeline.
    """
    pass
class ObsArchive:
"""
Object to store configuration, logging, and processing tasks
to fetch obsid telemetry from the CXC archive and store in a Ska
file archive, while logging the archive files to a file lookup
database.
The configuration dictionary ``config`` may have these key/values:
* data_root: directory for products
(example /proj/sot/ska/data/mica/archive/asp1)
* temp_root: directory for temporary storage of fetched
telemetry
* bad_obsids: file containing list of obsids that should be
ignored when in regular update mode
* cols: headers that will be included in file lookup table
* sql_def: sql file to build file lookup archfiles table
* apstat_table: axafapstat database table from which to find
new processing (by id)
* apstat_id: field in apstat_table to use as unique CXCDS
processing id
* label: label of product type for log messages
* small: arc5gl keyword/filetype for small file from products
(example asp1{fidprops}). This will be retrieved with
"get %s" % config['small'] and the retrieved files will
be used to determine product version.
* small_glob: glob to match files retrieved by
"get %s" % config[small]
(example '*fidpr*')
* small_ver_regex: regular expression to search for version from
retrieved files (example 'pcadf\\d+N(\\d{3})_')
* full: arc5gl keyword for products (example 'asp1')
* rebuild: If True/set, allow update mode to rebuild the database
from obsid 1.
:param config: configuration dictionary
:returns: ObsArchive instance
"""
def __init__(self, config):
self._config = config
self._logger = logging.getLogger('ObsArchive')
    @property
    def config(self):
        # Read-only access to the configuration dict given at construction.
        return self._config
    @property
    def logger(self):
        # Read-only access to the 'ObsArchive' logger created in __init__.
        return self._logger
    def set_env(self):
        """
        Set up the full processing environment: an arc5gl handle,
        connection parameters for the axafapstat and aca databases, and
        the local sqlite archfiles database (created from
        ``config['sql_def']`` when missing or empty).
        """
        self._arc5 = Ska.arc5gl.Arc5gl()
        # Remote database connection parameters (connections opened later
        # via Ska.DBI.DBI(**...)).
        self._apstat = dict(dbi='sybase', server='sqlsao',
                            database='axafapstat')
        self._aca_db = dict(dbi='sybase', server='sybase',
                            user='aca_read')
        config = self.config
        db_file = os.path.join(os.path.abspath(config['data_root']),
                               'archfiles.db3')
        # Bootstrap the sqlite archfiles database when it does not exist
        # yet, or exists but is empty (e.g. from an interrupted run).
        if not os.path.exists(db_file) or os.stat(db_file).st_size == 0:
            if not os.path.exists(config['data_root']):
                os.makedirs(config['data_root'])
            self.logger.info("creating archfiles db from %s"
                             % config['sql_def'])
            # The schema definition file ships alongside this module.
            db_sql = Path(__file__).parent / config['sql_def']
            db_init_cmds = open(db_sql).read()
            with Ska.DBI.DBI(dbi='sqlite', server=db_file,
                             autocommit=False) as db:
                db.execute(db_init_cmds, commit=True)
        # Connection parameters used for all subsequent archfiles queries.
        db = dict(dbi='sqlite', server=db_file,
                  autocommit=False)
        self._archfiles_db = db
def set_read_env(self):
"""
Set environment included an arc5gl handle and
and a handle to the axafapstat database
"""
config = self.config
db_file = os.path.join(os.path.abspath(config['data_root']),
'archfiles.db3')
db = dict(dbi='sqlite', server=db_file,
autocommit=False)
self._archfiles_db = db
def get_all_obspar_info(self, i, f, archfiles):
"""
Read obspar and add 'obsid' and 'filename' keys to the dictionary
i and archfiles are just passed to make the logging prettier.
"""
logger = self.logger
filename = os.path.basename(f)
logger.debug('Reading (%d / %d) %s' % (i, len(archfiles), filename))
obspar = get_obspar(f)
obspar['obsid'] = obspar['obs_id']
obspar['filename'] = filename
return obspar
def get_files(self, obsid=None, start=None, stop=None,
revision=None, content=None):
data_root = self.config['data_root']
if obsid is None:
if start is None or stop is None:
raise TypeError("Must supply either obsid or start and stop")
file_records = self._get_file_records(obsid=obsid,
start=start, stop=stop,
revision=revision,
content=content)
files = [os.path.join(data_root,
("%05d" % f['obsid'])[0:2],
"%05d_v%02d" % (f['obsid'], f['revision']),
str(f['filename']))
for f in file_records]
return files
def _get_file_records(self, obsid=None, start=None, stop=None,
revision=None, content=None):
self.set_read_env()
tstart_pad = 10 * 86400
if content is None:
content = self.config['content_types']
if type(content) == str:
content = [content]
content_str = ','.join(["'%s'" % x for x in content])
if obsid is None:
if start is None or stop is None:
raise TypeError("Must supply either obsid or start and stop")
tstart = DateTime(start).secs
tstop = DateTime(stop).secs
db_query = ("SELECT * from archfiles "
"WHERE tstart >= %f - %f "
"AND tstart < %f "
"AND tstop > %f "
"AND content in (%s) "
% (tstart, tstart_pad, tstop, tstart, content_str))
else:
db_query = ('SELECT * from archfiles '
'WHERE obsid = %d '
'AND content in (%s) '
% (obsid, content_str))
if revision is None:
db_query += 'AND isdefault = 1 '
else:
if revision == 'last':
if obsid is not None:
db_query += """AND revision in
(SELECT max(revision) from archfiles
WHERE obsid = %d)""" % obsid
else:
# The no-obsid case is handled below. The has-obsid case
# could probably be pushed down there too, but the db filter is OK.
pass
elif revision == 'all':
pass
else:
db_query += 'AND revision = %d ' % revision
db_query += "order by tstart"
with Ska.DBI.DBI(**self._archfiles_db) as db:
files = db.fetchall(db_query)
# For the special case of "revision = last" without obsid, filter the results one a per-file
# basis by obsid (could be multiple obsids in the date range)
if revision == 'last' and obsid is None:
max_rev = {}
for obsid in np.unique(files['obsid']):
ok = files['obsid'] == obsid
max_rev[obsid] = np.max(files[ok]['revision'])
rev_ok = [f['revision'] == max_rev[f['obsid']] for f in files]
files = files[np.array(rev_ok)]
return files
def get_dir(self, obsid):
"""
Return the latest released directory for an obsid
Return None if there are no 'default' / released products.
"""
dirmap = self.get_obs_dirs(obsid)
if 'default' in dirmap:
return dirmap['default']
else:
return None
def get_obs_dirs(self, obsid):
"""
Return a dictionary of the directories available for an obsid.
This is just done with a glob in the data directories.
"""
data_root = self._config['data_root']
strobs = "%05d" % obsid
chunk_dir = strobs[0:2]
topdir = os.path.join(data_root, chunk_dir)
dirmap = dict(revisions=[])
verdirs = glob(os.path.join(topdir, "%s_v*" % strobs))
if not verdirs:
return None
for v in verdirs:
nmatch = re.search(r"%s_v(\d{2})" % strobs, v)
if nmatch:
dirmap[int(nmatch.group(1))] = v
dirmap['revisions'].append(int(nmatch.group(1)))
lastdirs = glob(os.path.join(topdir, "%s_last" % strobs))
defdirs = glob(os.path.join(topdir, "%s" % strobs))
if defdirs:
dirmap['default'] = defdirs[0]
if lastdirs:
dirmap['last'] = lastdirs[0]
else:
if defdirs:
dirmap['last'] = defdirs[0]
return dirmap
@staticmethod
def get_file_ver(tempdir, fileglob, ver_regex):
"""
Determine the version/revision of a set of archived files from
their file names.
:param tempdir: directory containing files
:param fileglob: glob to match files in question
:param ver_regex: regular expression to pull out version
from the set of files
:returns: version number
:rtype: integer
"""
files = glob(os.path.join(tempdir, fileglob))
if not files:
return None
versions = {}
for f in files:
fmatch = re.search(ver_regex, f)
if fmatch:
versions[int(fmatch.group(1))] = 1
if len(versions) > 1:
raise ValueError("Different version files in %s" % tempdir)
# update version to number
version = list(versions.keys())[0]
return version
def get_ver_num(self, obsid, version='default'):
"""
Determine the version number associated with the current released
products or with the products referenced by "version=last".
:param obsid: obsid
:param version: version string ('default'|'last')
:returns: version
:rtype: integer
"""
arc5 = self._arc5
apstat = self._apstat
logger = self.logger
config = self.config
# this is obi agnostic, all obis should be same
# version anyway...u
tempdir = tempfile.mkdtemp()
# if multi-obi, we'll need to be specific
arc5.sendline("reset")
arc5.sendline("cd %s" % tempdir)
arc5.sendline("obsid=%d" % obsid)
with Ska.DBI.DBI(**apstat) as db:
obis = db.fetchall(
"select distinct obi from obidet_0_5 where obsid = %d" % obsid)
if len(obis) > 1:
minobi = np.min(obis['obi'])
logger.debug("limiting arc5gl to obi %d" % minobi)
arc5.sendline("obi=%d" % minobi)
if version != 'default':
arc5.sendline("version=%s" % version)
# just get a small file
arc5.sendline("get %s" % config['small'])
| |
restrict min(nio)
# Z update?
# C_Z function
# prediction function
# test uMDL regression function
# test DNML regression
return Out
################################################################################
#######
####
def fit_UMDL(tr_y2train, tr_X2train, tr_X2train_cl, lambdas, sigmap, par, cli=0):
    """
    Fit a uMDL ridge regression independently for each output cluster.

    The cluster label of each sample is taken from column ``cli`` of
    ``tr_X2train_cl``; the samples of each cluster are fit separately
    with ``torchuMDL`` starting from the initial ``lambdas``/``sigmap``
    and the options in ``par`` (which must hold the torch device under
    the key 'device').

    Returns a dict with per-cluster weights ('Wp'), one lambda column per
    cluster ('Lambdas'), noise estimates ('Sigmap') and losses ('Lo').
    """
    device = par['device']
    cluster_ids = tr_X2train_cl[:, cli]
    n_clusters = torch.max(cluster_ids) + 1
    n_features = tr_X2train.shape[1]
    weights, sigmas, losses = [], [], []
    lambda_cols = torch.zeros([n_features, n_clusters], dtype=torch.float,
                              device=device)
    for c in range(n_clusters):
        mask = cluster_ids == c
        w_c, lambdas_c, sigma_c, loss_c = torchuMDL(
            tr_y2train[mask, :], tr_X2train[mask, :], lambdas, sigmap, par)
        weights.append(w_c)
        lambda_cols[:, [c]] = lambdas_c
        sigmas.append(sigma_c)
        losses.append(loss_c)
    return {'Wp': weights, 'Lambdas': lambda_cols, 'Sigmap': sigmas,
            'Lo': losses}
def data_preprocessing(X_train0, N_bootstraps=1, Dn=1, stp_n=[], classes_columns_train=[], ge_columns_train=[],annot_columns_train=[],
                       stp_columns_train=[], do_filter_gene_set = False, do_normalize = 1, do_log_y = 1,
                       do_remove_scvi_latent_factors=False, do_remove_non_scvi_latent_factors=False,df_ge_names_filter=[],lf_scvi_names=[],
                       remove_st=[],d_log=0.3, gene_set_names=''):
    '''
    Selection and preprocessing of genes_STP data before model training

    Selects gene/annotation/class columns from ``X_train0``, optionally
    removes listed synapse types (``remove_st``), log-transforms and
    normalizes the STP target columns ``stp_n``, and returns the numpy
    feature matrix, targets, class/annotation arrays, synapse-type
    labels and the preprocessing parameters needed to invert the target
    scaling.

    NOTE(review): several keyword arguments are overridden inside the
    body (``Dn`` is forced to 1; ``do_filter_gene_set``,
    ``do_remove_scvi_latent_factors`` and
    ``do_remove_non_scvi_latent_factors`` are forced to False before
    use), so those parameters currently have no effect — confirm whether
    this is intentional leftover debug state.

    NOTE(review): ``scale_y2`` is only assigned when
    ``do_normalize == 1`` but is unconditionally stored in the returned
    ``preprocessing_`` dict, so calling with ``do_normalize != 1``
    raises NameError — verify.

    NOTE(review): the ``[]`` defaults are shared mutable defaults; safe
    only as long as this function never mutates them.
    '''
    # Forces the decimation step to 1 regardless of the Dn argument.
    Dn = 1
    #N_bootstraps = 100 #200
    Dn2 = int(N_bootstraps/Dn) # 100 - should be a number of bootstraps per synapse type !!!
    # 0ll
    ####
    #### SELECT X
    ####
    cla_n = classes_columns_train #['ex_inh']
    cla_n2 = pd.Series(cla_n)
    #ge_n = imp50.index[0:25].tolist()
    #ge_n = lf_scvi_names #ge_columns_train2
    # df_ge_names_iRF50 = pd.read_excel(d4+'iRF_found_50_best_genes.xlsx',header=None)
    # ipost = df_ge_names_iRF50.iloc[:,0].str.contains('post_')
    # ge_names_irf = df_ge_names_iRF50.iloc[:,0]
    # ge_names_irf.loc[ipost] = 'post__' + ge_names_irf.str.split('post_',expand=True).loc[:,1].loc[ipost].copy()
    ge_columns_train2 = pd.Series(ge_columns_train)
    # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!!
    # NOTE(review): the next three lines override the caller's arguments.
    do_filter_gene_set = False
    do_remove_scvi_latent_factors=False
    do_remove_non_scvi_latent_factors=False
    # Drop bookkeeping columns from the gene list.
    ge_columns_train2 =ge_columns_train2.loc[ge_columns_train2.isin(['samples_pre', 'samples_post'])==False]
    if do_filter_gene_set==True:
        #d5 = '/content/drive/My Drive/Colab Notebooks/'
        #df_ge_names_filter = pd.read_excel(d5+'gene_set_names.xlsx',header=None).loc[1:,1]
        df_ge_names_filter = pd.read_excel(gene_set_names,header=None).loc[1:,1]
        #ge_n = df_ge_names_iRF50.iloc[:,0].values #ge_columns_train2 #ge_columns_train #imp50.index[0:25].tolist()
        print('size of filter gene set ',df_ge_names_filter.shape)
        ge_columns_train2 = ge_columns_train2.loc[ge_columns_train2.isin(df_ge_names_filter)]
    if do_remove_scvi_latent_factors==True:
        ge_columns_train2 =ge_columns_train2.loc[ge_columns_train2.isin(lf_scvi_names)==False].values # ACHTUNG!!! remove scvi-latent facrors !!!!!
    if do_remove_non_scvi_latent_factors==True:
        ge_columns_train2 =ge_columns_train2.loc[ge_columns_train2.isin(lf_scvi_names)==True].values # ACHTUNG!!! remove all feature with except scvi-latent facrors !!!!!
    print('size of final gene set ',ge_columns_train2.shape)
    # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!! # ACHTUNG!!!
    ge_n = ge_columns_train2
    #X2 = X_train0.loc[:,annot_columns_train + ge_n + cla_n]
    # pure cortex
    #X3 = X_train0.iloc[0:5300,: ]
    # pure hipp
    #X3 = X_train0.iloc[5300:,: ]
    #CHECK REMOVED INDEXES!!!
    #remove_st = [4,11,54,56,61,62,67,77,78,81,85,88,89,90,92]
    #remove_st = [4,77,78,89,90]
    #remove_st = [4,90]
    #remove_st = [4]
    #remove_st = []
    # Build a keep-mask over rows; each synapse type occupies a contiguous
    # run of N_bootstraps rows, so removing type `st` zeroes that run.
    all_samples = np.ones(X_train0.shape[0])
    for st in remove_st:
        ii = st*N_bootstraps + np.arange(N_bootstraps)
        all_samples[ii] = 0
    X3 = X_train0.iloc[all_samples==1,:]
    Dn3 = int(X3.shape[0]/N_bootstraps) # number of synapse_types
    # 100ll
    # Split the kept rows into features / class labels / annotations.
    X2 = X3.loc[:,ge_n ]
    X2_cl = X3.loc[:,cla_n ]
    X2_an = X3.loc[:,annot_columns_train ]
    #i_cl = np.nonzero(X2.columns.isin(cla_n))[0]
    nannot = len(annot_columns_train)
    lge_n = len(ge_n)
    if len(cla_n)>0:
        i_cl = np.nonzero(X2.columns.isin(cla_n))[0] - nannot
    #X2.loc[:,cla_n] = X2.loc[:,cla_n]+1
    # Decimate rows by Dn (forced to 1 above) and convert to numpy arrays.
    X2=X2.iloc[0:X_train0.shape[0]:Dn,:].values
    X2_cl=X2_cl.iloc[0:X_train0.shape[0]:Dn,:].values
    X2_an=X2_an.iloc[0:X_train0.shape[0]:Dn,:].values
    #stp_n = ['A2_20Hz','A3_20Hz','A4_20Hz','A5_20Hz','A2_50Hz','A3_50Hz','A4_50Hz','A5_50Hz']
    #stp_n = ['A5_20Hz']
    iy0=np.nonzero(np.array(stp_columns_train)==stp_n[0])[0]
    # 'A2_20Hz',
    # 'A5_20Hz',
    # 'A250_20Hz',
    # 'A1000_20Hz',
    # 'A2_50Hz',
    # 'A2_10Hz',
    # 'A5_50Hz',
    # 'A5_10Hz',
    y2 = X3.loc[:,stp_n].iloc[0:X_train0.shape[0]:Dn,:].values
    #do_normalize = 1
    #do_log_y = 1
    #from sklearn import preprocessing
    if do_log_y==1:
        #d_log=0.3
        # Log-transform with offset d_log to handle zero-valued targets.
        y2 = np.log(y2.astype(float)+d_log)
    if do_normalize==1:
        # Standardize features; scale then center the targets, keeping
        # scale_y2/mean_y2 so predictions can be mapped back.
        scale_y2 = np.std(y2[:,:].astype(float),axis=0)
        X2[:,:] = preprocessing.scale(X2[:,:])
        y2[:,:] = y2.astype(float)/scale_y2
        mean_y2 = np.mean(y2[:,:] , axis=0)
        y2[:,:] = y2[:,:] - mean_y2
    # X2[:,:] = preprocessing.scale(X2[:,:])
    # y2[:,:] = preprocessing.scale(y2[:,:])
    #mod_index = modf.index #[3] # modf.index #[0, 3, 9, 11, 13, 15]
    # One human-readable synapse-type label per bootstrap group.
    sts = pd.DataFrame(X3.iloc[0:X3.shape[0]:Dn2,:].iloc[:,0:nannot],columns = annot_columns_train)
    sts = sts['cell_type2_pre'].map(str)+'_'+ sts['layer_pre'].map(str)+' -> '+ sts['cell_type2_post'].map(str)+'_'+sts['layer_post'].map(str)
    #sts.to_excel('temp.xlsx')
    # NOTE(review): scale_y2 is undefined here when do_normalize != 1.
    preprocessing_ = {'do_log_y':do_log_y, 'do_normalize':do_normalize,
                      'd_log':d_log, 'scale_y2':scale_y2}
    return X2, y2, X2_cl, X2_an, sts, preprocessing_, cla_n2
def train_and_test_regression_models(X2, y2, X2_cl, X2_an, H_Models, preprocessing_, ncv = 10, sts=[], cla_n2=[], cuda0=None):
'''
Train and test regression model for genes->STP
'''
do_log_y = preprocessing_['do_log_y']
do_normalize = preprocessing_['do_normalize']
if do_log_y==1:
d_log = preprocessing_['d_log']
if do_normalize==1:
scale_y2 = preprocessing_['scale_y2']
# #mod_index = modf.index #[3] # modf.index #[0, 3, 9, 11, 13, 15]
# sts = pd.DataFrame(X3.iloc[0:X3.shape[0]:Dn2,:].iloc[:,0:nannot],columns = annot_columns_train)
# sts = sts['cell_type2_pre'].map(str)+'_'+ sts['layer_pre'].map(str)+' -> '+ sts['cell_type2_post'].map(str)+'_'+sts['layer_post'].map(str)
# #sts.to_excel('temp.xlsx')
Y_pred = [], Y_pred0 = [], Samples_test = []
for i,mdn in enumerate(H_Models.loc[:,'name']):
# HM_name = 'Ms1c2'
md = H_Models.loc[H_Models.loc[:,'name']==mdn, :]
#md = modf.loc[i,:]
#mdn = md['name']
print(mdn)
if type(md['structure'].values[0][0])==str:
cli = cla_n2.index[cla_n2.isin(md['structure'].values[0])].values
ncli = 1
else:
cli=[]
cli2=md['structure'].values[0]
ncli = len(cli2)
for iii in range(len(cli2)):
cli = cli + [cla_n2.index[cla_n2.isin(cli2[iii])].values]
#mdcl2 = np.char.strip(np.array(str.split(md['classes_post'],',')))
#mdcl1 = np.char.strip(np.array(str.split(md['classes'],',')))
# 300ll
X3_cl = np.copy(X2_cl)
if ncli==1:
cli5 = [cli]
else:
cli5 = cli
print(set(X2_cl[:,cli5[0][0]]))
if len(cli)>1:
print(set(X2_cl[:,cli5[0][1]]))
else:
print('all')
for iii in range(ncli):
cli6 = cli5[iii]
for icl, cli2 in enumerate(cli6):
if icl==0:
n2n=pd.DataFrame(list(set(X2_cl[:,cli2]))).reset_index().set_index(0)
X3_cl[:,cli2] = n2n.loc[X2_cl[:,cli2]].values.ravel()
cli20 = cli2
else:
# convert class-names to numbers for cli2
n2n=pd.DataFrame(list(set(X2_cl[:,cli2]))).reset_index().set_index(0)
X4_cl = n2n.loc[X2_cl[:,cli2]].values.ravel()
# combine cli2 and cli20 classes
X4_cl = X4_cl + n2n.shape[0]*X3_cl[:,cli20]
n2n = pd.DataFrame(list(set(X4_cl))).reset_index().set_index(0)
X3_cl[:,cli2] = n2n.loc[X4_cl].values.ravel()
cli20=cli2
#### cli??
y_pred = np.zeros((0,y2.shape[1]))
y_pred0 = np.zeros((y2.shape[0],y2.shape[1]))
#ncv = 10
#ncv = 10
n_samp_cv = np.rint(X2.shape[0]/ncv)
samples_all = np.arange(X2.shape[0])
r2cv = np.zeros(ncv+1)
r3cv = np.zeros(ncv+1)
r4cv = np.zeros(ncv+1)
nonzs = np.array([])
Lj=[]
# DNML
SSEy_dnml_tra = []
SSEy_dnml_gen = []
y_pred_dnml = []
for icv in range(ncv+1): #range(ncv): # cross-validation cycle
if icv<ncv-1:
samples_test = (np.arange(n_samp_cv) + icv*n_samp_cv).astype(int)
samples_train = np.delete(np.copy(samples_all),samples_test)
else:
n_samp_cv2 = int(n_samp_cv/2)
samples_test = (np.arange(n_samp_cv2 ) + (ncv-1)*n_samp_cv+(icv-ncv+1)*n_samp_cv2 ).astype(int)
samples_train = np.delete(np.copy(samples_all),samples_test)
X2train, y2train, X2train_cl = X2[samples_train,:], y2[samples_train,:], X3_cl[samples_train,:]
X2test, y2test, X2test_cl = X2[samples_test,:], y2[samples_test,:], X3_cl[samples_test,:]
if (mdn!='BHLM')&(mdn!='HLM'):
alpha_1 = 1e-6
alpha_2 = 1e-6
lambda_1 = 1e-6
lambda_2 = 1e-6
threshold_lambda=10000.0,
#model, support = fit_classes_tree(X2train,y2train,X2train_cl,cli,model_type='HuberRegressor',
# nmin = 0,n_iter=100, alpha=1)
#model, support = fit_classes_tree(X2train,y2train,X2train_cl,cli,model_type='ARDRegression',
# nmin = 0,n_iter=300, alpha_1=1e-06, alpha_2=1e-06,
# lambda_1=1e-06, lambda_2=1e-06)
model, support = fit_classes_tree(X2train,y2train,X2train_cl,cli,model_type='elastic_net',
nmin = 0, alpha=0.5, l1_ratio=0.01)
#model, support = fit_classes_tree(X2train,y2train,X2train_cl,cli,model_type='ridge',nmin = 0, alpha=1)
# model, support = fit_classes_tree(X2train,y2train,X2train_cl,cli,model_type='ARDRegression',
# nmin = 0, alpha_1=alpha_1, alpha_2=alpha_2,
# lambda_1=lambda_1, lambda_2=lambda_2)
y_pred_i = predict_classes_tree(model,X2test,X2test_cl,cli,nout=y2.shape[1],nmin = 0)
for iy in range(y2.shape[1]): # n_out = y2.shape[1]
nonz = y_pred_i[:,iy]!=0
y_pred_i[nonz==False,iy] = np.mean(y2[:,iy])
y_pred = np.concatenate([y_pred, y_pred_i],axis=0)
#elif (mdn=='BHLM'):
if mdn=='HLM':
if icv==0:
X4_cl_df = pd.DataFrame(X3_cl) #X3_cl_df.copy()
#X4_cl_df.loc[:,cli[1][1]] = X4_cl_df.loc[:,cli[1][1]]+X4_cl_df.loc[:,cli[2][1]].max()+1
X4_cl_df.loc[:,cli[0][1]] = X4_cl_df.loc[:,cli[0][1]]+X4_cl_df.loc[:,cli[1][1]].max()+1
X4_cl_df = X4_cl_df.loc[:,[cli[0][1],cli[1][1]]] #,cli[2][1]]]
X4_cl = X4_cl_df.values
#X4_cl = X4_cl # 1 synapse type model
#X4_cl = X4_cl[:,[0,1,2]]
#X4_cl = X4_cl[:,[0,1]]
X4_cl = X4_cl[:,[0,1]] # ACHTUNG!!! - contradiction?: for training X4_cl should be - rows of cluster tree , for testing - [zo, zi]
nc = np.max(X4_cl[:,[0,1]])+1
X4_cl = np.concatenate([nc*np.ones([X4_cl.shape[0],1]), X4_cl ], axis=1) # add root cluster
scl4 = list(set(X4_cl[:,-1])) # ACHTUNG!!! - assumption - input clusters are in the last columns
# tree of clusters
#Oi = [np.array([0,3,4]), np.array([1,3,4]), np.array([2,4])] # parents of each leaf cluster
nI = len(scl4)
Oi = [] # Oi - list: for each input clusters - indices of all parent clusters
for iclu in range(nI):
is_iclu = X4_cl[:,-1]==scl4[iclu]
x4_iclu = X4_cl[is_iclu,:]
Oi = Oi + [np.flip(x4_iclu[0,:]).astype('int')]
#M = {'W': W, 'Oi': Oi, 'R': R, 'RI': RI, 'Ki': Ki, 'Sig': Sig, 'Tree':Tr} #, 'n_samples': n_samples}
M = {'Oi':Oi}
#M['Oi'] = Oi
#c_dim2 = len(set(X4_cl[:,-1])) # number of smallest clusters
#c_dim1 = X4_cl.shape[1]
#c_dim = X4_cl.max()+1
output_dim = y2train.shape[1]
input_dim = X2train.shape[1]
# 400ll
X4train_cl, X4test_cl = X4_cl[samples_train,:], X4_cl[samples_test,:]
# ########## fit model
##################################################################
X4train_cl, X4test_cl = X4_cl[samples_train,:], X4_cl[samples_test,:]
# add intercept
do_intercept = 1
if do_intercept==1:
X2train = np.concatenate([X2train, np.ones([X2train.shape[0],1])], axis=1)
X2test = np.concatenate([X2test, np.ones([X2test.shape[0],1])], axis=1)
#ng = X2train.shape[1]
input_dim = X2train.shape[1]
# convert data to pytorch variables
# y_pred_i = np.copy(linear_reg_model.forward(x_data_test,x_dim).data.cpu().numpy())
tr_y2train = Variable(torch.from_numpy(y2train.astype('float32')).to(cuda0))
tr_X2train = Variable(torch.from_numpy(X2train.astype('float32')).to(cuda0))
tr_X2train_cl = Variable(torch.from_numpy(X4train_cl.astype('long')).to(cuda0))
tr_X2test = Variable(torch.from_numpy(X2test.astype('float32')).to(cuda0))
tr_X2test_cl = Variable(torch.from_numpy(X4test_cl.astype('long')).to(cuda0))
# convert parameters to pytorch variables
sig = md['parameters']['sig'] #0.4 # 0.4 - up to median R2 = 65%, nit=15; R2=70% nit=5 for scvi_lf? #
lyambda0 = md['parameters']['lyambda0'] #50 #50 #0.5 -?bad?? # 50 - up to median R2 = 65%, nit=15; R2=70 nit=5 for scvi_lf? #
ng = X2train.shape[1]
lambdas = Variable(torch.from_numpy((lyambda0*np.ones((ng,1))).astype('float32')).to(cuda0)) #0.5*torch.tensor(np.ones(ng,1))
sigmap = Variable(torch.from_numpy(np.array(sig).astype('float32')).to(cuda0))
#sigmap = Variable(torch.from_numpy((np.ones((ng,1))*sig).astype('float32')).to(cuda0))
# 357ll
# set parameters for weights regression regularization
s2=2*sigmap**2
alpha_sigma | |
<reponame>rfrye-github/ixnetwork_restpy
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class TestConfig(Base):
    """It gives the test configuration

    The TestConfig class encapsulates a required testConfig resource which
    will be retrieved from the server every time the property is accessed.

    NOTE(review): this class is auto-generated from the server's SDM model;
    edit the generator, not this file, for structural changes.
    """
    # No per-instance attributes beyond those managed by Base.
    __slots__ = ()
    # REST resource name of this node in the server-side object model.
    _SDM_NAME = 'testConfig'
    # Maps the Python property names to the server-side attribute names.
    _SDM_ATT_MAP = {
        'BinaryResolutionSlaveNumber': 'binaryResolutionSlaveNumber',
        'Duration': 'duration',
        'EnableSlavesPassFail': 'enableSlavesPassFail',
        'IncrementStepSlaveNumber': 'incrementStepSlaveNumber',
        'InitialBinarySlaveNumber': 'initialBinarySlaveNumber',
        'InitialStepSlaveNumber': 'initialStepSlaveNumber',
        'LoadType': 'loadType',
        'MaxBinarySlaveNumber': 'maxBinarySlaveNumber',
        'MaxOutstanding': 'maxOutstanding',
        'MaxStepSlaveNumber': 'maxStepSlaveNumber',
        'MinBinarySlaveNumber': 'minBinarySlaveNumber',
        'NumberOfSlavesPassFail': 'numberOfSlavesPassFail',
        'Numtrials': 'numtrials',
        'ProtocolItem': 'protocolItem',
        'Runmode': 'runmode',
        'SetupRate': 'setupRate',
        'StartTraffic': 'startTraffic',
        'TeardownRate': 'teardownRate',
        'UseExistingSetupRate': 'useExistingSetupRate',
    }
    def __init__(self, parent):
        """Attach this testConfig node under *parent* in the resource tree."""
        super(TestConfig, self).__init__(parent)
    # Each property below reads/writes one attribute of the server-side
    # testConfig resource through the Base accessors (_get_attribute /
    # _set_attribute), keyed via _SDM_ATT_MAP.
    @property
    def BinaryResolutionSlaveNumber(self):
        """
        Returns
        -------
        - number: Specifies the binary resolution slave number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['BinaryResolutionSlaveNumber'])
    @BinaryResolutionSlaveNumber.setter
    def BinaryResolutionSlaveNumber(self, value):
        self._set_attribute(self._SDM_ATT_MAP['BinaryResolutionSlaveNumber'], value)
    @property
    def Duration(self):
        """
        Returns
        -------
        - number: The duration of the test in hours, minutes, or seconds, which is used to calculate.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Duration'])
    @Duration.setter
    def Duration(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Duration'], value)
    @property
    def EnableSlavesPassFail(self):
        """
        Returns
        -------
        - str: If true, enables slaves pass fail.
          NOTE(review): typed as str by the generator although the value is
          boolean-like — confirm the accepted values ('true'/'false'?).
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableSlavesPassFail'])
    @EnableSlavesPassFail.setter
    def EnableSlavesPassFail(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableSlavesPassFail'], value)
    @property
    def IncrementStepSlaveNumber(self):
        """
        Returns
        -------
        - number: The incremental step value for the slave number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['IncrementStepSlaveNumber'])
    @IncrementStepSlaveNumber.setter
    def IncrementStepSlaveNumber(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IncrementStepSlaveNumber'], value)
    @property
    def InitialBinarySlaveNumber(self):
        """
        Returns
        -------
        - number: The initial incremental value of the binary slave number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['InitialBinarySlaveNumber'])
    @InitialBinarySlaveNumber.setter
    def InitialBinarySlaveNumber(self, value):
        self._set_attribute(self._SDM_ATT_MAP['InitialBinarySlaveNumber'], value)
    @property
    def InitialStepSlaveNumber(self):
        """
        Returns
        -------
        - number: The initial step value of the slave number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['InitialStepSlaveNumber'])
    @InitialStepSlaveNumber.setter
    def InitialStepSlaveNumber(self, value):
        self._set_attribute(self._SDM_ATT_MAP['InitialStepSlaveNumber'], value)
@property
def LoadType(self):
"""
Returns
-------
- str(binary | step): The type of the payload setting.
"""
return self._get_attribute(self._SDM_ATT_MAP['LoadType'])
@LoadType.setter
def LoadType(self, value):
self._set_attribute(self._SDM_ATT_MAP['LoadType'], value)
@property
def MaxBinarySlaveNumber(self):
"""
Returns
-------
- number: The maximum value of the binary slave number.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxBinarySlaveNumber'])
@MaxBinarySlaveNumber.setter
def MaxBinarySlaveNumber(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxBinarySlaveNumber'], value)
@property
def MaxOutstanding(self):
"""
Returns
-------
- number: The maximum oustanding value of the slave scalability.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxOutstanding'])
@MaxOutstanding.setter
def MaxOutstanding(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxOutstanding'], value)
@property
def MaxStepSlaveNumber(self):
"""
Returns
-------
- number: The maximum step value of the slave number.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxStepSlaveNumber'])
@MaxStepSlaveNumber.setter
def MaxStepSlaveNumber(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxStepSlaveNumber'], value)
@property
def MinBinarySlaveNumber(self):
    """number: The minimum binary value of the slave number."""
    attr = self._SDM_ATT_MAP['MinBinarySlaveNumber']
    return self._get_attribute(attr)
@MinBinarySlaveNumber.setter
def MinBinarySlaveNumber(self, value):
    attr = self._SDM_ATT_MAP['MinBinarySlaveNumber']
    self._set_attribute(attr, value)
@property
def NumberOfSlavesPassFail(self):
    """number: The number of slaves pass fail."""
    attr = self._SDM_ATT_MAP['NumberOfSlavesPassFail']
    return self._get_attribute(attr)
@NumberOfSlavesPassFail.setter
def NumberOfSlavesPassFail(self, value):
    attr = self._SDM_ATT_MAP['NumberOfSlavesPassFail']
    self._set_attribute(attr, value)
@property
def Numtrials(self):
    """number: The number of trials."""
    attr = self._SDM_ATT_MAP['Numtrials']
    return self._get_attribute(attr)
@Numtrials.setter
def Numtrials(self, value):
    attr = self._SDM_ATT_MAP['Numtrials']
    self._set_attribute(attr, value)
@property
def ProtocolItem(self):
    """list(str[None | /api/v1/sessions/1/ixnetwork/vport | /api/v1/sessions/1/ixnetwork/vport/.../lan]): Protocol Items"""
    attr = self._SDM_ATT_MAP['ProtocolItem']
    return self._get_attribute(attr)
@ProtocolItem.setter
def ProtocolItem(self, value):
    attr = self._SDM_ATT_MAP['ProtocolItem']
    self._set_attribute(attr, value)
@property
def Runmode(self):
    """str(duration | noframes): It gives the run mode."""
    attr = self._SDM_ATT_MAP['Runmode']
    return self._get_attribute(attr)
@Runmode.setter
def Runmode(self, value):
    attr = self._SDM_ATT_MAP['Runmode']
    self._set_attribute(attr, value)
@property
def SetupRate(self):
    """number: The setup rate."""
    attr = self._SDM_ATT_MAP['SetupRate']
    return self._get_attribute(attr)
@SetupRate.setter
def SetupRate(self, value):
    attr = self._SDM_ATT_MAP['SetupRate']
    self._set_attribute(attr, value)
@property
def StartTraffic(self):
    """str: It starts the traffic."""
    attr = self._SDM_ATT_MAP['StartTraffic']
    return self._get_attribute(attr)
@StartTraffic.setter
def StartTraffic(self, value):
    attr = self._SDM_ATT_MAP['StartTraffic']
    self._set_attribute(attr, value)
@property
def TeardownRate(self):
    """number: The teardown rate."""
    attr = self._SDM_ATT_MAP['TeardownRate']
    return self._get_attribute(attr)
@TeardownRate.setter
def TeardownRate(self, value):
    attr = self._SDM_ATT_MAP['TeardownRate']
    self._set_attribute(attr, value)
@property
def UseExistingSetupRate(self):
    """bool: If True, it uses the existing setup rate."""
    attr = self._SDM_ATT_MAP['UseExistingSetupRate']
    return self._get_attribute(attr)
@UseExistingSetupRate.setter
def UseExistingSetupRate(self, value):
    attr = self._SDM_ATT_MAP['UseExistingSetupRate']
    self._set_attribute(attr, value)
def update(self, BinaryResolutionSlaveNumber=None, Duration=None, EnableSlavesPassFail=None, IncrementStepSlaveNumber=None, InitialBinarySlaveNumber=None, InitialStepSlaveNumber=None, LoadType=None, MaxBinarySlaveNumber=None, MaxOutstanding=None, MaxStepSlaveNumber=None, MinBinarySlaveNumber=None, NumberOfSlavesPassFail=None, Numtrials=None, ProtocolItem=None, Runmode=None, SetupRate=None, StartTraffic=None, TeardownRate=None, UseExistingSetupRate=None):
    """Update the testConfig resource on the server.

    Each keyword argument mirrors the attribute documented on the
    corresponding property of this class; only arguments supplied with a
    non-None value are pushed to the server.

    Raises:
        ServerError: The server has encountered an uncategorized error condition.
    """
    # locals() must be captured in this frame so _map_locals can pair the
    # argument names with their SDM attribute keys.
    attrs = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._update(attrs)
def Apply(self):
    """Execute the apply operation on the server (applies the specified Quick Test).

    Raises:
        NotFoundError: The requested resource does not exist on the server.
        ServerError: The server has encountered an uncategorized error condition.
    """
    return self._execute('apply', payload={"Arg1": self.href}, response_object=None)
def ApplyAsync(self):
    """Execute the applyAsync operation on the server.

    Raises:
        NotFoundError: The requested resource does not exist on the server.
        ServerError: The server has encountered an uncategorized error condition.
    """
    return self._execute('applyAsync', payload={"Arg1": self.href}, response_object=None)
def ApplyAsyncResult(self):
    """Execute the applyAsyncResult operation on the server.

    Raises:
        NotFoundError: The requested resource does not exist on the server.
        ServerError: The server has encountered an uncategorized error condition.
    """
    return self._execute('applyAsyncResult', payload={"Arg1": self.href}, response_object=None)
def ApplyITWizardConfiguration(self):
    """Execute the applyITWizardConfiguration operation on the server
    (applies the specified Quick Test).

    Raises:
        NotFoundError: The requested resource does not exist on the server.
        ServerError: The server has encountered an uncategorized error condition.
    """
    return self._execute('applyITWizardConfiguration', payload={"Arg1": self.href}, response_object=None)
def GenerateReport(self):
    """Execute the generateReport operation on the server.

    Generates a PDF report for the last successful test run.

    Raises:
        NotFoundError: The requested resource does not exist on the server.
        ServerError: The server has encountered an uncategorized error condition.
    """
    return self._execute('generateReport', payload={"Arg1": self.href}, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(InputParameters=string)list
-------------------------------
- InputParameters (str): The input arguments of the test.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
| |
# Gamma curve highest seg slope
# --- OV7670 register addresses and bit masks (continuation of the table
# started above this chunk). Addresses come from the OV7670 register map. ---
_OV7670_REG_GAM_BASE = const(0x7B) # Gamma register base (1 of 15)
_OV7670_GAM_LEN = const(15) # Number of gamma registers
_OV7670_R76_BLKPCOR = const(0x80) # REG76 black pixel corr enable
_OV7670_R76_WHTPCOR = const(0x40) # REG76 white pixel corr enable
_OV7670_REG_RGB444 = const(0x8C) # RGB 444 control
_OV7670_R444_ENABLE = const(0x02) # RGB444 enable
_OV7670_R444_RGBX = const(0x01) # RGB444 word format
_OV7670_REG_DM_LNL = const(0x92) # Dummy line LSB
_OV7670_REG_LCC6 = const(0x94) # Lens correction option 6
_OV7670_REG_LCC7 = const(0x95) # Lens correction option 7
_OV7670_REG_HAECC1 = const(0x9F) # Histogram-based AEC/AGC ctrl 1
_OV7670_REG_HAECC2 = const(0xA0) # Histogram-based AEC/AGC ctrl 2
_OV7670_REG_SCALING_PCLK_DELAY = const(0xA2) # Scaling pixel clock delay
_OV7670_REG_BD50MAX = const(0xA5) # 50 Hz banding step limit
_OV7670_REG_HAECC3 = const(0xA6) # Histogram-based AEC/AGC ctrl 3
_OV7670_REG_HAECC4 = const(0xA7) # Histogram-based AEC/AGC ctrl 4
_OV7670_REG_HAECC5 = const(0xA8) # Histogram-based AEC/AGC ctrl 5
_OV7670_REG_HAECC6 = const(0xA9) # Histogram-based AEC/AGC ctrl 6
_OV7670_REG_HAECC7 = const(0xAA) # Histogram-based AEC/AGC ctrl 7
_OV7670_REG_BD60MAX = const(0xAB) # 60 Hz banding step limit
_OV7670_REG_ABLC1 = const(0xB1) # ABLC enable
_OV7670_REG_THL_ST = const(0xB3) # ABLC target
_OV7670_REG_SATCTR = const(0xC9) # Saturation control
_OV7670_REG_LAST = const(_OV7670_REG_SATCTR) # Maximum register address
# The three tables below are flat sequences of (register, value) pairs,
# written in order by OV7670._write_list().
# Manual output format, RGB, use RGB565 and full 0-255 output range
_OV7670_rgb = bytes(
[
_OV7670_REG_COM7,
_OV7670_COM7_RGB,
_OV7670_REG_RGB444,
0,
_OV7670_REG_COM15,
_OV7670_COM15_RGB565 | _OV7670_COM15_R00FF,
]
)
# Manual output format, YUV, use full output range
_OV7670_yuv = bytes(
[
_OV7670_REG_COM7,
_OV7670_COM7_YUV,
_OV7670_REG_COM15,
_OV7670_COM15_R00FF,
]
)
# Camera bring-up sequence applied once from __init__.
# NOTE(review): the entries marked "Reserved register?" write undocumented
# addresses; the origin/intent of those values is not established by this file.
_OV7670_init = bytes(
[
_OV7670_REG_TSLB,
_OV7670_TSLB_YLAST, # No auto window
_OV7670_REG_COM10,
_OV7670_COM10_VS_NEG, # -VSYNC (req by SAMD PCC)
_OV7670_REG_SLOP,
0x20,
_OV7670_REG_GAM_BASE,
0x1C,
_OV7670_REG_GAM_BASE + 1,
0x28,
_OV7670_REG_GAM_BASE + 2,
0x3C,
_OV7670_REG_GAM_BASE + 3,
0x55,
_OV7670_REG_GAM_BASE + 4,
0x68,
_OV7670_REG_GAM_BASE + 5,
0x76,
_OV7670_REG_GAM_BASE + 6,
0x80,
_OV7670_REG_GAM_BASE + 7,
0x88,
_OV7670_REG_GAM_BASE + 8,
0x8F,
_OV7670_REG_GAM_BASE + 9,
0x96,
_OV7670_REG_GAM_BASE + 10,
0xA3,
_OV7670_REG_GAM_BASE + 11,
0xAF,
_OV7670_REG_GAM_BASE + 12,
0xC4,
_OV7670_REG_GAM_BASE + 13,
0xD7,
_OV7670_REG_GAM_BASE + 14,
0xE8,
_OV7670_REG_COM8,
_OV7670_COM8_FASTAEC | _OV7670_COM8_AECSTEP | _OV7670_COM8_BANDING,
_OV7670_REG_GAIN,
0x00,
# NOTE(review): _OV7670_COM2_SSLEEP appears to be a COM2 bit mask, yet it is
# used here in the register-address position; looks like it should be the
# COM2 register address — confirm against the datasheet before changing.
_OV7670_COM2_SSLEEP,
0x00,
_OV7670_REG_COM4,
0x00,
_OV7670_REG_COM9,
0x20, # Max AGC value
_OV7670_REG_BD50MAX,
0x05,
_OV7670_REG_BD60MAX,
0x07,
_OV7670_REG_AEW,
0x75,
_OV7670_REG_AEB,
0x63,
_OV7670_REG_VPT,
0xA5,
_OV7670_REG_HAECC1,
0x78,
_OV7670_REG_HAECC2,
0x68,
0xA1,
0x03, # Reserved register?
_OV7670_REG_HAECC3,
0xDF, # Histogram-based AEC/AGC setup
_OV7670_REG_HAECC4,
0xDF,
_OV7670_REG_HAECC5,
0xF0,
_OV7670_REG_HAECC6,
0x90,
_OV7670_REG_HAECC7,
0x94,
_OV7670_REG_COM8,
_OV7670_COM8_FASTAEC
| _OV7670_COM8_AECSTEP
| _OV7670_COM8_BANDING
| _OV7670_COM8_AGC
| _OV7670_COM8_AEC,
_OV7670_REG_COM5,
0x61,
_OV7670_REG_COM6,
0x4B,
0x16,
0x02, # Reserved register?
_OV7670_REG_MVFP,
0x07, # 0x07,
_OV7670_REG_ADCCTR1,
0x02,
_OV7670_REG_ADCCTR2,
0x91,
0x29,
0x07, # Reserved register?
_OV7670_REG_CHLF,
0x0B,
0x35,
0x0B, # Reserved register?
_OV7670_REG_ADC,
0x1D,
_OV7670_REG_ACOM,
0x71,
_OV7670_REG_OFON,
0x2A,
_OV7670_REG_COM12,
0x78,
0x4D,
0x40, # Reserved register?
0x4E,
0x20, # Reserved register?
_OV7670_REG_GFIX,
0x5D,
_OV7670_REG_REG74,
0x19,
0x8D,
0x4F, # Reserved register?
0x8E,
0x00, # Reserved register?
0x8F,
0x00, # Reserved register?
0x90,
0x00, # Reserved register?
0x91,
0x00, # Reserved register?
_OV7670_REG_DM_LNL,
0x00,
0x96,
0x00, # Reserved register?
0x9A,
0x80, # Reserved register?
0xB0,
0x84, # Reserved register?
_OV7670_REG_ABLC1,
0x0C,
0xB2,
0x0E, # Reserved register?
_OV7670_REG_THL_ST,
0x82,
0xB8,
0x0A, # Reserved register?
_OV7670_REG_AWBC1,
0x14,
_OV7670_REG_AWBC2,
0xF0,
_OV7670_REG_AWBC3,
0x34,
_OV7670_REG_AWBC4,
0x58,
_OV7670_REG_AWBC5,
0x28,
_OV7670_REG_AWBC6,
0x3A,
0x59,
0x88, # Reserved register?
0x5A,
0x88, # Reserved register?
0x5B,
0x44, # Reserved register?
0x5C,
0x67, # Reserved register?
0x5D,
0x49, # Reserved register?
0x5E,
0x0E, # Reserved register?
_OV7670_REG_LCC3,
0x04,
_OV7670_REG_LCC4,
0x20,
_OV7670_REG_LCC5,
0x05,
_OV7670_REG_LCC6,
0x04,
_OV7670_REG_LCC7,
0x08,
_OV7670_REG_AWBCTR3,
0x0A,
_OV7670_REG_AWBCTR2,
0x55,
_OV7670_REG_MTX1,
0x80,
_OV7670_REG_MTX2,
0x80,
_OV7670_REG_MTX3,
0x00,
_OV7670_REG_MTX4,
0x22,
_OV7670_REG_MTX5,
0x5E,
_OV7670_REG_MTX6,
0x80, # 0x40?
_OV7670_REG_AWBCTR1,
0x11,
_OV7670_REG_AWBCTR0,
0x9F, # Or use 0x9E for advance AWB
_OV7670_REG_BRIGHT,
0x00,
_OV7670_REG_CONTRAS,
0x40,
_OV7670_REG_CONTRAS_CENTER,
0x80, # 0x40?
]
)
# Per-size window/timing parameters, consumed by OV7670._frame_control() as
# (vstart, hstart, edge_offset, pclk_delay); indexed by OV7670_SIZE_DIV*.
_window = [
[9, 162, 2, 2], # SIZE_DIV1 640x480 VGA
[10, 174, 4, 2], # SIZE_DIV2 320x240 QVGA
[11, 186, 2, 2], # SIZE_DIV4 160x120 QQVGA
[12, 210, 0, 2], # SIZE_DIV8 80x60 ...
[15, 252, 3, 2], # SIZE_DIV16 40x30
]
class OV7670:  # pylint: disable=too-many-instance-attributes
    """Library for the OV7670 digital camera"""

    def __init__(
        self,
        i2c_bus,
        data0,
        clock,
        vsync,
        href,
        shutdown=None,
        reset=None,
        mclk=None,
        mclk_frequency=16_000_000,
        colorspace=OV7670_COLOR_RGB,
        i2c_address=0x21,
    ):  # pylint: disable=too-many-arguments
        """
        Args:
            i2c_bus (busio.I2C): The I2C bus used to configure the OV7670
            i2c_address (int): The I2C address of the camera
            data0 (microcontroller.Pin): The first of 8 parallel data capture pins
            clock (microcontroller.Pin): The pixel clock from the OV7670
            vsync (microcontroller.Pin): The vsync signal from the OV7670
            href (microcontroller.Pin): The href signal from the OV7670
            shutdown: The microcontroller.Pin that controls the camera's \
                shutdown (powerdown/enable) signal, or None
            reset: The microcontroller.Pin that controls the camera's reset \
                signal, or None
            mclk: The pin on which to create a master clock signal, or None
            mclk_frequency: The frequency of the master clock to generate, \
                ignored if mclk is None
            colorspace: The colorspace to operate in
        """
        # Optionally generate the sensor's master clock with a 50% duty PWM.
        if mclk:
            self._mclk_pwm = pwmio.PWMOut(mclk, frequency=mclk_frequency)
            self._mclk_pwm.duty_cycle = 32768
        else:
            self._mclk_pwm = None

        # Pulse the powerdown line high then low to wake the sensor.
        if shutdown:
            self._shutdown = digitalio.DigitalInOut(shutdown)
            self._shutdown.switch_to_output(True)
            time.sleep(0.001)
            self._shutdown.switch_to_output(False)
            time.sleep(0.3)
        else:
            self._shutdown = None

        if reset:
            self._reset = digitalio.DigitalInOut(reset)
            self._reset.switch_to_output(False)
            time.sleep(0.001)
            self._reset.switch_to_output(True)
        else:
            # BUGFIX: the attribute must always exist — deinit() reads
            # self._reset unconditionally and raised AttributeError when no
            # reset pin was supplied.
            self._reset = None

        self._i2c_device = I2CDevice(i2c_bus, i2c_address)

        # Without a hardware reset line, issue a software reset via COM7.
        if not reset:
            self._write_register(_OV7670_REG_COM7, _OV7670_COM7_RESET)
            time.sleep(0.001)

        self._colorspace = None
        self.colorspace = colorspace
        self._write_list(_OV7670_init)
        self._size = None
        self.size = OV7670_SIZE_DIV8
        self._test_pattern = None
        self.test_pattern = OV7670_TEST_PATTERN_NONE
        self._flip_x = False
        self._flip_y = False
        self._night = OV7670_NIGHT_MODE_OFF
        self._imagecapture = imagecapture.ParallelImageCapture(
            data0=data0, clock=clock, vsync=vsync, href=href
        )
def capture(self, buf):
    """Capture one frame into ``buf`` via the parallel image-capture peripheral.

    ``buf`` must be a writable buffer large enough for a whole frame
    (2 * width * height bytes, per the width/height property docs).
    """
    self._imagecapture.capture(buf)
@property
def mclk_frequency(self):
    """Actual frequency of the generated master clock, or None if no mclk pin was given."""
    if not self._mclk_pwm:
        return None
    return self._mclk_pwm.frequency
@property
def width(self):
    """Image width in pixels; a whole frame occupies 2*width*height bytes."""
    # 640 halved once per division step (DIV1..DIV16).
    return 640 // (1 << self._size)
@property
def height(self):
    """Image height in pixels; a whole frame occupies 2*width*height bytes."""
    # 480 halved once per division step (DIV1..DIV16).
    return 480 // (1 << self._size)
@property
def colorspace(self):
    """Get or set the colorspace (OV7670_COLOR_RGB or YUV)."""
    return self._colorspace

@colorspace.setter
def colorspace(self, colorspace):
    self._colorspace = colorspace
    # Re-send the output-format register sequence for the chosen mode.
    table = _OV7670_rgb if colorspace == OV7670_COLOR_RGB else _OV7670_yuv
    self._write_list(table)
def deinit(self):
    """Release the PWM, shutdown and reset resources held by the camera."""
    if self._mclk_pwm:
        self._mclk_pwm.deinit()
    if self._shutdown:
        self._shutdown.deinit()
    # BUGFIX: __init__ only assigns self._reset when a reset pin was
    # supplied, so guard with getattr to avoid AttributeError here.
    if getattr(self, "_reset", None):
        self._reset.deinit()
@property
def size(self):
    """Get or set the captured image size (one of the OV7670_SIZE_DIV* values)."""
    return self._size

@size.setter
def size(self, size):
    # Push the per-size window/timing parameters, then record the size.
    vstart, hstart, edge_offset, pclk_delay = _window[size]
    self._frame_control(size, vstart, hstart, edge_offset, pclk_delay)
    self._size = size
@property
def test_pattern(self):
    """Get or set the test pattern (OV7670_TEST_PATTERN_* value)."""
    return self._test_pattern

@test_pattern.setter
def test_pattern(self, pattern):
    # Modify only the test-pattern bits (bit 7 of each scaling register),
    # preserving the scaling bits.
    xsc = self._read_register(_OV7670_REG_SCALING_XSC) & ~0x80
    ysc = self._read_register(_OV7670_REG_SCALING_YSC) & ~0x80
    # Bit 0 of the pattern selects the XSC half, bit 1 the YSC half.
    # BUGFIX: was `pattern & 3`, which wrongly set the YSC bit whenever
    # bit 0 was set (pattern 1 behaved like pattern 3).
    if pattern & 1:
        xsc |= 0x80
    if pattern & 2:
        ysc |= 0x80
    # Write modified result back to SCALING_XSC and SCALING_YSC
    self._write_register(_OV7670_REG_SCALING_XSC, xsc)
    self._write_register(_OV7670_REG_SCALING_YSC, ysc)
    # BUGFIX: remember the value so the getter reflects the last set pattern.
    self._test_pattern = pattern
def _set_flip(self):
    """Push the current _flip_x/_flip_y flags into the MVFP register."""
    mvfp = self._read_register(_OV7670_REG_MVFP)
    # Clear both flip bits, then set whichever flags are enabled.
    mvfp &= ~(_OV7670_MVFP_MIRROR | _OV7670_MVFP_VFLIP)
    if self._flip_x:
        mvfp |= _OV7670_MVFP_MIRROR
    if self._flip_y:
        mvfp |= _OV7670_MVFP_VFLIP
    self._write_register(_OV7670_REG_MVFP, mvfp)
@property
def flip_x(self):
    """Get or set the X-flip (mirror) flag."""
    return self._flip_x

@flip_x.setter
def flip_x(self, value):
    self._flip_x = bool(value)
    self._set_flip()  # push both flip flags to the MVFP register
@property
def flip_y(self):
    """Get or set the Y-flip (vertical flip) flag."""
    return self._flip_y

@flip_y.setter
def flip_y(self, value):
    self._flip_y = bool(value)
    self._set_flip()  # push both flip flags to the MVFP register
@property
def night(self):
    """Get or set the night-vision mode (OV7670_NIGHT_MODE_* value)."""
    return self._night

@night.setter
def night(self, value):
    # Keep the low five COM11 bits and OR the mode value into the rest.
    com11 = self._read_register(_OV7670_REG_COM11)
    self._write_register(_OV7670_REG_COM11, (com11 & 0b00011111) | value)
    self._night = value
@property
def product_id(self):
    """Get the product id (PID) register value (read-only)."""
    return self._read_register(_OV7670_REG_PID)
@property
def product_version(self):
    """Get the product version (VER) register value (read-only)."""
    return self._read_register(_OV7670_REG_VER)
def _write_list(self, reg_list):
    """Write a flat (register, value, register, value, ...) sequence, pausing briefly after each pair."""
    pairs = iter(reg_list)
    for reg, val in zip(pairs, pairs):
        self._write_register(reg, val)
        time.sleep(0.001)
def _write_register(self, reg, value):
    """Write one byte ``value`` to register ``reg`` over I2C."""
    payload = bytes((reg, value))  # address byte followed by the data byte
    with self._i2c_device as i2c:
        i2c.write(payload)
def _read_register(self, reg):
    """Read and return one byte from register ``reg`` over I2C."""
    buf = bytearray((reg,))
    with self._i2c_device as i2c:
        i2c.write(buf)      # select the register
        i2c.readinto(buf)   # then read its value back into the same buffer
    return buf[0]
def _frame_control(
self, size, vstart, hstart, edge_offset, pclk_delay
): # pylint: disable=too-many-arguments
# Enable downsampling if sub-VGA, and zoom if 1:16 scale
value = _OV7670_COM3_DCWEN if (size > OV7670_SIZE_DIV1) else 0
if size == OV7670_SIZE_DIV16:
value |= _OV7670_COM3_SCALEEN
self._write_register(_OV7670_REG_COM3, value)
# Enable PCLK division if sub-VGA 2,4,8,16 = 0x19,1A,1B,1C
value = (0x18 + size) if (size > OV7670_SIZE_DIV1) else 0
self._write_register(_OV7670_REG_COM14, value)
# Horiz/vert downsample ratio, 1:8 max (H,V are always equal for now)
value = size if (size <= OV7670_SIZE_DIV8) else OV7670_SIZE_DIV8
self._write_register(_OV7670_REG_SCALING_DCWCTR, value * 0x11)
# Pixel clock divider if sub-VGA
value = (0xF0 | |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections.abc
import datetime
import re
import json
import six
import toscaparser
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidSchemaError
from toscaparser.common.exception import ValidationError
from toscaparser.elements.portspectype import PortSpec
from toscaparser.elements import scalarunit
from toscaparser.utils.gettextutils import _
from toscaparser.utils import yamlparser
class Schema(collections.abc.Mapping):
    """Mapping view over a single TOSCA property/attribute schema definition."""

    # Recognized schema keywords; __iter__ yields present keys in this order.
    KEYS = (TYPE, REQUIRED, DESCRIPTION, DEFAULT, CONSTRAINTS, ENTRYSCHEMA, STATUS) = (
        "type",
        "required",
        "description",
        "default",
        "constraints",
        "entry_schema",
        "status",
    )

    PROPERTY_TYPES = (
        INTEGER,
        STRING,
        BOOLEAN,
        FLOAT,
        RANGE,
        NUMBER,
        TIMESTAMP,
        LIST,
        MAP,
        SCALAR_UNIT_SIZE,
        SCALAR_UNIT_FREQUENCY,
        SCALAR_UNIT_TIME,
        VERSION,
        PORTDEF,
        PORTSPEC,
        ANY,
    ) = (
        "integer",
        "string",
        "boolean",
        "float",
        "range",
        "number",
        "timestamp",
        "list",
        "map",
        "scalar-unit.size",
        "scalar-unit.frequency",
        "scalar-unit.time",
        "version",
        "PortDef",
        PortSpec.SHORTNAME,
        "any",
    )

    SCALAR_UNIT_SIZE_DEFAULT = "B"

    # Multipliers for scalar-unit.size suffixes, in bytes.
    SCALAR_UNIT_SIZE_DICT = {
        "B": 1,
        "KB": 1000,
        "KIB": 1024,
        "MB": 1000000,
        "MIB": 1048576,
        "GB": 1000000000,
        "GIB": 1073741824,
        "TB": 1000000000000,
        "TIB": 1099511627776,
    }

    def __init__(self, name, schema_dict, datatype=None):
        """Wrap the raw YAML mapping *schema_dict* for property *name*."""
        self.name = name
        if not isinstance(schema_dict, collections.abc.Mapping):
            msg = (_('Schema definition of "%(pname)s" must be a dict.')
                   % dict(pname=name))
            ExceptionCollector.appendException(InvalidSchemaError(message=msg))

        try:
            self.type = datatype or schema_dict["type"]
        except KeyError:
            msg = _(
                'Schema definition of "%(pname)s" must have a "type" ' "attribute."
            ) % dict(pname=name)
            ExceptionCollector.appendException(InvalidSchemaError(message=msg))

        self.schema = schema_dict
        self._len = None            # lazily computed by __len__
        self.constraints_list = []  # lazily built by the constraints property

    @property
    def required(self):
        # A property is required unless the schema says otherwise.
        return self.schema.get(self.REQUIRED, True)

    @property
    def description(self):
        return self.schema.get(self.DESCRIPTION, "")

    @property
    def default(self):
        return self.schema.get(self.DEFAULT)

    @property
    def status(self):
        return self.schema.get(self.STATUS, "")

    @property
    def constraints(self):
        # Constraint objects are built on first access only.
        if not self.constraints_list:
            raw_constraints = self.schema.get(self.CONSTRAINTS)
            if raw_constraints:
                self.constraints_list = [
                    Constraint(self.name, self.type, raw)
                    for raw in raw_constraints
                ]
        return self.constraints_list

    @property
    def entry_schema(self):
        return self.schema.get(self.ENTRYSCHEMA)

    def __getitem__(self, key):
        return self.schema[key]

    def __iter__(self):
        # Yield only the recognized keywords that are present, in KEYS order.
        for key in self.KEYS:
            try:
                self.schema[key]
            except KeyError:
                continue
            yield key

    def __len__(self):
        if self._len is None:
            self._len = sum(1 for _key in self)
        return self._len
class Constraint(object):
"""Parent class for constraints for a Property or Input."""
# Constraint keyword names, paired positionally with their string values.
CONSTRAINTS = (
EQUAL,
GREATER_THAN,
GREATER_OR_EQUAL,
LESS_THAN,
LESS_OR_EQUAL,
IN_RANGE,
VALID_VALUES,
LENGTH,
MIN_LENGTH,
MAX_LENGTH,
PATTERN,
SCHEMA,
) = (
"equal",
"greater_than",
"greater_or_equal",
"less_than",
"less_or_equal",
"in_range",
"valid_values",
"length",
"min_length",
"max_length",
"pattern",
"schema",
)
# Factory dispatch: Constraint(...) returns an instance of the concrete
# subclass matching the single key of the constraint mapping.
def __new__(cls, property_name=None, property_type=None, constraint=None):
if cls is not Constraint:
# Direct subclass construction — no dispatch needed.
return super(Constraint, cls).__new__(cls)
if(not isinstance(constraint, collections.abc.Mapping) or
len(constraint) != 1):
ExceptionCollector.appendException(
InvalidSchemaError(message=_("Invalid constraint schema: %s") % constraint)
)
# NOTE(review): when the schema is invalid and the collector is in
# collecting mode, this path falls through and returns None.
else:
# NOTE: `type` here shadows the builtin; it is the constraint keyword.
for type in constraint.keys():
ConstraintClass = get_constraint_class(type)
if not ConstraintClass:
msg = _('Invalid property "%s".') % type
ExceptionCollector.appendException(InvalidSchemaError(message=msg))
# NOTE(review): if ConstraintClass is falsy and the collector is
# collecting, the call below raises TypeError — confirm intended.
return ConstraintClass(property_name, property_type, constraint)
def __init__(self, property_name, property_type, constraint):
# The declared constraint value, keyed by the subclass's constraint_key.
self.property_name = property_name
self.property_type = property_type
self.constraint_value = constraint[self.constraint_key]
self.constraint_value_msg = self.constraint_value
# Scalar-unit values (e.g. "1 GB") are normalized to plain numbers.
if self.property_type in scalarunit.ScalarUnit.SCALAR_UNIT_TYPES:
self.constraint_value = self._get_scalarunit_constraint_value()
# check if constraint is valid for property type
if property_type not in self.valid_prop_types:
msg = _(
'Property "%(ctype)s" is not valid for data type ' '"%(dtype)s".'
) % dict(ctype=self.constraint_key, dtype=property_type)
ExceptionCollector.appendException(InvalidSchemaError(message=msg))
def _get_scalarunit_constraint_value(self):
# Convert "N unit" strings (or a list of them) into plain numbers.
if self.property_type in scalarunit.ScalarUnit.SCALAR_UNIT_TYPES:
ScalarUnit_Class = scalarunit.get_scalarunit_class(self.property_type)
if isinstance(self.constraint_value, list):
return [
ScalarUnit_Class(v).get_num_from_scalar_unit()
for v in self.constraint_value
]
else:
return ScalarUnit_Class(self.constraint_value).get_num_from_scalar_unit()
def _err_msg(self, value):
# Generic fallback message; subclasses override with specifics.
return _('Property "%s" could not be validated.') % self.property_name
def validate(self, value):
"""Validate *value* against this constraint, collecting a ValidationError on failure."""
self.value_msg = value
if self.property_type in scalarunit.ScalarUnit.SCALAR_UNIT_TYPES:
value = scalarunit.get_scalarunit_value(self.property_type, value)
if not self._is_valid(value):
err_msg = self._err_msg(value)
ExceptionCollector.appendException(ValidationError(message=err_msg))
class Equal(Constraint):
    """Constraint class for "equal".

    Constrains a property or parameter to a value equal to ('=')
    the value declared.
    """

    constraint_key = Constraint.EQUAL

    valid_prop_types = Schema.PROPERTY_TYPES

    def _is_valid(self, value):
        # Plain equality against the declared constraint value.
        return bool(value == self.constraint_value)

    def _err_msg(self, value):
        params = dict(
            pname=self.property_name,
            pvalue=self.value_msg,
            cvalue=self.constraint_value_msg,
        )
        return _(
            'The value "%(pvalue)s" of property "%(pname)s" is not '
            'equal to "%(cvalue)s".'
        ) % params
class GreaterThan(Constraint):
    """Constraint class for "greater_than".

    Constrains a property or parameter to a value greater than ('>')
    the value declared.
    """

    constraint_key = Constraint.GREATER_THAN

    # Types supporting ordered comparison.
    valid_types = (int, float, datetime.date, datetime.time, datetime.datetime)

    valid_prop_types = (
        Schema.INTEGER,
        Schema.FLOAT,
        Schema.TIMESTAMP,
        Schema.SCALAR_UNIT_SIZE,
        Schema.SCALAR_UNIT_FREQUENCY,
        Schema.SCALAR_UNIT_TIME,
    )

    def __init__(self, property_name, property_type, constraint):
        super(GreaterThan, self).__init__(property_name, property_type, constraint)
        # BUGFIX/consistency: validate the converted constraint value
        # (self.constraint_value), as GreaterOrEqual/LessThan/LessOrEqual do.
        # The raw constraint[self.GREATER_THAN] is a string for scalar-unit
        # types (e.g. "1 GB") and was rejected even though the converted
        # numeric value is comparable.
        if not isinstance(self.constraint_value, self.valid_types):
            ExceptionCollector.appendException(
                InvalidSchemaError(
                    message=_(
                        'The property "greater_than" ' "expects comparable values."
                    )
                )
            )

    def _is_valid(self, value):
        return bool(value > self.constraint_value)

    def _err_msg(self, value):
        return _(
            'The value "%(pvalue)s" of property "%(pname)s" must be '
            'greater than "%(cvalue)s".'
        ) % dict(
            pname=self.property_name,
            pvalue=self.value_msg,
            cvalue=self.constraint_value_msg,
        )
class GreaterOrEqual(Constraint):
    """Constraint class for "greater_or_equal".

    Constrains a property or parameter to a value greater than or equal
    to ('>=') the value declared.
    """

    constraint_key = Constraint.GREATER_OR_EQUAL

    valid_types = (int, float, datetime.date, datetime.time, datetime.datetime)

    valid_prop_types = (
        Schema.INTEGER,
        Schema.FLOAT,
        Schema.TIMESTAMP,
        Schema.SCALAR_UNIT_SIZE,
        Schema.SCALAR_UNIT_FREQUENCY,
        Schema.SCALAR_UNIT_TIME,
    )

    def __init__(self, property_name, property_type, constraint):
        super(GreaterOrEqual, self).__init__(property_name, property_type, constraint)
        if not isinstance(self.constraint_value, self.valid_types):
            ExceptionCollector.appendException(
                InvalidSchemaError(
                    message=_('The property "greater_or_equal" expects comparable values.')
                )
            )

    def _is_valid(self, value):
        # None never satisfies; unresolved TOSCA functions are deferred (valid).
        if value is None:
            return False
        if toscaparser.functions.is_function(value):
            return True
        return bool(value >= self.constraint_value)

    def _err_msg(self, value):
        params = dict(
            pname=self.property_name,
            pvalue=self.value_msg,
            cvalue=self.constraint_value_msg,
        )
        return _(
            'The value "%(pvalue)s" of property "%(pname)s" must be '
            'greater than or equal to "%(cvalue)s".'
        ) % params
class LessThan(Constraint):
    """Constraint class for "less_than".

    Constrains a property or parameter to a value less than ('<')
    the value declared.
    """

    constraint_key = Constraint.LESS_THAN

    valid_types = (int, float, datetime.date, datetime.time, datetime.datetime)

    valid_prop_types = (
        Schema.INTEGER,
        Schema.FLOAT,
        Schema.TIMESTAMP,
        Schema.SCALAR_UNIT_SIZE,
        Schema.SCALAR_UNIT_FREQUENCY,
        Schema.SCALAR_UNIT_TIME,
    )

    def __init__(self, property_name, property_type, constraint):
        super(LessThan, self).__init__(property_name, property_type, constraint)
        if not isinstance(self.constraint_value, self.valid_types):
            ExceptionCollector.appendException(
                InvalidSchemaError(
                    message=_('The property "less_than" expects comparable values.')
                )
            )

    def _is_valid(self, value):
        return bool(value < self.constraint_value)

    def _err_msg(self, value):
        params = dict(
            pname=self.property_name,
            pvalue=self.value_msg,
            cvalue=self.constraint_value_msg,
        )
        return _(
            'The value "%(pvalue)s" of property "%(pname)s" must be '
            'less than "%(cvalue)s".'
        ) % params
class LessOrEqual(Constraint):
    """Constraint class for "less_or_equal".

    Constrains a property or parameter to a value less than or equal
    to ('<=') the value declared.
    """

    constraint_key = Constraint.LESS_OR_EQUAL

    valid_types = (int, float, datetime.date, datetime.time, datetime.datetime)

    valid_prop_types = (
        Schema.INTEGER,
        Schema.FLOAT,
        Schema.TIMESTAMP,
        Schema.SCALAR_UNIT_SIZE,
        Schema.SCALAR_UNIT_FREQUENCY,
        Schema.SCALAR_UNIT_TIME,
    )

    def __init__(self, property_name, property_type, constraint):
        super(LessOrEqual, self).__init__(property_name, property_type, constraint)
        if not isinstance(self.constraint_value, self.valid_types):
            ExceptionCollector.appendException(
                InvalidSchemaError(
                    message=_('The property "less_or_equal" expects comparable values.')
                )
            )

    def _is_valid(self, value):
        return bool(value <= self.constraint_value)

    def _err_msg(self, value):
        params = dict(
            pname=self.property_name,
            pvalue=self.value_msg,
            cvalue=self.constraint_value_msg,
        )
        return _(
            'The value "%(pvalue)s" of property "%(pname)s" must be '
            'less than or equal to "%(cvalue)s".'
        ) % params
class InRange(Constraint):
"""Constraint class for "in_range"
Constrains a property or parameter to a value in range of (inclusive)
the two values declared.
"""
# Sentinel string meaning "no bound on this side of the range".
UNBOUNDED = "UNBOUNDED"
constraint_key = Constraint.IN_RANGE
# Strings are allowed only so the UNBOUNDED sentinel can appear as a bound.
valid_types = (int, float, datetime.date, datetime.time, datetime.datetime,) + six.string_types
valid_prop_types = (
Schema.INTEGER,
Schema.FLOAT,
Schema.TIMESTAMP,
Schema.SCALAR_UNIT_SIZE,
Schema.SCALAR_UNIT_FREQUENCY,
Schema.SCALAR_UNIT_TIME,
Schema.RANGE,
)
def __init__(self, property_name, property_type, constraint):
super(InRange, self).__init__(property_name, property_type, constraint)
# The declared range must be a two-element sequence [min, max].
if(not isinstance(self.constraint_value, collections.abc.Sequence) or
(len(constraint[self.IN_RANGE]) != 2)):
ExceptionCollector.appendException(
InvalidSchemaError(
message=_('The property "in_range" ' "expects a list.")
)
)
msg = _('The property "in_range" expects comparable values.')
for value in self.constraint_value:
if not isinstance(value, self.valid_types):
ExceptionCollector.appendException(InvalidSchemaError(message=msg))
# The only string we allow for range is the special value
# 'UNBOUNDED'
if isinstance(value, six.string_types) and value != self.UNBOUNDED:
ExceptionCollector.appendException(InvalidSchemaError(message=msg))
self.min = self.constraint_value[0]
self.max = self.constraint_value[1]
def _is_valid(self, value):
# Each bound is checked only when it is not the UNBOUNDED sentinel;
# a non-UNBOUNDED string bound always fails.
if not isinstance(self.min, six.string_types):
if value < self.min:
return False
elif self.min != self.UNBOUNDED:
return False
if not isinstance(self.max, six.string_types):
if value > self.max:
return False
elif self.max != self.UNBOUNDED:
return False
return True
def _err_msg(self, value):
return _(
'The value "%(pvalue)s" of property "%(pname)s" is out of '
'range "(min:%(vmin)s, max:%(vmax)s)".'
) % dict(
pname=self.property_name,
pvalue=self.value_msg,
vmin=self.constraint_value_msg[0],
vmax=self.constraint_value_msg[1],
)
class ValidValues(Constraint):
"""Constraint class for "valid_values"
Constrains a property or parameter to a value that is in the list of
declared values.
"""
constraint_key = Constraint.VALID_VALUES
valid_prop_types = Schema.PROPERTY_TYPES
def __init__(self, property_name, property_type, constraint):
super(ValidValues, self).__init__(property_name, property_type,
constraint)
if not isinstance(self.constraint_value, collections.abc.Sequence):
ExceptionCollector.appendException(
InvalidSchemaError(
message=_('The property "valid_values" ' "expects a list.")
)
)
def _is_valid(self, value):
if isinstance(value, list):
return all(v in self.constraint_value for v in value)
return value in self.constraint_value
def _err_msg(self, value):
allowed = "[%s]" % ", ".join(str(a) | |
# Source: pennylane/operation.py (repo: kareem1925/pennylane)
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
r"""
This module contains the abstract base classes for defining PennyLane
operations and observables.
Description
-----------
Qubit Operations
~~~~~~~~~~~~~~~~
The :class:`Operator` class serves as a base class for operators,
and is inherited by both the :class:`Observable` class and the
:class:`Operation` class. These classes are subclassed to implement quantum operations
and measure observables in PennyLane.
* Each :class:`~.Operator` subclass represents a general type of
map between physical states. Each instance of these subclasses
represents either
- an application of the operator or
- an instruction to measure and return the respective result.
Operators act on a sequence of wires (subsystems) using given parameter values.
* Each :class:`~.Operation` subclass represents a type of quantum operation,
for example a unitary quantum gate. Each instance of these subclasses
represents an application of the operation with given parameter values to
a given sequence of wires (subsystems).
* Each :class:`~.Observable` subclass represents a type of physical observable.
Each instance of these subclasses represents an instruction to measure and
return the respective result for the given parameter values on a
sequence of wires (subsystems).
Differentiation
^^^^^^^^^^^^^^^
In general, an :class:`Operation` is differentiable (at least using the finite-difference
method) with respect to a parameter iff
* the domain of that parameter is continuous.
For an :class:`Operation` to be differentiable with respect to a parameter using the
analytic method of differentiation, it must satisfy an additional constraint:
* the parameter domain must be real.
.. note::
These conditions are *not* sufficient for analytic differentiation. For example,
CV gates must also define a matrix representing their Heisenberg linear
transformation on the quadrature operators.
For gates that *are* supported via the analytic method, the gradient recipe
(with multiplier :math:`c_k`, parameter shift :math:`s_k` for parameter :math:`\phi_k`)
works as follows:
.. math:: \frac{\partial}{\partial\phi_k}O = c_k\left[O(\phi_k+s_k)-O(\phi_k-s_k)\right].
CV Operation base classes
~~~~~~~~~~~~~~~~~~~~~~~~~
Due to additional requirements, continuous-variable (CV) operations must subclass the
:class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation`
and :class:`~.Observable`.
Differentiation
^^^^^^^^^^^^^^^
To enable gradient computation using the analytic method for Gaussian CV operations, in addition, you need to
provide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of
the operation given its list of parameters, namely:
* For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the
operation on the vector of quadrature operators :math:`\mathbf{r}` for the given parameter
values.
* For Gaussian CV Observables this method should return a real vector (first-order observables)
or symmetric matrix (second-order observables) of coefficients of the quadrature
operators :math:`\x` and :math:`\p`.
PennyLane uses the convention :math:`\mathbf{r} = (\I, \x, \p)` for single-mode operations and observables
and :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)` for multi-mode operations and observables.
.. note::
Non-Gaussian CV operations and observables are currently only supported via
the finite-difference method of gradient computation.
"""
import abc
import itertools
import functools
import numbers
from collections.abc import Sequence
from enum import Enum, IntEnum
from pennylane.wires import Wires
import numpy as np
from numpy.linalg import multi_dot
import pennylane as qml
from .utils import pauli_eigs
from .variable import Variable
# =============================================================================
# Wire types
# =============================================================================
class ActsOn(IntEnum):
    """Integer enumeration class
    to represent the number of wires
    an operation acts on"""
    # Sentinel members: -1 and 0 stand in for "any" / "all" wires rather
    # than a literal wire count.
    AnyWires = -1
    AllWires = 0
# Module-level aliases; as IntEnum members they compare equal to the
# plain integers 0 and -1.
AllWires = ActsOn.AllWires
"""IntEnum: An enumeration which represents all wires in the
subsystem. It is equivalent to an integer with value 0."""
AnyWires = ActsOn.AnyWires
"""IntEnum: An enumeration which represents any wires in the
subsystem. It is equivalent to an integer with value -1."""
# =============================================================================
# ObservableReturnTypes types
# =============================================================================
class ObservableReturnTypes(Enum):
    """Enumeration class to represent the return types of an observable."""
    # The member values are the short string tags identifying each
    # measurement type.
    Sample = "sample"
    Variance = "var"
    Expectation = "expval"
    Probability = "probs"
    def __repr__(self):
        """String representation of the return types."""
        return str(self.value)
# Module-level aliases so callers can use e.g. `Expectation` directly
# instead of `ObservableReturnTypes.Expectation`.
Sample = ObservableReturnTypes.Sample
"""Enum: An enumeration which represents sampling an observable."""
Variance = ObservableReturnTypes.Variance
"""Enum: An enumeration which represents returning the variance of
an observable on specified wires."""
Expectation = ObservableReturnTypes.Expectation
"""Enum: An enumeration which represents returning the expectation
value of an observable on specified wires."""
Probability = ObservableReturnTypes.Probability
"""Enum: An enumeration which represents returning probabilities
of all computational basis states."""
# =============================================================================
# Class property
# =============================================================================
class ClassPropertyDescriptor:  # pragma: no cover
    """Descriptor implementing a property that lives on the class.

    Wraps a ``classmethod``/``staticmethod`` getter (and optionally a
    setter) so attribute access on the class or an instance invokes it.
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, fget, fset=None):
        self.fget = fget
        self.fset = fset
    def __get__(self, obj, klass=None):
        # When accessed through an instance, klass may be omitted.
        owner = type(obj) if klass is None else klass
        return self.fget.__get__(obj, owner)()
    def __set__(self, obj, value):
        if not self.fset:
            raise AttributeError("can't set attribute")
        return self.fset.__get__(obj, type(obj))(value)
    def setter(self, func):
        """Set the function as a class method, and store as an attribute."""
        if isinstance(func, (classmethod, staticmethod)):
            self.fset = func
        else:
            self.fset = classmethod(func)
        return self
def classproperty(func):
    """The class property decorator.

    Plain callables are wrapped in ``classmethod`` before being handed to
    :class:`ClassPropertyDescriptor`.
    """
    if isinstance(func, (classmethod, staticmethod)):
        wrapped = func
    else:
        wrapped = classmethod(func)
    return ClassPropertyDescriptor(wrapped)
# =============================================================================
# Base Operator class
# =============================================================================
class Operator(abc.ABC):
r"""Base class for quantum operators supported by a device.
The following class attributes must be defined for all Operators:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
Args:
params (tuple[float, int, array, Variable]): operator parameters
Keyword Args:
wires (Iterable): Iterable containing the wires that the operator acts
on. If not given, args[-1] is interpreted as wires.
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue.
"""
do_check_domain = True #: bool: flag: should we perform a domain check for the parameters?
@classmethod
def _matrix(cls, *params):
    """Matrix representation of the operator
    in the computational basis.
    This is a *class method* that should be defined for all
    new operations and observables, that returns the matrix representing
    the operator in the computational basis.
    This private method allows matrices to be computed
    directly without instantiating the operators first.
    To return the matrices of *instantiated* operators,
    please use the :attr:`~.Operator.matrix` property instead.
    **Example:**
    >>> qml.RY._matrix(0.5)
    >>> array([[ 0.96891242+0.j, -0.24740396+0.j],
    [ 0.24740396+0.j,  0.96891242+0.j]])
    Returns:
        array: matrix representation
    """
    # The base class deliberately provides no matrix; concrete
    # subclasses override this.
    raise NotImplementedError
@property
def matrix(self):
    r"""Matrix representation of an instantiated operator
    in the computational basis.
    **Example:**
    >>> U = qml.RY(0.5, wires=1)
    >>> U.matrix
    >>> array([[ 0.96891242+0.j, -0.24740396+0.j],
    [ 0.24740396+0.j,  0.96891242+0.j]])
    Returns:
        array: matrix representation
    """
    # Evaluate the class-level _matrix with this instance's parameters.
    return self._matrix(*self.parameters)
@classmethod
def _eigvals(cls, *params):
    """Eigenvalues of the operator.
    This is a *class method* that should be defined for all
    new operations and observables that returns the eigenvalues
    of the operator. Note that the eigenvalues are not guaranteed
    to be in any particular order.
    This private method allows eigenvalues to be computed
    directly without instantiating the operators first.
    The default implementation relies on the presence of the
    :attr:`_matrix` method.
    To return the eigenvalues of *instantiated* operators,
    please use the :attr:`~.Operator.eigvals` property instead.
    **Example:**
    >>> qml.RZ._eigvals(0.5)
    >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
    Returns:
        array: eigenvalue representation
    """
    # Dense fallback: diagonalize the full matrix representation.
    return np.linalg.eigvals(cls._matrix(*params))
@property
def eigvals(self):
    r"""Eigenvalues of an instantiated operator.
    Note that the eigenvalues are not guaranteed to be in any
    particular order.
    **Example:**
    >>> U = qml.RZ(0.5, wires=1)
    >>> U.eigvals
    >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
    Returns:
        array: eigvals representation
    """
    # Evaluate the class-level _eigvals with this instance's parameters.
    return self._eigvals(*self.parameters)
# Abstract interface: every concrete operator class must provide these
# three class attributes/properties.
@property
@abc.abstractmethod
def num_params(self):
    """Number of parameters the operator takes."""
@property
@abc.abstractmethod
def num_wires(self):
    """Number of wires the operator acts on."""
@property
@abc.abstractmethod
def par_domain(self):
    """Domain of the gate parameters.
    * ``'N'``: natural numbers (including zero).
    * ``'R'``: floats.
    * ``'A'``: arrays of real or complex values.
    * ``None``: if there are no parameters.
    """
@property
def name(self):
    """String for the name of the operator.
    """
    return self._name
@name.setter
def name(self, value):
    # Rebind the operator's name (defaults to the class name in __init__).
    self._name = value
def __init__(self, *params, wires=None, do_queue=True):
# pylint: disable=too-many-branches
self._name = self.__class__.__name__ #: str: name of the operator
self.queue_idx = None #: int, None: index of the Operator in the circuit queue, or None if not in a queue
if wires is None:
raise ValueError("Must specify the wires that {} acts on".format(self.name))
wires = Wires(wires)
self._wires = wires.tolist() #: list[int]: wires on which the operator acts
# check that the number of wires given corresponds to required number
| |
in change.items():
self.fs.rename(oripath, fullpath)
self.nodes = [self.header_node] + self.sort_nodes(
self.nodes) + [self.footer_node]
self.render()
self.set_clineno_by_node(oriNode)
Vim.command('setlocal nomodifiable')
return True
def cut(self, nodes):
    """Put every node in *nodes* into the cut state and queue them for a
    highlight refresh."""
    for picked in nodes:
        picked.cut()
    self.highlight_outdated_nodes.update(nodes)
def copy(self, nodes):
    """Put every node in *nodes* into the copied state and queue them for
    a highlight refresh."""
    for picked in nodes:
        picked.copy()
    self.highlight_outdated_nodes.update(nodes)
def reset_hi(self, nodes):
    """Reset the highlight state of every node in *nodes* and queue them
    for a highlight refresh."""
    for picked in nodes:
        picked.reset_hi()
    self.highlight_outdated_nodes.update(nodes)
def find_next_ind(self, nodes, ind, pred):
    """Return the index of the first node after *ind* in *nodes* for which
    ``pred(nodes[ind], candidate)`` is true.

    The node originally at *ind* is the fixed first argument to *pred*;
    scanning starts at ``ind + 1``. If no node matches, ``len(nodes)``
    is returned.

    Bug fix: the scan bound previously used ``len(self.nodes)`` although
    the loop indexes the *nodes* argument; when a caller passed a list of
    a different length this could raise IndexError or stop early. The
    bound is now ``len(nodes)``.
    """
    beg_node = nodes[ind]
    ind += 1
    sz = len(nodes)
    while ind < sz:
        if pred(beg_node, nodes[ind]):
            break
        ind += 1
    return ind
def next_lesseq_level_ind(self, begInd, nodes=None):
    """Index of the next node whose level is <= that of the node at
    *begInd* (searching self.nodes unless *nodes* is given)."""
    search_space = self.nodes if nodes is None else nodes
    return self.find_next_ind(search_space, begInd,
                              lambda beg, new: new.level <= beg.level)
class Netranger(object):
"""Main (mvc) controler.
Main functions are:
1. on_bufenter: create / update netr buffers
2. invoke_map: invoke one of NETR* function on user key press
"""
@property
def cur_buf(self):
    """NetRangerBuf registered for the buffer vim currently displays."""
    return self.bufs[Vim.current.buffer.number]
@property
def cur_buf_is_remote(self):
    """True when the current buffer's filesystem backend is Rclone."""
    return self.cur_buf.fs is Rclone
@property
def cur_node(self):
    """Node under the cursor in the current netranger buffer."""
    return self.cur_buf.cur_node
@property
def cwd(self):
    """Working directory shown by the current netranger buffer."""
    return self.cur_buf.wd
def __init__(self):
    """Set up controller state, vim variables, keymaps and backends."""
    # Bookkeeping: vim buffer number -> NetRangerBuf, and the reverse
    # mapping from working directory to buffer number.
    self.inited = False
    self.bufs = {}
    self.wd2bufnum = {}
    # Per-buffer selections for pick/cut/copy operations.
    self.picked_nodes = defaultdict(set)
    self.cut_nodes = defaultdict(set)
    self.copied_nodes = defaultdict(set)
    # Lazily created sub-UIs.
    self.bookmarkUI = None
    self.helpUI = None
    self.sortUI = None
    self.askUI = None
    self.onuiquit = None
    self.newUI = None
    self.previewUI = None
    self.onuiquit_num_args = 0
    self.init_vim_variables()
    self.init_keymaps()
    Rclone.init(Vim.Var('NETRemoteCacheDir'), Vim.Var('NETRemoteRoots'))
    Shell.mkdir(default.variables['NETRRootDir'])
    self.rifle = Rifle(Vim.Var('NETRRifleFile'))
    # Always ignore dotfiles ('.*') in addition to the user's patterns,
    # and persist the amended list back to vim.
    ignore_pat = list(Vim.Var('NETRIgnore'))
    if '.*' not in ignore_pat:
        ignore_pat.append('.*')
        Vim.vars['NETRIgnore'] = ignore_pat
    # Compile the fnmatch globs into a single alternation regex.
    self.ignore_pattern = re.compile('|'.join(
        fnmatch.translate(p) for p in ignore_pat))
    Vim.vars['NETRemoteCacheDir'] = os.path.expanduser(
        Vim.Var('NETRemoteCacheDir'))
def init_vim_variables(self):
    """Install default values for any g: variables the user did not set."""
    for k, v in default.variables.items():
        if k not in Vim.vars:
            Vim.vars[k] = v
    self.reset_default_colors()
    for k, v in default.internal_variables.items():
        if k not in Vim.vars:
            Vim.vars[k] = v
def reset_default_colors(self):
    """Merge the user's g:NETRColors overrides into the default color
    table, validating keys and values.

    Accepts terminal color indices (0-255), '#rrggbb' hex strings, or
    color names; string forms are converted to terminal indices.

    Bug fix: the two '{}' error messages were emitted without calling
    ``.format``, so the offending key/value was never shown; the invalid
    color name message would also have reported an already-overwritten
    value.
    """
    for name, color in Vim.Var('NETRColors').items():
        if name not in default.color:
            Vim.ErrorMsg('netranger: {} is not a valid '
                         'NETRColors key!'.format(name))
            continue
        if type(color) is int and (color < 0 or color > 255):
            Vim.ErrorMsg('netranger: Color value should be within 0~255')
            continue
        elif type(color) is str:
            # Translate '#rrggbb' or a color name to a terminal index.
            if color[0] == '#':
                converted = colorhexstr2ind.get(color, None)
            else:
                converted = colorname2ind.get(color, None)
            if converted is None:
                Vim.ErrorMsg('netranger: {} is not a valid '
                             'color name!'.format(color))
                continue
            color = converted
        default.color[name] = color
    # Normalize any remaining named defaults to terminal indices.
    for key, value in default.color.items():
        if type(value) is str:
            default.color[key] = colorname2ind[value]
def should_ignore(self, basename):
    """Return True if *basename* matches the compiled ignore pattern.

    ``re.match`` anchors at the start of the string, which is correct for
    the fnmatch-translated patterns compiled in ``__init__``.

    Bug fix: the original tested ``self.ignore_pattern`` for truthiness
    *after* already calling ``.match`` on it, so the guard could never
    take effect; ``ignore_pattern`` is always set in ``__init__``, so the
    guard is dropped.
    """
    return self.ignore_pattern.match(basename) is not None
def init_keymaps(self):
    """Add key mappings to NETR* functions for netranger buffers.
    Override or skip some default mappings on user demand.
    """
    self.keymap_doc = {}
    self.key2fn = {}
    self.visual_key2fn = {}
    # Normal-mode mappings: per-function user keys (g:<fn>) plus any
    # default keys not listed in g:NETRDefaultMapSkip.
    # NOTE(review): skip entries are lowercased but candidate keys are
    # compared as-is, so mixed-case skip entries may not match — confirm.
    skip = []
    for k in Vim.Var('NETRDefaultMapSkip'):
        skip.append(k.lower())
    for fn, (keys, desc) in default.keymap.items():
        user_keys = Vim.Var(fn, [])
        user_keys += [k for k in keys if k not in skip]
        self.keymap_doc[fn] = (keys, desc)
        for key in user_keys:
            self.key2fn[key] = getattr(self, fn)
    # Visual-mode mappings follow the same scheme with their own skip list.
    skip = []
    for k in Vim.Var('NETRDefaultVisualMapSkip'):
        skip.append(k.lower())
    for fn, (keys, desc) in default.visual_keymap.items():
        user_keys = Vim.Var(fn, [])
        user_keys += [k for k in keys if k not in skip]
        self.keymap_doc[fn] = (keys, desc)
        for key in user_keys:
            self.visual_key2fn[key] = getattr(self, fn)
def map_keys(self):
    """Install buffer-local normal/visual mode mappings for every
    registered key, dispatching into the python key2fn tables."""
    def literal(key):
        # Escape a leading '<' as '<lt>' so vim does not interpret e.g.
        # '<cr>' as a special key inside the :py3 call argument.
        if key[0] == '<':
            escape_key = '<lt>' + key[1:]
        else:
            escape_key = key
        return escape_key
    for key in self.key2fn:
        Vim.command("nnoremap <nowait> <silent> <buffer> {} "
                    ':py3 ranger.key2fn[\"{}\"]()<cr>'.format(
                        key, literal(key)))
    for key in self.visual_key2fn:
        Vim.command("vnoremap <nowait> <silent> <buffer> {} "
                    ':py3 ranger.visual_key2fn[\"{}\"]()<cr>'.format(
                        key, literal(key)))
def unmap_keys(self):
    """Remove the buffer-local mappings installed by map_keys."""
    for key, fn in self.key2fn.items():
        # NETRSave's mapping is deliberately kept — presumably so a save
        # can still be triggered while other maps are disabled (e.g.
        # during editing); TODO confirm.
        if fn.__name__ == 'NETRSave':
            continue
        Vim.command("nunmap <silent> <buffer> {}".format(key))
    for key, fn in self.visual_key2fn.items():
        Vim.command("vunmap <silent> <buffer> {}".format(key))
def map(self, key, fn, check=False):
    """Bind *key* to *fn* in the normal-mode key table.
    @param check: when True, report a conflict if *key* is already bound.
    """
    if check and key in self.key2fn:
        Vim.ErrorMsg("netranger: Fail to bind key {} to {} because it has "
                     "been mapped to other {}.".format(
                         key, fn.__name__, self.key2fn[key].__name__))
    # NOTE(review): the binding is installed even when a conflict was
    # just reported above — confirm that overriding is intended.
    self.key2fn[key] = fn
def on_winenter(self, bufnum):
    """Handle WinEnter: re-render highlights if the window width changed
    while the buffer was displayed elsewhere.
    @param bufnum: number of the buffer being entered
    """
    # deal with window width changed
    if bufnum in self.bufs:
        self.cur_buf.refresh_hi_if_winwidth_changed()
def on_bufenter(self, bufnum):
    """There are three cases on bufenter:
    1. The buffer is not a netranger buffer: do nothing
    2. The buffer is an existing netranger buffer: refresh buffer content
       (e.g. directory content changed elsewhere) and call any pending
       onuiquit functions
    3. The buffer is a [No Name] temporary buffer and the buffer name is a
       directory. Then we either create a new netranger buffer or bring up
       an existing netranger buffer
    """
    if bufnum in self.bufs:
        self.refresh_curbuf()
        if self.onuiquit is not None:
            # If not enough arguments are passed, ignore the pending
            # onuiquit, e.g. quit the bookmark go ui without pressing
            # key to specify where to go.
            if len(Vim.Var('NETRRegister')) == self.onuiquit_num_args:
                self.onuiquit(*Vim.Var('NETRRegister'))
            # Always clear the pending callback and its argument register.
            self.onuiquit = None
            Vim.vars['NETRRegister'] = []
            self.onuiquit_num_args = 0
    else:
        bufname = Vim.current.buffer.name
        # A trailing '~' means the user's home directory.
        if len(bufname) > 0 and bufname[-1] == '~':
            bufname = os.path.expanduser('~')
        if not os.path.isdir(bufname):
            return
        if os.path.islink(bufname):
            # Resolve symlinked directories so each real directory maps
            # to a single netranger buffer.
            bufname = os.path.join(os.path.dirname(bufname),
                                   os.readlink(bufname))
        bufname = os.path.abspath(bufname)
        if self.buf_existed(bufname):
            self.show_existing_buf(bufname)
        else:
            self.gen_new_buf(bufname)
def refresh_curbuf(self):
    """Bring the current netranger buffer back in sync on re-entry."""
    cur_buf = self.cur_buf
    # manually turn off highlight of current line as synchronous
    # on_bufenter blocks the on_cursormoved event handler
    cur_buf.cur_node.cursor_off()
    # deal with content changed, e.g., file operation outside
    cur_buf.refresh_nodes()
    # deal with highlight changed, e.g., pick, copy hi dismiss because of
    # paste
    cur_buf.refresh_highlight()
    # ensure pwd is correct
    if Vim.Var('NETRAutochdir'):
        Vim.command('lcd ' + cur_buf.last_vim_pwd)
def show_existing_buf(self, bufname):
    """Switch to the already-created netranger buffer for *bufname* and
    refresh it."""
    ori_bufnum = Vim.current.buffer.number
    existed_bufnum = self.wd2bufnum[bufname]
    Vim.command('{}b'.format(existed_bufnum))
    self.set_buf_option()
    buf = self.bufs[existed_bufnum]
    self.refresh_curbuf()
    # Check window width in case the window was closed in a different
    # width
    buf.refresh_hi_if_winwidth_changed()
    if ori_bufnum not in self.bufs:
        # wipe out the [No Name] temporary buffer
        Vim.command('bwipeout {}'.format(ori_bufnum))
    buf.move_vim_cursor(buf.clineNo)
def gen_new_buf(self, bufname):
    """Create and register a NetRangerBuf for directory *bufname*.

    Paths under the remote cache directory get the Rclone backend, all
    others the local filesystem backend.
    """
    bufnum = Vim.current.buffer.number
    is_remote = bufname.startswith(Vim.Var('NETRemoteCacheDir'))
    fs_backend = Rclone if is_remote else LocalFS
    self.bufs[bufnum] = NetRangerBuf(self, os.path.abspath(bufname),
                                     fs_backend)
    Vim.command('silent file N:{}'.format(bufname))
    self.map_keys()
    self.wd2bufnum[bufname] = bufnum
    self.set_buf_option()
def buf_existed(self, wd):
    """ Check if there's an existing NETRangerBuf.
    This avoids reinitializing a NETRangerBuf when the corresponding vim
    buffer is wipeout and later reentered.
    """
    if wd not in self.wd2bufnum:
        return False
    bufnum = self.wd2bufnum[wd]
    try:
        buf = Vim.buffers[bufnum]
        return buf.valid
    except KeyError:
        # The vim buffer was wiped out; drop the stale bookkeeping so a
        # fresh NetRangerBuf gets created on the next entry.
        del self.wd2bufnum[wd]
        del self.bufs[bufnum]
        return False
def set_buf_option(self):
    """Apply the buffer/window options every netranger buffer needs."""
    # Issued in the original order; all but concealcursor are
    # buffer/window-local.
    for cmd in (
            'setlocal buftype=nofile',
            'setlocal filetype=netranger',
            'setlocal encoding=utf-8',
            'setlocal noswapfile',
            'setlocal nowrap',
            'setlocal foldmethod=manual',
            'setlocal foldcolumn=0',
            'setlocal nofoldenable',
            'setlocal nobuflisted',
            'setlocal nospell',
            'setlocal bufhidden=hide',
            'setlocal conceallevel=3',
            'set concealcursor=nvic',
            'setlocal nocursorline',
            'setlocal nolist'):
        Vim.command(cmd)
def on_cursormoved(self, bufnum):
    """refresh buffer highlight when cursor is moved.
    @param bufnum: current buffer number
    """
    # NOTE(review): the `bufnum in self.bufs` guard below was commented
    # out; if this handler can fire for a non-netranger buffer the lookup
    # raises KeyError — confirm the autocmd is registered buffer-locally.
    # if bufnum in self.bufs and not self.bufs[bufnum].is_editing:
    if not self.bufs[bufnum].is_editing:
        self.bufs[bufnum].on_cursormoved()
        # Defer the heavier header/footer update (see on_cursormoved_post).
        Vim.Timer(Vim.Var('NETRRedrawDelay'), '_NETROnCursorMovedPost',
                  self.on_cursormoved_post, bufnum)
def on_cursormoved_post(self, bufnum):
    """Refresh header and footer content.
    re_stat is a heavy task (compared to setting highlight when cursor
    moved). To avoid unnecessary calling of re_stat (e.g. when the user
    keep pressing j just to move down and don't care the stat information
    ), we delay re_stat in on_cursormoved by using timer and avoid
    re_stat throttling using a trick documented in NETRangerBuf.
    on_cursormoved_post.
    """
    self.bufs[bufnum].on_cursormoved_post()
    # Keep the preview window in sync with the node now under the cursor.
    if self.previewUI and Vim.current.buffer.number in self.bufs:
        self.previewUI.set_content(self.cur_node.fullpath)
def pend_onuiquit(self, fn, numArgs=0):
    """Called by UIs to perform actions after reentering netranger buffer.
    Used for waiting for user input in some UI and then defer what to do
    when the UI window is quit and the netranger buffer gain focus again.
    Function arguments are passed as a list via vim variable g:'
    NETRRegister'.
    @param fn: function to be executed
    @param numArgs: number of args expected to see in g:'NETRRegister'.
                    When executing fn, if numArgs do not match, fn will
                    not be executed. (e.g. User press no keys in
                    BookMarkGo UI but simply quit the UI)
    """
    # Consumed by on_bufenter the next time a netranger buffer is entered.
    self.onuiquit = fn
    self.onuiquit_num_args = numArgs
def NETROpen(self, open_cmd=None, rifle_cmd=None, use_rifle=True):
"""The real work for opening directories is handled in on_bufenter.
For openning files, we check if there's rifle rule to open the
file. Otherwise, open it in vim.
"""
cur_node = self.cur_node
if cur_node.is_INFO:
return
if open_cmd is None:
if cur_node.is_DIR:
open_cmd = 'edit'
else:
open_cmd = Vim.Var('NETROpenCmd')
fullpath = cur_node.fullpath
if cur_node.is_DIR:
if not os.access(cur_node.fullpath, os.X_OK):
Vim.ErrorMsg('Permission Denied: {}'.format(cur_node.name))
return
if use_rifle and rifle_cmd is not None:
Shell.run_async(rifle_cmd.format('"{}"'.format(fullpath)))
else:
Vim.command('silent {} {}'.format(open_cmd, fullpath))
# Manually call on_bufenLer as old vim version might not
# trigger BufEnter with the above command. It does not cause
# too much overhead calling on_bufenter two times because most
# of things are cached
self.on_bufenter(Vim.eval("winnr()"))
else:
if self.cur_buf_is_remote:
Rclone.ensure_downloaded(fullpath)
if rifle_cmd is None:
| |
'scale': 1,
'description': '',
'visibility': []},
107: {'type': 'zone', 'name': 'dwToOilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
108: {'type': 'zone', 'name': 'dwFromBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
109: {'type': 'zone', 'name': 'dwToGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110: {'type': 'zone', 'name': 'dwFromGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
111: {'type': 'zone', 'name': 'dwToPaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
112: {'type': 'zone', 'name': 'dwToPaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
113: {'type': 'zone', 'name': 'dwFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
114: {'type': 'zone', 'name': 'dwToLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
115: {'type': 'zone', 'name': 'dwToWarehouseFromRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
116: {'type': 'zone', 'name': 'dwFromLobbyFar',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
117: {'type': 'zone', 'name': 'dwToBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
118: {'type': 'zone', 'name': 'dwToLookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
119: {'type': 'zone', 'name': 'dwFromPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
120: {'type': 'zone', 'name': 'dwToWarehouseFromLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
121: {'type': 'zone', 'name': 'dwToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
122: {'type': 'zone', 'name': 'dwToWarehouseControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
123: {'type': 'zone', 'name': 'dwFromWarehouseFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
124: {'type': 'zone', 'name': 'dwFromWarehouseRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
125: {'type': 'zone', 'name': 'dwFromWarehouseLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
126: {'type': 'zone', 'name': 'dwFromDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
127: {'type': 'zone', 'name': 'dwFromOilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
128: {'type': 'zone', 'name': 'dwToWestSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
129: {'type': 'zone', 'name': 'dwToCenterSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
130: {'type': 'zone', 'name': 'dwToEastSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
131: {'type': 'zone', 'name': 'dwFromStomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
200: {'type': 'zone', 'name': 'sky',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
201: {'type': 'zone', 'name': 'extraZone201',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
202: {'type': 'zone', 'name': 'extraZone202',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
203: {'type': 'zone', 'name': 'extraZone203',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
204: {'type': 'zone', 'name': 'extraZone204',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
205: {'type': 'zone', 'name': 'extraZone205',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
206: {'type': 'zone', 'name': 'extraZone206',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
207: {'type': 'zone', 'name': 'extraZone207',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
208: {'type': 'zone', 'name': 'extraZone208',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
209: {'type': 'zone', 'name': 'extraZone209',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
210: {'type': 'zone', 'name': 'extraZone210',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
211: {'type': 'zone', 'name': 'extraZone211',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
212: {'type': 'zone', 'name': 'extraZone212',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
213: {'type': 'zone', 'name': 'extraZone213',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
214: {'type': 'zone', 'name': 'extraZone214',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
215: {'type': 'zone', 'name': 'extraZone215',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
216: {'type': 'zone', 'name': 'extraZone216',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
217: {'type': 'zone', 'name': 'extraZone217',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
218: {'type': 'zone', 'name': 'extraZone218',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
219: {'type': 'zone', 'name': 'extraZone219',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
220: {'type': 'zone', 'name': 'extraZone220',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
221: {'type': 'zone', 'name': 'extraZone221',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
222: {'type': 'zone', 'name': 'dwToEastSiloInterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10010: {'type': 'ambientSound', 'name': 'westWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(-52.7549, -38.8374, 53.3758),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10016: {'type': 'ambientSound', 'name': 'sndConveyorBelt',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_conveyor_belt.ogg',
'volume': 0.5},
10053: {'type': 'ambientSound', 'name': 'eastWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(52.75, -38.84, 53.38),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10055: {'type': 'ambientSound', 'name': 'sndGears',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_gears_turning.ogg',
'volume': 1},
10031: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(-1, 79, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.75, 1, 1),
'cellId': 1,
'radius': 10.0},
10035: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10039,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 4,
'radius': 10.0},
10038: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, -28.04, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 5,
'radius': 10.0},
20048: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0.973602, 71.7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 0.2, 1),
| |
lambda tstep, tind: self.ClosureP_j.value(),
datatype = 'stringdict',
category = 'Policies & Actions')
""" 2 - HEALTH PARAMETERS """
self.BaseContactR = SD_object('Base Contact Rate',
units = 'people/(day*person)',
init_value = 5,
obtype = 'variable',
func = lambda tstep, tind: self.BaseContactR.value(),
category = 'Health Parameters'
)
self.ContactR = SD_object('Contact Rate',
units = 'people/(day*person)',
init_value = self.ClosureP.value() * self.SocialDisP.value() * self.BaseContactR.value(),
obtype = 'variable',
func = lambda tstep, tind: self.ClosureP.value() * self.SocialDisP.value() * self.BaseContactR.value(),
category = 'Health Parameters'
)
self.Infectivity = SD_object('Infectivity',
units = 'likelihood/contact',
init_value = 0.05,
obtype = 'variable',
func = lambda tstep, tind: self.Infectivity.value(ind=tind),
maxval = lambda: 1,
minval = lambda: 0,
category = 'Health Parameters')
self.AvDur = SD_object('Average Illness Duration',
units = 'Days',
init_value = 14,
obtype = 'variable',
func = lambda tstep, tind: self.AvDur.value(ind=tind),
maxval = lambda: 300,
minval = lambda: 0,
category = 'Health Parameters')
#nationwide
self.RecL = SD_object('Recovery Likelihood',
units = 'probability',
init_value = 0.79,
obtype = 'variable',
func = lambda tstep, tind: self.RecL.value(ind=tind),
maxval = lambda: 1,
minval = lambda: 0,
category = 'Health Parameters')
self.MorL = SD_object('Mortality Likelihood',
units = 'probability',
init_value = 1-self.RecL.value(),
obtype = 'variable',
func = lambda tstep, tind: 1-self.RecL.value(ind=tind),
maxval = lambda: 1,
minval = lambda: 0,
category = 'Health Parameters')
#island specific
self.ContactR_j = SD_object('Contact Rate Java',
units = 'people/(day*person)',
init_value = self.ClosureP_j.value() * self.SocialDisP_j.value() * self.BaseContactR.value(),
obtype = 'variable',
func = lambda tstep, tind: self.ClosureP_j.value() * self.SocialDisP_j.value() * self.BaseContactR.value(),
category = 'Health Parameters'
)
self.ContactR_s = SD_object('Contact Rate Sulawesi',
units = 'people/(day*person)',
init_value = self.ClosureP_s.value() * self.SocialDisP_s.value() * self.BaseContactR.value(),
obtype = 'variable',
func = lambda tstep, tind: self.ClosureP_s.value() * self.SocialDisP_s.value() * self.BaseContactR.value(),
category = 'Health Parameters'
)
""" 3 - HEALTH POPULATIONS """
self.SPop = SD_object('Susceptible Population',
units = 'people',
init_value = lambda: self.historical_data('Susceptible Population', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.SPop.value(ind=tind) - self.InfectR.value(ind=tind) * tstep,
maxval = lambda: 1000000000,
minval = lambda: 0,
category = 'Health Populations')
#nationwide
self.IPop = SD_object("'Estimated' Infected Population",
units = 'people',
init_value = lambda: self.historical_data('True Current Infected', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.IPop.value(ind=tind) + (self.InfectR.value(ind=tind) -
self.RR.value(ind=tind) - self.MR.value(ind=tind)) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.Deaths = SD_object('Deaths',
units = 'people',
init_value = lambda: self.historical_data('Accumulative Deaths', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.Deaths.value(ind=tind) + self.MR.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.RPop = SD_object('Known Recovered Population',
units = 'people',
init_value = lambda: self.historical_data('Recovered Population', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.RPop.value(ind=tind) + self.RR.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.mIPop = SD_object("Measured Infected Population",
units = 'people',
init_value = lambda: self.historical_data('Measured Current Infected', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.true_to_measured(self.IPop, 14, 0.25),
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
#island specific
self.IPop_j = SD_object("'Estimated' Infected Population Java",
units = 'people',
init_value = lambda: self.historical_data('True Current Infected_java', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.IPop_j.value(ind=tind) + (self.InfectR_j.value(ind=tind) -
self.RR_j.value(ind=tind) - self.MR_j.value(ind=tind)) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.IPop_s = SD_object("'Estimated' Infected Population Sulawesi",
units = 'people',
init_value = lambda: self.historical_data('True Current Infected_SN', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.IPop_s.value(ind=tind) + (self.InfectR_s.value(ind=tind) -
self.RR_s.value(ind=tind) - self.MR_s.value(ind=tind)) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.Deaths_j = SD_object('Deaths Java',
units = 'people',
init_value = lambda: self.historical_data('Accumulative Deaths_java', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.Deaths_j.value(ind=tind) + self.MR_j.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.Deaths_s = SD_object('Deaths Sulawesi',
units = 'people',
init_value = lambda: self.historical_data('Accumulative Deaths_SN', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.Deaths_s.value(ind=tind) + self.MR_s.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.RPop_j = SD_object('Known Recovered Population Java',
units = 'people',
init_value = lambda: self.historical_data('Recovered Population_java', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.RPop_j.value(ind=tind) + self.RR_j.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.RPop_s = SD_object('Known Recovered Population Sulawesi',
units = 'people',
init_value = lambda: self.historical_data('Recovered Population_SN', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.RPop_s.value(ind=tind) + self.RR_s.value(ind=tind) * tstep,
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.mIPop_j = SD_object("Measured Infected Population Java",
units = 'people',
init_value = lambda: self.historical_data('Measured Current Infected_java', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.true_to_measured(self.IPop_j, 14, 0.25),
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.mIPop_s = SD_object("Measured Infected Population Sulawesi",
units = 'people',
init_value = lambda: self.historical_data('Measured Current Infected_SN', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.true_to_measured(self.IPop_s, 14, 0.25),
maxval = lambda: 100000000,
minval = lambda: 0,
category = 'Health Populations')
self.SPop_j = SD_object('Susceptible Population Java',
units = 'people',
init_value = lambda: self.historical_data('Susceptible Population_java', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.SPop_j.value(ind=tind) - self.InfectR_j.value(ind=tind) * tstep,
maxval = lambda: 1000000000,
minval = lambda: 0,
category = 'Health Populations')
self.SPop_s = SD_object('Susceptible Population Sulawesi',
units = 'people',
init_value = lambda: self.historical_data('Susceptible Population_SN', location, filename),
obtype = 'stock',
func = lambda tstep, tind: self.SPop_s.value(ind=tind) - self.InfectR_s.value(ind=tind) * tstep,
maxval = lambda: 1000000000,
minval = lambda: 0,
category = 'Health Populations')
""" 4 - HEALTH FLOWS """
self.InfectR = SD_object("'Estimated' Infection Rate",
units = 'people/day',
init_value = lambda: self.historical_data('True Infection Rate', location, filename),
obtype = 'flow',
func = lambda tstep, tind: (self.combos(self.SPop.value(ind=tind) + self.IPop.value(ind=tind)) - self.combos(self.SPop.value(ind=tind)) - self.combos(self.IPop.value(ind=tind))) /
self.combos(self.SPop.value(ind=tind) + self.IPop.value(ind=tind)) * self.ContactR.value(ind=tind) * (self.SPop.value(ind=tind) + self.IPop.value(ind=tind)) * self.Infectivity.value(ind=tind),
maxval = lambda: self.SPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
self.mInfectR = SD_object("Measured Infection Rate",
units = 'people/day',
init_value = lambda: self.historical_data('Measured Infection Rate', location, filename),
obtype = 'flow',
func = lambda tstep, tind: self.true_to_measured(self.InfectR, 14, 0.25),
maxval = lambda: self.SPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
#nationwide
self.RR = SD_object('Recovery Rate',
units = 'people/day',
init_value = self.RecL.value() * self.IPop.value() / self.AvDur.value(),
# init_value = 1,
obtype = 'flow',
func = lambda tstep, tind: self.RecL.value(ind=tind) * self.IPop.value(ind=tind) / self.AvDur.value(ind=tind),
maxval = lambda: self.IPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
self.MR = SD_object('Mortality Rate',
units = 'people/day',
init_value = self.MorL.value() * self.IPop.value() / self.AvDur.value(),
obtype = 'flow',
func = lambda tstep, tind: self.MorL.value(ind=tind) * self.IPop.value(ind=tind) / self.AvDur.value(ind=tind),
maxval = lambda: self.IPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
#Island specific
self.RR_j = SD_object('Recovery Rate Java',
units = 'people/day',
init_value = self.RecL.value() * self.IPop_j.value() / self.AvDur.value(),
# init_value = 1,
obtype = 'flow',
func = lambda tstep, tind: self.RecL.value(ind=tind) * self.IPop_j.value(ind=tind) / self.AvDur.value(ind=tind),
maxval = lambda: self.IPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
self.RR_s = SD_object('Recovery Rate Sulawesi',
units = 'people/day',
init_value = self.RecL.value() * self.IPop_s.value() / self.AvDur.value(),
# init_value = 1,
obtype = 'flow',
func = lambda tstep, tind: self.RecL.value(ind=tind) * self.IPop_s.value(ind=tind) / self.AvDur.value(ind=tind),
maxval = lambda: self.IPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
self.MR_j = SD_object('Mortality Rate Java',
units = 'people/day',
init_value = self.MorL.value() * self.IPop_j.value() / self.AvDur.value(),
obtype = 'flow',
func = lambda tstep, tind: self.MorL.value(ind=tind) * self.IPop_j.value(ind=tind) / self.AvDur.value(ind=tind),
maxval = lambda: self.IPop.value(),
minval = lambda: 0,
category = 'Health Flows'
)
self.MR_s = SD_object('Mortality Rate Sulawesi',
units = 'people/day',
init_value = self.MorL.value() * self.IPop_s.value() / self.AvDur.value(),
obtype = 'flow',
func = lambda tstep, tind: | |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB; default LockTimeout bound used by
#: the eventlet-friendly connection/cursor retry logic below.
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
def utf8encode(*args):
    """Encode every unicode argument to UTF-8 bytes, passing others through.

    :returns: a list of the arguments, in order, with unicode values
              replaced by their UTF-8 encodings
    """
    encoded = []
    for value in args:
        if isinstance(value, unicode):
            value = value.encode('utf8')
        encoded.append(value)
    return encoded
def utf8encodekeys(metadata):
    """Re-key *metadata* in place so unicode keys become UTF-8 byte strings.

    Values are left untouched.  The unicode keys are snapshotted first so
    the dict is not mutated while being iterated.
    """
    for key in [k for k in metadata if isinstance(k, unicode)]:
        metadata[key.encode('utf-8')] = metadata.pop(key)
def _db_timeout(timeout, db_file, call):
    """Run *call* under a LockTimeout, retrying while SQLite reports a lock.

    The green-sleep delay starts at 1ms and doubles per attempt, capped at
    50ms.  Any OperationalError that is not lock contention is re-raised
    immediately; LockTimeout aborts the whole retry loop after *timeout*.
    """
    with LockTimeout(timeout, db_file):
        delay = 0.001
        while True:
            try:
                return call()
            except sqlite3.OperationalError as err:
                if 'locked' not in str(err):
                    raise
            sleep(delay)
            delay = min(delay * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
    """DB error carrying the path, timeout and message of a failed
    connection attempt, rendered in a human-friendly form."""

    def __init__(self, path, msg, timeout=0):
        self.path = path
        self.timeout = timeout
        self.msg = msg

    def __str__(self):
        return 'DB connection error ({0}, {1}):\n{2}'.format(
            self.path, self.timeout, self.msg)
class DatabaseAlreadyExists(sqlite3.DatabaseError):
    """Raised when a DB is being created but the target file already
    exists on disk."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return 'DB {0} already exists'.format(self.path)
class GreenDBConnection(sqlite3.Connection):
    """SQLite DB Connection handler that plays well with eventlet.

    SQLite's own busy-timeout is disabled (0 is passed to the base class)
    so lock errors surface immediately; commit() and the cursors created
    here then retry through _db_timeout(), which sleeps cooperatively
    instead of blocking the whole process.
    """

    def __init__(self, database, timeout=None, *args, **kwargs):
        self.timeout = BROKER_TIMEOUT if timeout is None else timeout
        self.db_file = database
        super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)

    def cursor(self, cls=None):
        # Default to the eventlet-friendly cursor unless told otherwise.
        return sqlite3.Connection.cursor(
            self, GreenDBCursor if cls is None else cls)

    def commit(self):
        return _db_timeout(
            self.timeout, self.db_file,
            lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
    """SQLite Cursor handler that plays well with eventlet.

    execute() retries lock contention through _db_timeout() using the
    timeout/db_file settings carried by the owning connection.
    """

    def __init__(self, *args, **kwargs):
        # args[0] is the connection this cursor belongs to.
        connection = args[0]
        self.timeout = connection.timeout
        self.db_file = connection.db_file
        super(GreenDBCursor, self).__init__(*args, **kwargs)

    def execute(self, *args, **kwargs):
        return _db_timeout(
            self.timeout, self.db_file,
            lambda: sqlite3.Cursor.execute(self, *args, **kwargs))
def dict_factory(crs, row):
    """Build a real dict mapping column names to the row's values.

    This should only be used when you need a real dict, i.e. when you're
    going to serialize the results; sqlite3.Row is cheaper otherwise.
    """
    names = [col[0] for col in crs.description]
    return dict(zip(names, row))
def chexor(old, name, timestamp):
    """Fold one (name, timestamp) record into a rolling 128-bit hash.

    Each entry in the account and container databases is XORed by the
    md5 of its record on insert or delete, giving an order-independent
    digest of the contents (check + XOR).

    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: internalized timestamp of the new record
    :returns: a hex representation of the new hash value
    :raises Exception: if name is None
    """
    if name is None:
        raise Exception('name is None!')
    record = '%s-%s' % (name, timestamp)
    digest = hashlib.md5(record.encode('utf8')).hexdigest()
    return '%032x' % (int(old, 16) ^ int(digest, 16))
def get_db_connection(path, timeout=30, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.

    :param path: path to DB
    :param timeout: timeout for connection
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    :raises DatabaseConnectionError: on any sqlite3 failure
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if path != ':memory:' and not okay_to_create:
            # sqlite3.connect silently creates a missing file: detect a
            # zero-length file born after the connect call and undo it.
            # NOTE: DatabaseConnectionError subclasses sqlite3.DatabaseError,
            # so this raise is re-wrapped (with traceback text) by the
            # except clause below.
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                                              'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        with closing(conn.cursor()) as cur:
            for pragma in ('PRAGMA synchronous = NORMAL',
                           'PRAGMA count_changes = OFF',
                           'PRAGMA temp_store = MEMORY',
                           'PRAGMA journal_mode = DELETE'):
                cur.execute(pragma)
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
             account=None, container=None, pending_timeout=None,
             stale_reads_ok=False):
    """Encapsulates working with a database."""
    # Identity of the entity this broker manages.
    self.account = account
    self.container = container
    # Filesystem locations derived from the main DB path.
    self.db_file = db_file
    self.db_dir = os.path.dirname(db_file)
    self.pending_file = db_file + '.pending'
    # Timeouts and read semantics.
    self.timeout = timeout
    self.pending_timeout = pending_timeout or 10
    self.stale_reads_ok = stale_reads_ok
    # Lazily-established state.
    self.conn = None
    self.logger = logger or logging.getLogger()
    self._db_version = -1
def __str__(self):
    """Identify the entity under broker to a human: the DB's full pathname.

    This baseline implementation is deliberately informative -- it is
    vital for useful diagnostics.
    """
    return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
    """
    Create the DB

    The storage_policy_index is passed through to the subclass's
    ``_initialize`` method.  It is ignored by ``AccountBroker``.

    :param put_timestamp: internalized timestamp of initial PUT request
    :param storage_policy_index: only required for containers
    :raises DatabaseAlreadyExists: if the on-disk DB file appeared while
        this initialize was building the temp copy
    """
    if self.db_file == ':memory:':
        tmp_db_file = None
        conn = get_db_connection(self.db_file, self.timeout)
    else:
        # Build the new DB in a temp file in the same directory so the
        # final rename below stays on one filesystem (atomic).
        mkdirs(self.db_dir)
        fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
        os.close(fd)
        conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                               factory=GreenDBConnection, timeout=0)
    # creating dbs implicitly does a lot of transactions, so we
    # pick fast, unsafe options here and do a big fsync at the end.
    with closing(conn.cursor()) as cur:
        cur.execute('PRAGMA synchronous = OFF')
        cur.execute('PRAGMA temp_store = MEMORY')
        cur.execute('PRAGMA journal_mode = MEMORY')
    conn.create_function('chexor', 3, chexor)
    conn.row_factory = sqlite3.Row
    conn.text_factory = str
    # Common schema: replication sync-point bookkeeping, with triggers
    # stamping updated_at on every insert/update.
    conn.executescript("""
        CREATE TABLE outgoing_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TABLE incoming_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
    """)
    if not put_timestamp:
        put_timestamp = Timestamp(0).internal
    # Subclass-specific schema (account/container tables).
    self._initialize(conn, put_timestamp,
                     storage_policy_index=storage_policy_index)
    conn.commit()
    if tmp_db_file:
        # Durably flush the temp file, then atomically move it into place
        # under the parent-directory lock.
        conn.close()
        with open(tmp_db_file, 'r+b') as fp:
            os.fsync(fp.fileno())
        with lock_parent_directory(self.db_file, self.pending_timeout):
            if os.path.exists(self.db_file):
                # It's as if there was a "condition" where different parts
                # of the system were "racing" each other.
                raise DatabaseAlreadyExists(self.db_file)
            renamer(tmp_db_file, self.db_file)
        self.conn = get_db_connection(self.db_file, self.timeout)
    else:
        self.conn = conn
def delete_db(self, timestamp):
    """
    Mark the DB as deleted

    :param timestamp: internalized delete timestamp
    """
    # Blank every metadata value at the delete timestamp first...
    self.update_metadata(dict((key, ('', timestamp))
                              for key in self.metadata))
    # ...then let the subclass record the deletion, and commit.
    with self.get() as conn:
        self._delete_db(conn, timestamp)
        conn.commit()
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
    """
    Checks the exception info to see if it indicates a quarantine situation
    (malformed or corrupted database). If not, the original exception will
    be reraised. If so, the database will be quarantined and a new
    sqlite3.DatabaseError will be raised indicating the action taken.
    """
    # Classify the failure from the sqlite error text; anything else is
    # not quarantine-worthy and is re-raised unchanged.
    if 'database disk image is malformed' in str(exc_value):
        exc_hint = 'malformed'
    elif 'file is encrypted or is not a database' in str(exc_value):
        exc_hint = 'corrupted'
    elif 'disk I/O error' in str(exc_value):
        exc_hint = 'disk error while accessing'
    else:
        # Python 2 three-expression raise: re-raise with the original
        # traceback intact.
        raise exc_type, exc_value, exc_traceback
    # Walk up from db_dir to the device root (four dirname levels, per
    # the prefix/partition/dbs/device variable names) to build
    # <device>/quarantined/<db_type>s/<basename of db_dir>.
    prefix_path = os.path.dirname(self.db_dir)
    partition_path = os.path.dirname(prefix_path)
    dbs_path = os.path.dirname(partition_path)
    device_path = os.path.dirname(dbs_path)
    quar_path = os.path.join(device_path, 'quarantined',
                             self.db_type + 's',
                             os.path.basename(self.db_dir))
    try:
        renamer(self.db_dir, quar_path, fsync=False)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # Destination already occupied: uniquify with a random suffix.
        quar_path = "%s-%s" % (quar_path, uuid4().hex)
        renamer(self.db_dir, quar_path, fsync=False)
    detail = _('Quarantined %s to %s due to %s database') % \
        (self.db_dir, quar_path, exc_hint)
    self.logger.error(detail)
    raise sqlite3.DatabaseError(detail)
@contextmanager
def get(self):
    """Use with the "with" statement; returns a database connection."""
    if not self.conn:
        # The connection is established lazily for on-disk DBs; a
        # :memory: DB must already have left self.conn set (e.g. by
        # initialize()), otherwise it cannot be reopened here.
        if self.db_file != ':memory:' and os.path.exists(self.db_file):
            try:
                self.conn = get_db_connection(self.db_file, self.timeout)
            except (sqlite3.DatabaseError, DatabaseConnectionError):
                # May quarantine the DB and raise a fresh DatabaseError,
                # or re-raise the original exception.
                self.possibly_quarantine(*sys.exc_info())
        else:
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
    # Hand the single cached connection to the caller; clearing self.conn
    # prevents re-entrant use while the with-block runs.
    conn = self.conn
    self.conn = None
    try:
        yield conn
        # Caller is expected to commit explicitly; roll back anything
        # left open, then return the connection to the cache.
        conn.rollback()
        self.conn = conn
    except sqlite3.DatabaseError:
        # DB-level failure: drop the connection (best effort) and decide
        # whether to quarantine.
        try:
            conn.close()
        except Exception:
            pass
        self.possibly_quarantine(*sys.exc_info())
    except (Exception, Timeout):
        # Any other failure: close (do not re-cache) and propagate.
        conn.close()
        raise
@contextmanager
def lock(self):
"""Use with the "with" statement; locks a database."""
| |
Foto 29" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 29"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 29"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 64" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 64"
alt="Fotos BIXURRASCO - XV Edicao - | |
'addHCtsBut',
#'widgetType':Tkinter.Button,
#'text': 'Add A Hard Constraint:\n(only 1 possible per torsion)',
#'command': self.getNewHardTorCts,
#'gridcfg':{'sticky':Tkinter.W+Tkinter.E,'row':-1, 'column':2, 'columnspan':4}})
ifd.append({'name': 'acceptB',
'widgetType': tkinter.Button,
'text':'Accept',
'wcfg':{'bd':4},
'gridcfg':{'sticky':tkinter.E+tkinter.W, 'columnspan':3},
'command':self.Accept_cb})
ifd.append({'name': 'closeB',
'widgetType': tkinter.Button,
'text':'Close',
'wcfg':{'bd':4},
'gridcfg':{'sticky':tkinter.E+tkinter.W, 'row':-1, 'column':3,'columnspan':3},
'command':self.Close_cb})
#initialize tran0, dihe0 and quat0
self.tran0.set(self.about.get())
nstr= '0. '*self.ndihe
self.dihe0.set(nstr)
self.quat0.set('1.0 0. 0. 0.')
    def Accept_cb(self, event=None):
        """Collect edited initial-state values and apply any that changed.

        Compares each entry widget's current value against the stored dpo
        value; differing values are forwarded through doitWrapper.  The
        form is hidden only when nothing changed.
        """
        changeVals = {}
        # entry names map onto dpo keys by stripping the trailing 'Ent'
        for item in [ 'dihe0Ent', 'tran0Ent', 'quat0Ent']:
            var = self.ifd.entryByName[item]['wcfg']['textvariable']
            #FIX THIS
            if self.vf.dpo[item[:-3]]['value']!= var.get():
                changeVals[item[:-3]] = var.get()
        # torsdof4 value is a list; only its first element is compared
        oldVal = self.vf.dpo['torsdof4']['value']
        if oldVal[0]!= self.torsdof:
            changeVals['torsdof4'] = [self.torsdof]
        if len(list(changeVals.keys()))>0:
            # NOTE(review): topCommand is forced to 0 before forwarding —
            # presumably suppresses top-level command logging; confirm.
            changeVals['topCommand'] = 0
            self.doitWrapper(*(), **changeVals)
        else:
            # NOTE(review): the form is withdrawn only on the no-change
            # path; after applying changes it stays visible.
            self.form.withdraw()
    def Close_cb(self, event=None):
        """Hide the parameter form without applying any changes."""
        self.form.withdraw()
def doit(self,*args, **kw):
ligand = self.vf.dpo.ligand
d = {}
for a in ligand.allAtoms:
d[a.autodock_element] = 1
self.vf.DPF_LIGAND_TYPES = list(d.keys())
self.vf.ADdpf_setDpo(*(), **kw)
def getNewGaussTorCts(self, event=None):
num = self.tcts.get()
self.torsNum.append(tkinter.StringVar(master=self.vf.GUI.ROOT))
self.torsPAngle.append(tkinter.StringVar(master=self.vf.GUI.ROOT))
self.halfWidth.append(tkinter.StringVar(master=self.vf.GUI.ROOT))
ifd2 = self.ifd2 = InputFormDescr(title = "Gaussian Torsion Constraint Parameters")
numStr = 'Torsion Number:\n(1-'+str(self.ndihe)+')'
self.ifd2.append( { 'name': 'tnumEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'label': numStr,
'textvariable': self.torsNum[num]
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
self.ifd2.append( { 'name': 'pangEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'label': 'Perferred Angle(degrees):',
'textvariable': self.torsPAngle[num]
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
self.ifd2.append( { 'name': 'hwidthEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'label': 'Half-Width(degrees):',
'textvariable': self.halfWidth[num]
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
self.ifd2.append({'name': 'showTorpenCB',
'widgetType':tkinter.Radiobutton,
'text': 'Store + Output torsion energies',
'wcfg': {'value':'1'},
'variable': self.showtorpen,
'gridcfg':{'sticky':tkinter.W}})
self.ifd2.append({'name': 'noTorpenCB',
'widgetType':tkinter.Radiobutton,
'text': 'Don\'t Store + Output torsion energies',
'variable': self.showtorpen,
'wcfg': {'value':'0'},
'gridcfg':{'sticky':tkinter.W}})
val = self.vf.getUserInput(self.ifd2)
if len(val) and val['tnumEnt']=='' or val['pangEnt']=='' or val['hwidthEnt']=='':
val = None
if not val:
del self.torsNum[num]
del self.torsPAngle[num]
del self.halfWidth[num]
else:
newStr=[]
newStr.append(self.torsNum[num].get())
newStr.append(self.torsPAngle[num].get())
newStr.append(self.halfWidth[num].get())
self.gaussTorCts.append(newStr)
self.tcts.set(num+1)
    def getNewHardTorCts(self, event=None):
        """Prompt for one hard torsion constraint and store it.

        Pops up a form asking for torsion number, preferred relative
        angle and full width; on acceptance the three strings are stored
        in self.hardTorCts keyed by the torsion number.
        """
        torsNum =tkinter.StringVar(master=self.vf.GUI.ROOT)
        torsPAngle=tkinter.StringVar(master=self.vf.GUI.ROOT)
        halfWidth=tkinter.StringVar(master=self.vf.GUI.ROOT)
        ifd2 = self.ifd2 = InputFormDescr(title = "Hard Torsion Constraint Parameters")
        numStr = 'Torsion Number:\n(1-'+str(self.ndihe)+')'
        self.ifd2.append( { 'name': 'tnumEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': numStr,
                'textvariable': torsNum
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        self.ifd2.append( { 'name': 'pangEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Perferred Relative Angle(degrees):',
                'textvariable': torsPAngle
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        self.ifd2.append( { 'name': 'hwidthEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Full Width of allowed range(degrees):',
                'textvariable': halfWidth
            },
            'gridcfg':{'sticky':tkinter.W, 'columnspan':4}})
        val = self.vf.getUserInput(self.ifd2)
        if val:
            # NOTE(review): int() raises ValueError if the field is empty or
            # non-numeric — no validation is done here; confirm upstream.
            num = int(val['tnumEnt'])
            newEnt=[]
            newEnt.append(val['tnumEnt'])
            newEnt.append(val['pangEnt'])
            newEnt.append(val['hwidthEnt'])
            # only one hard constraint per torsion: later entries overwrite
            self.hardTorCts[num]=newEnt
def getTorCts(self, event=None):
w=self.ifd.entryByName['barrierLab']
w1=self.ifd.entryByName['barrierEnt']
w2=self.ifd.entryByName['barrierCB']
w3=self.ifd.entryByName['addGCtsBut']
w4=self.ifd.entryByName['addHCtsBut']
if self.specifyTorCts.get()=='1':
w['widget'].grid(w['gridcfg'])
w1['widget'].grid(w1['gridcfg'])
w2['widget'].grid(w2['gridcfg'])
w3['widget'].grid(w3['gridcfg'])
w4['widget'].grid(w4['gridcfg'])
else:
w['widget'].grid_forget()
w1['widget'].grid_forget()
w2['widget'].grid_forget()
w3['widget'].grid_forget()
w4['widget'].grid_forget()
def getDihe(self, event=None):
w=self.ifd.entryByName['useInitDiheLab']
w1=self.ifd.entryByName['dihe0Ent']
w2=self.ifd.entryByName['initDiheCB']
if self.specifyDihe.get()=='1':
w['widget'].grid(w['gridcfg'])
w1['widget'].grid(w1['gridcfg'])
w2['widget'].grid(w2['gridcfg'])
else:
w['widget'].grid_forget()
w1['widget'].grid_forget()
w2['widget'].grid_forget()
def set_barrier(self):
if self.barVar.get()=='1':
self.barrier.set('')
def set_initDiheType(self):
if self.initDiheType.get()==1:
self.dihe0.set('random')
else:
nstr= '0. '*self.ndihe
self.dihe0.set(nstr)
def set_initTransType(self):
w=self.ifd.entryByName['tran0Ent']['widget']
if self.initTransType.get()==1:
self.tran0.set('random')
else:
self.tran0.set(self.about.get())
def set_initQuatType(self):
if self.initQuatType.get()==1:
self.quat0.set('random')
else:
self.quat0.set('1.0 0. 0. 0.')
    def guiCallback(self):
        """Initialize and show the ligand-parameter form for the current
        dpo ligand, refreshing the summary message strings.

        Requires that a ligand has already been chosen/opened; warns and
        returns otherwise.
        """
        #Dpf4InitLigand
        if not hasattr(self.vf.dpo,'ligand'):
            msgStr="Choose/Open Ligand first."
            self.vf.warningMsg(msgStr)
            return
        ligandfilename =self.vf.dpo['move']['value']
        ligand = self.vf.dpo.ligand
        # NOTE(review): ligstring is never used below
        ligstring = ligand.types
        # ndihe comes from either attribute, depending on how the ligand
        # was formatted
        if hasattr(ligand, 'ndihe'):
            self.ndihe = ligand.ndihe
        elif hasattr(ligand, 'torscount'):
            self.ndihe = ligand.torscount
        else:
            msgStr="Selected Ligand not formatted!!"
            self.vf.warningMsg(msgStr)
            return
        self.torsdof = ligand.TORSDOF
        # collect unique autodock element types, then add any flexres types
        d = {}
        for a in ligand.allAtoms:
            d[a.autodock_element] = 1
        autodock_types = list(d.keys())
        for t in self.vf.DPF_FLEXRES_TYPES:
            if t not in autodock_types:
                autodock_types.append(t)
        autodock_types.sort()
        # build the space-separated type string
        autodock_types_str = autodock_types[0]
        for t in autodock_types[1:]:
            autodock_types_str = autodock_types_str + " " + t
        self.types.set(autodock_types_str)
        self.vf.dpo['ligand_types']['value'] = autodock_types_str
        ligand.autodock_types = autodock_types_str
        # NOTE(review): dead check — ligand has already been dereferenced
        # above, so it cannot be None here; also '== None' should be
        # 'is None'
        if ligand==None:
            msgStr="No Ligand Selected!"
            self.vf.warningMsg(msgStr)
            return
        if not hasattr(self, 'form'):
            # first invocation: build the form and seed the init-type modes
            self.buildForm()
            self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
            self.form.root.protocol('WM_DELETE_WINDOW',self.Close_cb)
            self.initTransType.set(1)
            self.initDiheType.set(1)
            self.initQuatType.set(1)
            self.fmap.set(0)
            #this overwrites previous values
            self.set_initTransType()
            self.set_initQuatType()
            self.set_initDiheType()
        else:
            self.form.root.deiconify()
        self.torsdofcoeff.set("")
        self.vf.dpo['torsdof4']['value'][0] = ligand.TORSDOF
        # recompute the displayed center only when the ligand changed
        if not hasattr(self, 'old_ligand') or self.old_ligand!=ligand:
            v = self.vf.dpo['about']['value']
            strCenter = "%6.3f %6.3f %6.3f" %(v[0], v[1], v[2])
            self.about.set(strCenter)
            self.old_ligand=ligand
        # refresh the summary labels
        self.ligMsgStr.set( 'Ligand: '+ ligandfilename)
        if self.ndihe:
            self.ndiheMsgStr.set('Number of Active Torsions in Ligand: '+ str(self.ndihe))
        else:
            self.ndiheMsgStr.set('No Torsions Specified in Ligand!')
        if self.torsdof:
            self.tdofMsgStr.set('Number of Torsional Degrees of Freedom(torsdof) in Ligand: '+ str(self.torsdof))
        else:
            self.tdofMsgStr.set('No Torsional Degrees of\nFreedom in Ligand! ')
        self.typesMsgStr.set('Ligand Atom Types: ' + self.types.get())
        self.centerMsgStr.set('Center of Ligand Molecule: ' + self.about.get())
# Register the ligand-initialization command in the AutoDpf menu cascade.
Dpf4InitLigandGUI=CommandGUI()
Dpf4InitLigandGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['AdjustLigand4'], cascadeName = menuText['SetLigandParmsMB'])
class Dpf4LigandChooser(MVCommand):
    """Allows the user to choose a molecule already present for the ligand."""

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)
        #self.vf.loadModule('autotorsCommands', 'AutoDockTools')
        if not hasattr(self.vf, 'atorsDict'):
            self.vf.atorsDict = {}
        self.vf.loadModule('displayCommands')
        self.vf.loadModule('bondsCommands')
        self.vf.loadModule('fileCommands')

    def chooseLigand_cb(self, event=None):
        """called each time the 'choose Ligand' button is pressed"""
        mol = self.chooser.getMolSet()
        if mol:
            ask = 1
            self.doitWrapper(mol, ask, log=1, redraw=1)
        self.chooser.form.withdraw()

    def guiCallback(self):
        """Show a MoleculeChooser with a 'Select Ligand' button."""
        self.mode = 'single'
        self.title = 'Choose Ligand'
        self.chooser = MoleculeChooser(self.vf, self.mode, self.title)
        self.chooser.ipf.append({'name': 'Select Button',
                                 'widgetType': tkinter.Button,
                                 'text': 'Select Ligand',
                                 'wcfg': {'bd': 6},
                                 'gridcfg': {'sticky': tkinter.E + tkinter.W},
                                 'command': self.chooseLigand_cb})
        self.form = self.chooser.go(modal=0, blocking=0)
        # double-click in the molecule listbox also selects
        lb = self.chooser.ipf.entryByName['Molecule']['widget'].lb
        lb.bind("<Double-Button-1>", self.chooseLigand_cb)

    def __call__(self, nodes, ask=1, **kw):
        self.doitWrapper(*(nodes, ask), **kw)

    def doit(self, nodes, ask):
        """Install the first expanded node as the dpo ligand.

        Only molecules preformatted by autotors (torTree, atorsDict entry
        or already the dpo ligand) are accepted; otherwise returns 'ERROR'.
        With ask true, hands off to the init-ligand GUI; otherwise the
        dpo fields are set directly.
        """
        lig = self.vf.expandNodes(nodes)[0]
        d = {}
        for a in lig.allAtoms:
            d[a.autodock_element] = 1
        self.vf.DPF_LIGAND_TYPES = list(d.keys())
        isAtorsMol = 0
        isDpoLigand = 0
        hasTorTree = hasattr(lig, 'torTree')
        d = self.vf.atorsDict
        if 'molecule' in d and d['molecule'] == lig:
            isAtorsMol = 1
            lig.ndihe = lig.torscount
        if hasattr(self.vf, 'dpo'):
            obj = self.vf.dpo
            if hasattr(obj, 'ligand') and obj.ligand == lig:
                isDpoLigand = 1
        if hasTorTree or isAtorsMol or isDpoLigand:
            setDpoFields(lig, self.vf.dpo)
        else:
            msgStr = 'can only selectLigand\n preformatted by autotors (and Written to a file)\n for this option!'
            self.vf.warningMsg(msgStr)
            return 'ERROR'
        if self.vf.hasGui:
            self.vf.colorByAtomType(lig, topCommand=0, redraw=1)
            # highlight aromatic carbons in green
            aromaticCs = AtomSet([x for x in lig.allAtoms if x.autodock_element == 'A'])
            if len(aromaticCs):
                self.vf.color(aromaticCs, ((0., 1., 0.),), ['lines'], topCommand=0, redraw=1)
        if ask:
            self.vf.ADdpf4_initLigand.guiCallback()
        else:
            d = {}
            if hasattr(lig, 'outputfile'):
                d['move'] = lig.outputfile
            # NOTE(review): lowercase lig.torsdof here vs. TORSDOF elsewhere
            # in this module — confirm which attribute autotors sets.
            d['torsdof4'] = [lig.torsdof, self.vf.dpo['torsdof4']['value'][1]]
            d['about'] = lig.center
            d['ndihe'] = lig.ndihe
            d['types'] = lig.types
            # BUGFIX: the original assigned 'ligtypestr' but then used the
            # undefined name 'lig_type_str' in the join loop, raising
            # NameError whenever this branch ran with >1 atom type.
            ligtypes = sorted(set(a.autodock_element for a in lig.allAtoms))
            d['ligand_types'] = " ".join(ligtypes)
            self.vf.ADdpf_setDpo(*(), **d)
# Register the ligand-chooser command in the AutoDpf menu cascade.
Dpf4LigandChooserGUI=CommandGUI()
Dpf4LigandChooserGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
    menuText['ChooseLigand4'], cascadeName = menuText['SetLigandParmsMB'])
class Dpf4LigPDBQReader(MVCommand):
    """ allows user to choose a PDBQ file for the ligand"""
    def onAddCmdToViewer(self):
        # readMolecule is only needed (and loaded) when a GUI is present
        if self.vf.hasGui and not hasattr(self.vf,'readMolecule'):
            self.vf.loadCommand('fileCommands', 'readMolecule', 'Pmv')
        checkHasDpo(self.vf)
    def guiCallback(self):
        """called each time the 'select ligand' button is pressed"""
        ligFile = self.vf.askFileOpen(types=[ ('PDBQT files:', '*.pdbqt'),
                        ('Autotors-format convention:', '*.out.pdbqt')],
                        title = 'Formatted Ligand File:')
        if ligFile:
            self.doitWrapper(ligFile, log=1, redraw=1)
    def __call__(self, ligFile, ask = 1, **kw):
        """None<-ADdpf4_readFormattedLigand
ligFile: file containing the ligand
ask: flag for whether to update geometry
"""
        # fail early if the file is missing (IOError carries no message)
        if not os.path.exists(ligFile):
            raise IOError
        kw['ask'] = ask
        self.doitWrapper(*(ligFile,), **kw)
    def doit(self, ligFile, ask=1):
        """Read the ligand file, install it in the dpo and, with a GUI,
        color it and open the init-ligand form."""
        ligand = self.vf.readMolecule(ligFile)[0]
        ligFile = os.path.basename(ligFile)
        assert hasattr(ligand, 'TORSDOF'), 'ligand must be preformatted with autotors'
        setDpoFields(ligand, self.vf.dpo)
        #color and check charges here
        d = {}
        for a in ligand.allAtoms:
            d[a.autodock_element] = 1
        self.vf.DPF_LIGAND_TYPES = list(d.keys())
        if self.vf.hasGui and hasattr(ligand,'ndihe') and ask:
            self.vf.colorByAtomType(ligand, topCommand = 0, redraw=1)
            # highlight aromatic carbons in green
            aromaticCs = AtomSet([x for x in ligand.allAtoms if x.autodock_element=='A'])
            if len(aromaticCs):
                self.vf.color(aromaticCs,((0.,1.,0.),),['lines'],topCommand=0, redraw=1)
            self.vf.ADdpf4_initLigand.guiCallback()
# Register the formatted-ligand reader command in the AutoDpf menu cascade.
Dpf4LigPDBQReaderGUI = CommandGUI()
Dpf4LigPDBQReaderGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['ReadLigand4'], cascadeName = menuText['SetLigandParmsMB'])
class DpfEditor(MVCommand):
    """Allows the user to edit the current output dpf file and write it."""

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)

    def __call__(self, **kw):
        self.doitWrapper(*(), **kw)

    def doit(self):
        """Open a ScrolledText editing form preloaded with the contents
        of the last-written dpf (empty if none has been written)."""
        if self.vf.dpo.dpf_written_filename:
            filename = self.vf.dpo.dpf_written_filename
            # BUGFIX: use a context manager so the file handle is closed
            # (the original left it open) and read() instead of the
            # quadratic line-by-line string concatenation.
            with open(filename, 'r') as fptr:
                ss = fptr.read()
        else:
            ss = ''
            filename = ''
        titleStr = 'Edit dpf'
        self.ifd = ifd = InputFormDescr(title=titleStr)
        ifd.append({'name': 'dpfText',
                    'size': [80, 30],
                    'label': filename,
                    'defaultValue': ss,
                    'widgetType': 'ScrolledText',
                    'writeFileType': [('Docking Parameter Files', '*.dpf'), ('Vina Config Files', '*.txt')],
                    'readFileType': [('Docking Parameter Files', '*.dpf'), ('Vina Config Files', '*.txt')],
                    'readButton': 1, 'writeButton': 1})
        # the form result is unused: the widget's read/write buttons do
        # the actual file I/O
        self.vf.getUserInput(ifd)

    def guiCallback(self):
        self.doitWrapper(log=1, redraw=0)
# Register the dpf editor command in the AutoDpf menu.
DpfEditorGUI=CommandGUI()
DpfEditorGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['EditDpfMB'])
class DpfSAWriter(MVCommand):
    """Allows the user to choose an output filename and write simulated
    annealing parameters."""

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)

    def __call__(self, outfile, **kw):
        self.doitWrapper(*(outfile,), **kw)

    def doit(self, outfile):
        """Write the simulated-annealing parameter list to *outfile*.

        Returns 'ERROR' if no macromolecule has been chosen yet.
        """
        #to remove pickle problem, assume dpo is current self.vf.dpo
        if not len(self.vf.dpo.receptor_stem):
            self.vf.warningMsg("You must choose a macromolecule before writing dpf")
            return 'ERROR'
        # BUGFIX: operate on a copy — the original inserted 'rmsref' into
        # the shared module-level simulated_annealing_list, so every call
        # appended another duplicate entry.
        keyword_list = list(simulated_annealing_list)
        #if a rms reference file has been specified, write it to dpf
        if self.vf.dpo['rmsref']['value'] != self.vf.dpo['move']['value']:
            keyword_list.insert(keyword_list.index('rmstol') + 1, 'rmsref')
        self.vf.dpo.write(outfile, keyword_list)
        #this is set when the dpo is written
        #self.vf.dpo.dpf_filename = outfile

    def guiCallback(self):
        if not len(self.vf.dpo.receptor_stem):
            self.vf.warningMsg("You must choose a macromolecule before writing dpf")
            return 'ERROR'
        outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
                                      title='SA Docking Parameter Output File:')
        if outfile:
            self.doitWrapper(outfile, log=1, redraw=0)
# Register the SA dpf writer command in the AutoDpf write-dpf cascade.
DpfSAWriterGUI=CommandGUI()
DpfSAWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
        menuText['WriteSA'], cascadeName = menuText['WriteDpfMB'])
class Dpf41EPDBWriter(MVCommand):
""" allows user to choose an output filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
orig_epdb_flag = self.vf.dpo['epdb_flag']['value']
self.vf.dpo['epdb_flag']['value'] = 1
self.vf.dpo.write42(outfile, epdb_list4_2)
#msg = "restoring dpo epdb_flag value from current "+ str(self.vf.dpo['epdb_flag']['value'])
self.vf.dpo['epdb_flag']['value'] = orig_epdb_flag
#msg= msg + " to " + str(self.vf.dpo['epdb_flag']['value'])
#self.vf.warningMsg(msg)
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule | |
will be updated, then ignored
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestOpportunityNoteSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing sales opportunity notes."""

    synchronizer_class = sync.OpportunityNoteSynchronizer
    model_class = models.OpportunityNoteTracker
    fixture = fixtures.API_SALES_OPPORTUNITY_NOTE_LIST

    def setUp(self):
        super().setUp()
        # Notes reference opportunities, which reference stages.
        fixture_utils.init_opportunity_notes()
        fixture_utils.init_opportunity_stages()
        fixture_utils.init_opportunities()

    def call_api(self, return_data):
        return mocks.sales_api_get_opportunity_notes_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify a note instance mirrors its API payload."""
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.text, json_data['text'])
        self.assertEqual(instance.opportunity.id,
                         json_data['opportunityId'])

    def test_sync_update(self):
        """Changed note text in the payload is written to the record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        edited = deepcopy(payload)
        edited['text'] = \
            "Different Text, not the same text, but new, better text."
        self._sync([edited])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.text, edited['text'])
        self._assert_fields(after, edited)

    def test_sync_skips(self):
        """A second sync of identical data is skipped, not re-applied."""
        self._sync(self.fixture)
        edited = deepcopy(self.fixture[0])
        edited['text'] = \
            "Different Text, not the same text, but new, better text."
        # first sync applies the update; the second must skip it
        self._sync([edited])
        _, updated, skipped, _ = self._sync_with_results([edited])
        self.assertEqual(skipped, 1)
        self.assertEqual(updated, 0)
class TestMemberSynchronization(TransactionTestCase, AssertSyncMixin):
    """Tests for MemberSynchronizer, including avatar download/storage."""
    model_class = models.MemberTracker
    def setUp(self):
        self.identifier = 'User1'
        # patch the members endpoint and the avatar-image endpoint before
        # constructing the synchronizer
        mocks.system_api_get_members_call([fixtures.API_MEMBER])
        self.synchronizer = sync.MemberSynchronizer()
        mocks.system_api_get_member_image_by_photo_id_call(
            (mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
    def _assert_member_fields(self, local_member, api_member):
        # compare the subset of fields copied from the API payload
        self.assertEqual(local_member.first_name, api_member['firstName'])
        self.assertEqual(local_member.last_name, api_member['lastName'])
        self.assertEqual(local_member.office_email, api_member['officeEmail'])
    def test_sync_member_update(self):
        # seed a stale record; sync should overwrite its fields
        member = models.Member()
        member.id = 176
        member.identifier = self.identifier
        member.first_name = 'some stale first name'
        member.last_name = 'some stale <NAME>'
        member.office_email = '<EMAIL>'
        member.save()
        self.synchronizer.sync()
        local_member = models.Member.objects.get(identifier=self.identifier)
        api_member = fixtures.API_MEMBER
        self._assert_member_fields(local_member, api_member)
    def test_sync_member_create(self):
        # no pre-existing record: sync should create one and log a sync job
        self.synchronizer.sync()
        local_member = models.Member.objects.all().first()
        api_member = fixtures.API_MEMBER
        self._assert_member_fields(local_member, api_member)
        self.assert_sync_job()
    def test_sync_member_with_no_photo(self):
        # members without a 'photo' key must sync with an empty avatar
        member_without_photo = deepcopy(fixtures.API_MEMBER)
        member_without_photo.pop('photo')
        mocks.system_api_get_members_call([member_without_photo])
        self.synchronizer = sync.MemberSynchronizer()
        self.synchronizer.sync()
        local_member = models.Member.objects.get(identifier=self.identifier)
        self._assert_member_fields(local_member, member_without_photo)
        local_avatar = local_member.avatar
        self.assertFalse(local_avatar)
    def test_sync_member_avatar_name_is_updated(self):
        self.synchronizer = sync.MemberSynchronizer()
        self.synchronizer.sync()
        member = models.Member.objects.get(identifier=self.identifier)
        old_avatar = member.avatar
        # overwrite locally, then re-sync: the avatar name must change back
        member.avatar = 'new_image_name.png'
        self.synchronizer.sync()
        self.assertNotEqual(old_avatar, member.avatar)
    def test_avatar_thumbnails_are_in_storage(self):
        self.synchronizer = sync.MemberSynchronizer()
        self.synchronizer.sync()
        member = models.Member.objects.get(identifier=self.identifier)
        attachment_filename = 'some_new_image.png'
        avatar = mocks.get_member_avatar()
        self.synchronizer._save_avatar(member, avatar, attachment_filename)
        # stored names are content-hash based, with per-size suffixes
        filename = '{}.{}'.format(get_hash(avatar), 'png')
        micro_avatar_size = filename + '20x20.png'
        avatar_size = filename + '80x80.png'
        self.assertTrue(default_storage.exists(avatar_size))
        self.assertTrue(default_storage.exists(micro_avatar_size))
    @override_settings(DEFAULT_FILE_STORAGE='storages.backends.'
                                            's3boto3.S3Boto3Storage',
                       AWS_STORAGE_BUCKET_NAME='somebucket')
    def test_app_wont_crash_without_DO_credentials(self):
        self.synchronizer = sync.MemberSynchronizer()
        api_member = fixtures.API_MEMBER
        # If this doesn't raise an exception, GREAT!
        self.synchronizer.update_or_create_instance(api_member)
        # Now we check to make sure that it WOULD have raised an exception
        # if we didn't call it from its safe location. If no exceptions
        # occur then there is still a problem.
        self.synchronizer = sync.MemberSynchronizer()
        self.synchronizer.sync()
        member = models.Member.objects.get(identifier=self.identifier)
        attachment_filename = 'some_new_image.png'
        avatar = mocks.get_member_avatar()
        with self.assertRaises(NoCredentialsError):
            self.synchronizer._save_avatar(
                member, avatar, attachment_filename)
class TestOpportunitySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing sales opportunities."""
    synchronizer_class = sync.OpportunitySynchronizer
    model_class = models.OpportunityTracker
    fixture = fixtures.API_SALES_OPPORTUNITIES
    def setUp(self):
        super().setUp()
        self.synchronizer = self.synchronizer_class()
        mocks.sales_api_get_opportunity_types_call(
            fixtures.API_SALES_OPPORTUNITY_TYPES)
        # opportunities reference many other record types; seed them all
        fixture_utils.init_activities()
        fixture_utils.init_opportunity_statuses()
        fixture_utils.init_opportunity_types()
        fixture_utils.init_sales_probabilities()
        fixture_utils.init_members()
        fixture_utils.init_territories()
        fixture_utils.init_companies()
    def call_api(self, return_data):
        return mocks.sales_api_get_opportunities_call(return_data)
    def _assert_fields(self, instance, json_data):
        # scalar fields
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])
        # date/datetime fields arrive as strings and are parsed
        self.assertEqual(instance.expected_close_date,
                         parse(json_data['expectedCloseDate']).date())
        self.assertEqual(instance.pipeline_change_date,
                         parse(json_data['pipelineChangeDate']))
        self.assertEqual(instance.date_became_lead,
                         parse(json_data['dateBecameLead']))
        self.assertEqual(instance.closed_date,
                         parse(json_data['closedDate']))
        self.assertEqual(instance.notes, json_data['notes'])
        self.assertEqual(instance.source, json_data['source'])
        self.assertEqual(instance.location_id, json_data['locationId'])
        self.assertEqual(instance.business_unit_id,
                         json_data['businessUnitId'])
        self.assertEqual(instance.customer_po,
                         json_data['customerPO'])
        # related records are referenced by nested {'id': ...} objects
        self.assertEqual(instance.priority_id,
                         json_data['priority']['id'])
        self.assertEqual(instance.stage_id,
                         json_data['stage']['id'])
        self.assertEqual(instance.opportunity_type_id,
                         json_data['type']['id'])
        self.assertEqual(instance.status_id,
                         json_data['status']['id'])
        self.assertEqual(instance.primary_sales_rep_id,
                         json_data['primarySalesRep']['id'])
        self.assertEqual(instance.secondary_sales_rep_id,
                         json_data['secondarySalesRep']['id'])
        self.assertEqual(instance.company_id,
                         json_data['company']['id'])
        self.assertEqual(instance.closed_by_id,
                         json_data['closedBy']['id'])
    def test_fetch_sync_by_id(self):
        # fetching a single record by id must return a fully-mapped instance
        json_data = self.fixture[0]
        _, patch = mocks.sales_api_by_id_call(json_data)
        result = self.synchronizer.fetch_sync_by_id(json_data['id'])
        self._assert_fields(result, json_data)
        patch.stop()
    # TODO This test does nothing, must be updated
    # def test_fetch_delete_by_id(self):
    #     json_data = self.fixture[0]
    #     _, patch = mocks.sales_api_by_id_call(json_data)
    #     self.synchronizer.fetch_delete_by_id(json_data['id'])
    #     self.assertFalse(Opportunity.objects.filter(
    #         id=json_data['id']).exists())
    #     patch.stop()
class TestOpportunityStageSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing opportunity stages via the shared mixin."""

    synchronizer_class = sync.OpportunityStageSynchronizer
    model_class = models.OpportunityStageTracker
    fixture = fixtures.API_SALES_OPPORTUNITY_STAGES

    def call_api(self, return_data):
        return mocks.sales_api_get_opportunity_stages_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify a stage instance mirrors its API payload."""
        for attr, key in (('id', 'id'), ('name', 'name')):
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestOpportunityStatusSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing opportunity statuses via the shared mixin."""

    synchronizer_class = sync.OpportunityStatusSynchronizer
    model_class = models.OpportunityStatusTracker
    fixture = fixtures.API_SALES_OPPORTUNITY_STATUSES

    def call_api(self, return_data):
        return mocks.sales_api_get_opportunity_statuses_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify each mirrored field matches the API payload."""
        field_map = (
            ('id', 'id'),
            ('name', 'name'),
            ('won_flag', 'wonFlag'),
            ('lost_flag', 'lostFlag'),
            ('closed_flag', 'closedFlag'),
            ('inactive_flag', 'inactiveFlag'),
        )
        for attr, key in field_map:
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestOpportunityTypeSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing opportunity types."""

    synchronizer_class = sync.OpportunityTypeSynchronizer
    model_class = models.OpportunityTypeTracker
    fixture = fixtures.API_SALES_OPPORTUNITY_TYPES

    def call_api(self, return_data):
        return mocks.sales_api_get_opportunity_types_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify a type instance mirrors its API payload."""
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.description, json_data['description'])
        self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])

    def test_sync_update(self):
        """An edited description in the payload is written to the record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        edited = deepcopy(payload)
        edited['description'] = 'Some New Description'
        self._sync([edited])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.description, 'Some New Description')
        self._assert_fields(after, edited)

    def test_sync_skips(self):
        """Identical data on a second sync is counted as skipped."""
        self._sync(self.fixture)
        edited = deepcopy(self.fixture[0])
        edited['description'] = 'Some New Description'
        # first sync applies the update; the second must skip it
        self._sync([edited])
        _, updated, skipped, _ = self._sync_with_results([edited])
        self.assertEqual(skipped, 1)
        self.assertEqual(updated, 0)
class TestHolidaySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing schedule holidays."""

    synchronizer_class = sync.HolidaySynchronizer
    model_class = models.HolidayTracker
    fixture = fixtures.API_SCHEDULE_HOLIDAY_MODEL_LIST

    def setUp(self):
        # holidays reference a holiday list, so seed those first
        fixture_utils.init_holiday_lists()

    def call_api(self, return_data):
        return mocks.schedule_api_get_holidays_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify a holiday instance mirrors its API payload."""
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])
        self.assertEqual(instance.all_day_flag, json_data['allDayFlag'])
        # date and time fields arrive as strings and are parsed
        self.assertEqual(instance.date, parse(json_data['date']).date())
        self.assertEqual(
            instance.start_time, parse(json_data['timeStart']).time())
        self.assertEqual(
            instance.end_time, parse(json_data['timeEnd']).time())
        self.assertEqual(
            instance.holiday_list.id, json_data['holidayList']['id'])

    def test_sync_update(self):
        """Renaming a holiday in the payload updates the local record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        renamed = deepcopy(payload)
        renamed['name'] = 'A new name'
        self._sync([renamed])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.name, 'A new name')
        self._assert_fields(after, renamed)
class TestHolidayListSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing holiday lists."""

    synchronizer_class = sync.HolidayListSynchronizer
    model_class = models.HolidayListTracker
    fixture = fixtures.API_SCHEDULE_HOLIDAY_LIST_LIST

    def call_api(self, return_data):
        return mocks.schedule_api_get_holiday_lists_call(return_data)

    def _assert_fields(self, instance, json_data):
        """Verify a holiday-list instance mirrors its API payload."""
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])

    def test_sync_update(self):
        """Renaming a holiday list in the payload updates the record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        renamed = deepcopy(payload)
        renamed['name'] = 'A new name'
        self._sync([renamed])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.name, 'A new name')
        self._assert_fields(after, renamed)
class TestCalendarSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing schedule calendars."""

    synchronizer_class = sync.CalendarSynchronizer
    model_class = models.CalendarTracker
    fixture = fixtures.API_SCHEDULE_CALENDAR_LIST

    def call_api(self, return_data):
        return mocks.schedule_api_get_calendars_call(return_data)

    def test_sync_update(self):
        """Renaming a calendar in the payload updates the local record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        renamed = deepcopy(payload)
        renamed['name'] = 'A New Calendar'
        self._sync([renamed])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.name, 'A New Calendar')
        self._assert_fields(after, renamed)

    def _assert_fields(self, instance, json_data):
        """Verify a calendar instance mirrors its API payload."""
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])
        # weekday start/end times arrive as strings and are parsed
        for day in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday'):
            self.assertEqual(
                getattr(instance, day + '_start_time'),
                parse(json_data[day + 'StartTime']).time())
            self.assertEqual(
                getattr(instance, day + '_end_time'),
                parse(json_data[day + 'EndTime']).time())
        # Dont parse the weekend values: they are None in the fixtures
        for day in ('saturday', 'sunday'):
            self.assertEqual(
                getattr(instance, day + '_start_time'),
                json_data[day + 'StartTime'])
            self.assertEqual(
                getattr(instance, day + '_end_time'),
                json_data[day + 'EndTime'])
class TestMyCompanyOtherSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing MyCompanyOther records."""

    synchronizer_class = sync.MyCompanyOtherSynchronizer
    model_class = models.MyCompanyOtherTracker
    fixture = fixtures.API_SYSTEM_OTHER_LIST

    def setUp(self):
        fixture_utils.init_calendars()
        fixture_utils.init_others()

    def call_api(self, return_data):
        return mocks.system_api_get_other_call(return_data)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.id, json_data['id'])

    def test_sync_update(self):
        """Clearing defaultCalendar in the payload clears the local field."""
        self._sync(self.fixture)
        payload = deepcopy(self.fixture[0])
        before = self.model_class.objects.get(id=payload['id'])
        payload['defaultCalendar'] = None
        self._sync([payload])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.default_calendar, after.default_calendar)

    def test_sync_skips(self):
        """Identical data on a second sync is skipped, not re-applied."""
        self._sync(self.fixture)
        payload = deepcopy(self.fixture[0])
        payload['defaultCalendar'] = None
        # first sync applies the update; the second must skip it
        self._sync([payload])
        _, updated, skipped, _ = self._sync_with_results([payload])
        self.assertEqual(skipped, 1)
        self.assertEqual(updated, 0)
class TestSLASynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for syncing SLAs."""

    synchronizer_class = sync.SLASynchronizer
    model_class = models.SlaTracker
    fixture = fixtures.API_SERVICE_SLA_LIST

    def setUp(self):
        # SLAs reference a calendar
        fixture_utils.init_calendars()

    def call_api(self, return_data):
        return mocks.service_api_get_slas_call(return_data)

    def test_sync_update(self):
        """Renaming an SLA in the payload updates the local record."""
        self._sync(self.fixture)
        payload = self.fixture[0]
        before = self.model_class.objects.get(id=payload['id'])
        renamed = deepcopy(payload)
        renamed['name'] = 'A Different SLA'
        self._sync([renamed])
        after = self.model_class.objects.get(id=payload['id'])
        self.assertNotEqual(before.name, 'A Different SLA')
        self._assert_fields(after, renamed)

    def _assert_fields(self, instance, json_data):
        """Verify each mirrored field matches the API payload."""
        field_map = (
            ('id', 'id'),
            ('name', 'name'),
            ('default_flag', 'defaultFlag'),
            ('respond_hours', 'respondHours'),
            ('plan_within', 'planWithin'),
            ('resolution_hours', 'resolutionHours'),
        )
        for attr, key in field_map:
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestSLAPrioritySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests for SLAPrioritySynchronizer syncing SlaPriorityTracker rows."""
    synchronizer_class = sync.SLAPrioritySynchronizer
    model_class = models.SlaPriorityTracker
    fixture = fixtures.API_SERVICE_SLA_PRIORITY_LIST

    def setUp(self):
        fixture_utils.init_calendars()
        fixture_utils.init_slas()
        fixture_utils.init_priorities()

    def call_api(self, return_data):
        return mocks.service_api_get_sla_priorities_call(return_data)

    def test_sync_update(self):
        """A changed respondHours value is applied by a second sync."""
        self._sync(self.fixture)
        record_json = self.fixture[0]
        record_id = record_json['id']
        before = self.model_class.objects.get(id=record_id)
        new_respond_hours = 500
        updated_json = deepcopy(record_json)
        updated_json['respondHours'] = new_respond_hours
        self._sync([updated_json])
        after = self.model_class.objects.get(id=record_id)
        self.assertNotEqual(before.respond_hours, new_respond_hours)
        self._assert_fields(after, updated_json)

    def test_sync_skips(self):
        """An unchanged record is skipped on a repeat sync."""
        self._sync(self.fixture)
        updated_json = deepcopy(self.fixture[0])
        updated_json['respondHours'] = 500
        payload = [updated_json]
        # Sync it twice to be sure that the data will be updated, then ignored
        self._sync(payload)
        _, updated_count, skipped_count, _ = self._sync_with_results(payload)
        self.assertEqual(skipped_count, 1)
        self.assertEqual(updated_count, 0)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.respond_hours, json_data['respondHours'])
        self.assertEqual(instance.plan_within, json_data['planWithin'])
        self.assertEqual(instance.resolution_hours,
                         json_data['resolutionHours'])
class TestTicketSynchronizerMixin(AssertSyncMixin):
    """Shared tests for ticket synchronizers; subclasses supply sync_class
    and ticket_fixture."""
    model_class = models.TicketTracker

    def setUp(self):
        super().setUp()
        mocks.system_api_get_members_call(fixtures.API_MEMBER_LIST)
        mocks.system_api_get_member_image_by_photo_id_call(
            (mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))

    def _clean(self):
        models.Ticket.objects.all().delete()

    def _init_data(self):
        self._clean()
        # Tickets reference nearly every other tracked entity, so all of
        # these fixtures must exist first.
        initializers = (
            fixture_utils.init_boards,
            fixture_utils.init_board_statuses,
            fixture_utils.init_teams,
            fixture_utils.init_members,
            fixture_utils.init_companies,
            fixture_utils.init_priorities,
            fixture_utils.init_projects,
            fixture_utils.init_locations,
            fixture_utils.init_calendars,
            fixture_utils.init_slas,
            fixture_utils.init_types,
            fixture_utils.init_subtypes,
            fixture_utils.init_items,
            fixture_utils.init_agreements,
        )
        for initialize in initializers:
            initialize()

    def test_sync_ticket(self):
        """
        Test to ensure ticket synchronizer saves a CW Ticket instance
        locally.
        """
        synchronizer = self.sync_class()
        synchronizer.sync()
        self.assertGreater(models.Ticket.objects.all().count(), 0)
        json_data = self.ticket_fixture
        instance = models.Ticket.objects.get(id=json_data['id'])
        self._assert_sync(instance, json_data)
        self.assert_sync_job()

    def test_sync_ticket_truncates_automatic_cc_field(self):
        """
        Test to ensure ticket synchronizer truncates the automatic CC field
        to 1000 characters.
        """
        synchronizer = self.sync_class()
        instance = models.Ticket()
        # 64 copies of the address (the original doubled it six times),
        # which is comfortably longer than the 1000-character limit.
        field_data = "<EMAIL>;" * 64
        json_data = deepcopy(self.ticket_fixture)
        json_data['automaticEmailCc'] = field_data
        instance = synchronizer._assign_field_data(instance, json_data)
        self.assertEqual(len(instance.automatic_email_cc), 1000)
# ---- end of file ----
# coding=utf-8
import struct
from ..base import BaseTopazTest
class TestArrayObject(BaseTopazTest):
    def test_to_s(self, space):
        """Array#to_s renders elements (including nested arrays) recursively."""
        w_res = space.execute("return [].to_s")
        assert space.str_w(w_res) == "[]"
        w_res = space.execute("return [[1]].to_s")
        assert space.str_w(w_res) == "[[1]]"
        w_res = space.execute("return [[1], [2], [3]].to_s")
        assert space.str_w(w_res) == "[[1], [2], [3]]"
    def test_subscript(self, space):
        """Array#[] / #at: single index, negative index, and (start, length)
        forms, including out-of-range nil results and to_int coercion."""
        w_res = space.execute("return [1][0]")
        assert space.int_w(w_res) == 1
        w_res = space.execute("return [1].at(0)")
        assert space.int_w(w_res) == 1
        # Past-the-end and too-negative single indices yield nil.
        w_res = space.execute("return [1][1]")
        assert w_res is space.w_nil
        w_res = space.execute("return [1][-1]")
        assert space.int_w(w_res) == 1
        w_res = space.execute("return [1][-2]")
        assert w_res == space.w_nil
        # (start, length) form: length clamps to the array size.
        w_res = space.execute("return [1, 2][0, 0]")
        assert self.unwrap(space, w_res) == []
        w_res = space.execute("return [1, 2][0, 1]")
        assert self.unwrap(space, w_res) == [1]
        w_res = space.execute("return [1, 2][0, 5]")
        assert self.unwrap(space, w_res) == [1, 2]
        # Negative length yields nil; negative start counts from the end.
        w_res = space.execute("return [1, 2][0, -1]")
        assert w_res is space.w_nil
        w_res = space.execute("return [1, 2][-1, 1]")
        assert self.unwrap(space, w_res) == [2]
        w_res = space.execute("return [1, 2][-2, 2]")
        assert self.unwrap(space, w_res) == [1, 2]
        w_res = space.execute("return [1, 2][-2, 2]")
        assert self.unwrap(space, w_res) == [1, 2]
        # A Range plus a length argument is a TypeError.
        with self.raises(space, "TypeError"):
            space.execute("[1, 2][1..2, 1]")
        # Non-integer arguments are coerced via to_int.
        w_res = space.execute("""
        class String; def to_int; 1; end; end
        return [1, 2]["1", "1"]
        """)
        assert self.unwrap(space, w_res) == [2]
    def test_subscript_assign(self, space):
        """Array#[]=: single-index, negative-index, and (start, length)
        splice assignment, including growth and error cases."""
        w_res = space.execute("a = [1]; a[0] = 42; return a")
        assert self.unwrap(space, w_res) == [42]
        # Assigning one past the end appends.
        w_res = space.execute("a = [1]; a[1] = 42; return a")
        assert self.unwrap(space, w_res) == [1, 42]
        w_res = space.execute("a = [1]; a[-1] = 42; return a")
        assert self.unwrap(space, w_res) == [42]
        with self.raises(space, "IndexError", "index -2 too small for array; minimum: -1"):
            space.execute("a = [1]; a[-2] = 42")
        # Zero-length splice inserts before the index.
        w_res = space.execute("a = [1, 2]; a[0, 0] = 42; return a")
        assert self.unwrap(space, w_res) == [42, 1, 2]
        w_res = space.execute("a = []; a[0, 0] = [3, 4, 5]; return a")
        assert self.unwrap(space, w_res) == [3, 4, 5]
        w_res = space.execute("a = [1, 2]; a[0, 1] = 42; return a")
        assert self.unwrap(space, w_res) == [42, 2]
        # Oversized length replaces through the end of the array.
        w_res = space.execute("a = [1, 2]; a[0, 5] = 42; return a")
        assert self.unwrap(space, w_res) == [42]
        with self.raises(space, "IndexError", "negative length (-1)"):
            w_res = space.execute("a = [1, 2]; a[0, -1] = 42")
        w_res = space.execute("a = [1, 2]; a[-1, 1] = 42; return a")
        assert self.unwrap(space, w_res) == [1, 42]
        w_res = space.execute("a = [1, 2]; a[-2, 2] = 42; return a")
        assert self.unwrap(space, w_res) == [42]
    def test_length(self, space):
        """Array#length returns the element count."""
        w_res = space.execute("return [1, 2, 3].length")
        assert space.int_w(w_res) == 3
    def test_emptyp(self, space):
        """Array#empty? is true only for a zero-length array."""
        w_res = space.execute("return [].empty?")
        assert w_res is space.w_true
        w_res = space.execute("return [1].empty?")
        assert w_res is space.w_false
    def test_plus(self, space):
        """Array#+ concatenates; the RHS is coerced via to_ary or TypeError."""
        w_res = space.execute("return [1, 2] + [3]")
        assert self.unwrap(space, w_res) == [1, 2, 3]
        with self.raises(space, "TypeError", "can't convert Symbol into Array"):
            space.execute("[1, 2] + :not_an_array")
        # An object responding to to_ary is accepted as the RHS.
        w_res = space.execute("""
        class NotAnArray
          def to_ary
            [8, 7]
          end
        end
        return [9] + NotAnArray.new
        """)
        assert self.unwrap(space, w_res) == [9, 8, 7]
    def test_minus(self, space):
        """Array#- removes every occurrence of each RHS element."""
        w_res = space.execute("return [1, 1, 2, '3'] - [1, '3']")
        assert self.unwrap(space, w_res) == [2]
    def test_lshift(self, space):
        """Array#<< appends and returns the receiver."""
        w_res = space.execute("return [] << 1")
        assert self.unwrap(space, w_res) == [1]
    def test_concat(self, space):
        """Array#concat mutates in place and returns the same array."""
        w_res = space.execute("""
        a = [1, 2]
        b = a.concat([3, 4])
        return a, a == b
        """)
        assert self.unwrap(space, w_res) == [[1, 2, 3, 4], True]
    def test_zip(self, space):
        """Array#zip pairs elements positionally."""
        w_res = space.execute("return [1, 2, 3].zip([3, 2, 1])")
        assert self.unwrap(space, w_res) == [[1, 3], [2, 2], [3, 1]]
    def test_product(self, space):
        """Array#product yields the cartesian product."""
        w_res = space.execute("return [1, 2].product([3, 4])")
        assert self.unwrap(space, w_res) == [[1, 3], [1, 4], [2, 3], [2, 4]]
    def test_size(self, space):
        """Array#size is an alias for #length."""
        w_res = space.execute("return [1, 2].size")
        assert space.int_w(w_res) == 2
    def test_range_inclusive(self, space):
        """Array#[] with an inclusive Range, including negative endpoints,
        empty results, and to_int coercion of range bounds."""
        w_res = space.execute("return [1, 2, 3, 4, 5][1..2]")
        assert self.unwrap(space, w_res) == [2, 3]
        w_res = space.execute("return [1, 2, 3, 4, 5][1..-1]")
        assert self.unwrap(space, w_res) == [2, 3, 4, 5]
        w_res = space.execute("return [1, 2, 3, 4, 5][-2..-1]")
        assert self.unwrap(space, w_res) == [4, 5]
        # Start past the end is nil; start at the end is an empty array.
        w_res = space.execute("return [][-1..-2]")
        assert w_res == space.w_nil
        w_res = space.execute("return [][0..-2]")
        assert self.unwrap(space, w_res) == []
        w_res = space.execute("return [1, 2][-1..-2]")
        assert self.unwrap(space, w_res) == []
        # Non-integer bounds are coerced via to_int.
        w_res = space.execute("""
        class String; def to_int; 1; end; end
        return [1, 2, 3, 4, 5]["1".."1"]
        """)
        assert self.unwrap(space, w_res) == [2]
    def test_range_exclusive(self, space):
        """Array#[] with an exclusive Range excludes the end index."""
        w_res = space.execute("return [1, 2, 3, 4, 5][1...3]")
        assert self.unwrap(space, w_res) == [2, 3]
        w_res = space.execute("return [1, 2, 3, 4, 5][1...-1]")
        assert self.unwrap(space, w_res) == [2, 3, 4]
        w_res = space.execute("return [1, 2, 3, 4, 5][-2...-1]")
        assert self.unwrap(space, w_res) == [4]
    def test_range_assignment(self, space):
        """Array#[]= with a Range splices scalars or arrays over the span,
        inserting when the range is effectively empty."""
        w_res = space.execute("x = [1, 2, 3]; x[1..2] = 4; return x")
        assert self.unwrap(space, w_res) == [1, 4]
        w_res = space.execute("x = [1, 2, 3]; x[1..-2] = 4; return x")
        assert self.unwrap(space, w_res) == [1, 4, 3]
        w_res = space.execute("x = [1, 2, 3]; x[-3..-2] = 4; return x")
        assert self.unwrap(space, w_res) == [4, 3]
        # An empty range inserts before the start index.
        w_res = space.execute("x = [1, 2, 3]; x[-1..-2] = 4; return x")
        assert self.unwrap(space, w_res) == [1, 2, 4, 3]
        # Array RHS replaces the span element-for-element.
        w_res = space.execute("x = [1, 2, 3]; x[1..-2] = []; return x")
        assert self.unwrap(space, w_res) == [1, 3]
        w_res = space.execute("x = [1, 2, 3]; x[1..-2] = [4]; return x")
        assert self.unwrap(space, w_res) == [1, 4, 3]
        w_res = space.execute("x = [1, 2, 3]; x[1..-2] = [4, 5]; return x")
        assert self.unwrap(space, w_res) == [1, 4, 5, 3]
    def test_at(self, space):
        """Array#at fetches by index."""
        w_res = space.execute("return [1, 2, 3, 4, 5].at(2)")
        assert space.int_w(w_res) == 3
    def test_unshift(self, space):
        """Array#unshift prepends its arguments in order."""
        w_res = space.execute("return [1, 2].unshift(3, 4)")
        assert self.unwrap(space, w_res) == [3, 4, 1, 2]
    def test_join(self, space):
        """Array#join with no, string, and char separators; the separator
        is coerced via to_str (but not for an empty array)."""
        w_res = space.execute("return [1, 'a', :b].join")
        assert space.str_w(w_res) == "1ab"
        w_res = space.execute("return [1, 'a', :b].join('--')")
        assert space.str_w(w_res) == "1--a--b"
        w_res = space.execute("return [1, 'a', :b].join(?-)")
        assert space.str_w(w_res) == "1-a-b"
        with self.raises(space, "TypeError", "can't convert Symbol into String"):
            space.execute("[1].join(:foo)")
        # Empty arrays never touch the separator, so no TypeError here.
        w_res = space.execute("return [].join(:foo)")
        assert space.str_w(w_res) == ""
        w_res = space.execute("""
        class A; def to_str; 'A'; end; end
        return [1, 2].join(A.new)
        """)
        assert space.str_w(w_res) == "1A2"
    def test_dup(self, space):
        """Array#dup makes a shallow copy detached from the original."""
        w_res = space.execute("""
        x = [1, 2, 3]
        y = x.dup
        x << 4
        return [x, y]
        """)
        x, y = self.unwrap(space, w_res)
        assert x == [1, 2, 3, 4]
        assert y == [1, 2, 3]
    def test_compact(self, space):
        """Array#compact drops nil elements."""
        w_res = space.execute("return ['a', nil, 'b', nil, 'c'].compact")
        assert self.unwrap(space, w_res) == ['a', 'b', 'c']
    def test_rejectbang(self, space):
        """Array#reject! returns nil when nothing was removed, else the array."""
        w_res = space.execute("return [1, 2, 3, 4].reject! { false }")
        assert w_res == space.w_nil
        w_res = space.execute("return [1, 2, 3, 4].reject! { true }")
        assert space.listview(w_res) == []
    def test_delete_if(self, space):
        """Array#delete_if removes elements for which the block is truthy,
        mutating in place and returning the receiver."""
        w_res = space.execute("""
        a = [1, 2, 3]
        a.delete_if { true }
        return a
        """)
        assert self.unwrap(space, w_res) == []
        w_res = space.execute("""
        a = [1, 2, 3, 4]
        return a.delete_if {|x| x > 2 }
        """)
        assert self.unwrap(space, w_res) == [1, 2]
        w_res = space.execute("""
        a = [1, 2, 3, 4]
        return a.delete_if {|x| x == 2 || x == 4 }
        """)
        assert self.unwrap(space, w_res) == [1, 3]
        w_res = space.execute("""
        a = [1, 2, 3, 4]
        return a.delete_if {|x| x == 1 || x == 3 }
        """)
        assert self.unwrap(space, w_res) == [2, 4]
    def test_pop(self, space):
        """Array#pop: no-arg returns the last element (or nil); with a count
        it returns up to that many trailing elements as an array."""
        assert self.unwrap(space, space.execute("return [1, 2, 3].pop")) == 3
        assert self.unwrap(space, space.execute("return [1, 2, 3].pop(0)")) == []
        assert self.unwrap(space, space.execute("return [1, 2, 3].pop(1)")) == [3]
        assert self.unwrap(space, space.execute("return [1, 2, 3].pop(2)")) == [2, 3]
        # A count larger than the array returns everything.
        assert self.unwrap(space, space.execute("return [1, 2, 3].pop(10)")) == [1, 2, 3]
        assert self.unwrap(space, space.execute("return [].pop(1)")) == []
        assert self.unwrap(space, space.execute("return [].pop")) is None
        with self.raises(space, "ArgumentError"):
            space.execute("[1].pop(-1)")
        with self.raises(space, "TypeError"):
            space.execute("[1].pop('a')")
    def test_delete(self, space):
        """Array#delete removes all equal elements and returns the value;
        a miss returns nil, or the block's result when a block is given."""
        w_res = space.execute("""
        a = [ "a", "b", "b", "b", "c" ]
        r = []
        r << a.delete("b")
        r << a
        r << a.delete("z")
        r << a.delete("z") { "not found" }
        return r
        """)
        assert self.unwrap(space, w_res) == ["b", ["a", "c"], None, "not found"]
def test_delete_at(self, space):
w_res = space.execute("""
res = []
a = ["ant", "bat", "cat", "dog"]
res << a.delete_at(2) #=> "cat"
res << a #=> ["ant", "bat", | |
# repo: JediKoder/coursera-CodeMatrix
# version code 9913d98afecb+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
from vecutil import list2vec
from GF2 import one
from solver import solve
from matutil import listlist2mat, coldict2mat
from mat import Mat
from vec import Vec
## 1: (Problem 1) Iterative Exchange Lemma
w0 = list2vec([1,0,0])
w1 = list2vec([0,1,0])
w2 = list2vec([0,0,1])

v0 = list2vec([1,2,3])
v1 = list2vec([1,3,3])
v2 = list2vec([0,3,3])

# Fill in exchange_S1 and exchange_S2
# with appropriate lists of 3 vectors
# Each step swaps one w out for one v while keeping the list a spanning
# set of R^3 (Exchange Lemma):
#   S1: v0 = 1*w0 + 2*w1 + 3*w2, so w0 (nonzero coefficient) can be ejected.
#   S2: [v0, v1, w2] has determinant 1, hence still spans.
exchange_S0 = [w0, w1, w2]
exchange_S1 = [v0, w1, w2]
exchange_S2 = [v0, v1, w2]
exchange_S3 = [v0, v1, v2]

## 2: (Problem 2) Another Iterative Exchange Lemma
w0 = list2vec([0,one,0])
w1 = list2vec([0,0,one])
w2 = list2vec([one,one,one])

v0 = list2vec([one,0,one])
v1 = list2vec([one,0,0])
v2 = list2vec([one,one,0])

# Over GF(2): v0 = w0 + w2, so w0 can be ejected first;
# then v1 = v0 + w1, so w1 is ejected next.  Each intermediate list
# still spans GF(2)^3.
exchange_2_S0 = [w0, w1, w2]
exchange_2_S1 = [v0, w1, w2]
exchange_2_S2 = [v0, v1, w2]
exchange_2_S3 = [v0, v1, v2]
## 3: (Problem 3) Morph Lemma Coding
def morph(S, B):
    '''
    Input:
        - S: a list of distinct Vecs
        - B: a list of linearly independent Vecs all in Span S
    Output: a list of pairs of vectors to inject and eject (see problem description)

    For each vector z of B in order, z is injected and some original vector
    of S with a nonzero coefficient in z's representation (and not itself a
    previously injected vector) is ejected, per the Exchange Lemma.  S is
    not modified.

    Example:
        >>> from vecutil import list2vec
        >>> from vec import Vec
        >>> S = [list2vec(v) for v in [[1,0,0],[0,1,0],[0,0,1]]]
        >>> B = [list2vec(v) for v in [[1,1,0],[0,1,1],[1,0,1]]]
        >>> D = {0, 1, 2}
        >>> morph(S, B) == [(Vec(D,{0: 1, 1: 1, 2: 0}), Vec(D,{0: 1, 1: 0, 2: 0})), (Vec(D,{0: 0, 1: 1, 2: 1}), Vec(D,{0: 0, 1: 1, 2: 0})), (Vec(D,{0: 1, 1: 0, 2: 1}), Vec(D,{0: 0, 1: 0, 2: 1}))]
        True
        >>> S == [list2vec(v) for v in [[1,0,0],[0,1,0],[0,0,1]]]
        True
        >>> B == [list2vec(v) for v in [[1,1,0],[0,1,1],[1,0,1]]]
        True
    '''
    S_work = list(S)     # copy: the input list must not be modified
    injected = []        # Vecs from B already swapped into S_work
    pairs = []
    for z in B:
        # Representation of z in terms of the current spanning list.
        rep = solve(coldict2mat(S_work), z)
        eject_index = None
        for idx, w in enumerate(S_work):
            # Never eject a vector we injected ourselves.
            if any(w is b for b in injected):
                continue
            coeff = rep[idx]
            try:
                # Float solves leave numerical noise; treat tiny
                # coefficients as zero.
                significant = abs(coeff) > 1e-10
            except TypeError:
                # GF2's `one` does not support abs(); exact compare is fine.
                significant = coeff != 0
            if significant:
                eject_index = idx
                break
        ejected = S_work[eject_index]
        pairs.append((z, ejected))
        S_work[eject_index] = z
        injected.append(z)
    return pairs
## 4: (Problem 4) Row and Column Rank Practice
# Please express each solution as a list of Vecs
# NOTE(review): the four matrices these answers refer to live in the
# assignment text, not in this file, so the placeholders cannot be filled
# in from here.
row_space_1 = [...]
col_space_1 = [...]
row_space_2 = [...]
col_space_2 = [...]
row_space_3 = [...]
col_space_3 = [...]
row_space_4 = [...]
col_space_4 = [...]
## 5: (Problem 5) Subset Basis
def subset_basis(T):
    '''
    Input:
        - T: a set of Vecs
    Output:
        - set S containing Vecs from T that is a basis for Span T.

    Greedy Grow algorithm: keep a vector only if it is not already in the
    span of the vectors kept so far.  Span membership is tested by solving
    coldict2mat(basis) * x = v and checking the residual, which works for
    both float/int entries and GF(2) (where `solve` is exact).

    Examples:
        >>> from vec import Vec
        >>> from independence import is_independent
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
        >>> sb = subset_basis({a0, a1, a2, a3})
        >>> len(sb)
        3
        >>> all(v in [a0, a1, a2, a3] for v in sb)
        True
        >>> is_independent(sb)
        True
    '''
    basis = []
    for v in T:
        if not basis:
            # The zero vector never belongs in a basis.
            if not v.is_almost_zero():
                basis.append(v)
            continue
        rep = solve(coldict2mat(basis), v)
        residual = coldict2mat(basis) * rep - v
        if not residual.is_almost_zero():
            basis.append(v)
    return set(basis)
## 6: (Problem 6) Superset Basis Lemma in Python
def superset_basis(C, T):
    '''
    Input:
        - C: linearly independent set of Vecs
        - T: set of Vecs such that every Vec in C is in Span(T)
    Output:
        Linearly independent set S consisting of all Vecs in C and some in T
        such that the span of S is the span of T (i.e. S is a basis for the span
        of T).

    Starts from C (all of which must be kept) and grows with vectors of T
    that are not yet in the span, using a solve-and-check-residual test.

    Example:
        >>> from vec import Vec
        >>> from independence import is_independent
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
        >>> sb = superset_basis({a0, a3}, {a0, a1, a2})
        >>> a0 in sb and a3 in sb
        True
        >>> is_independent(sb)
        True
        >>> all(x in [a0,a1,a2,a3] for x in sb)
        True
    '''
    basis = list(C)
    for v in T:
        if not basis:
            if not v.is_almost_zero():
                basis.append(v)
            continue
        rep = solve(coldict2mat(basis), v)
        if not (coldict2mat(basis) * rep - v).is_almost_zero():
            basis.append(v)
    return set(basis)
## 7: (Problem 7) My Is Independent Procedure
def my_is_independent(L):
    '''
    Input:
        - L: a list of Vecs
    Output:
        - boolean: true if the list is linearly independent

    The list is independent iff every vector, taken in order, lies outside
    the span of its predecessors (equivalently, rank(L) == len(L)).

    Examples:
        >>> D = {0, 1, 2}
        >>> L = [Vec(D,{0: 1}), Vec(D,{1: 1}), Vec(D,{2: 1}), Vec(D,{0: 1, 1: 1, 2: 1}), Vec(D,{0: 1, 1: 1}), Vec(D,{1: 1, 2: 1})]
        >>> my_is_independent(L)
        False
        >>> my_is_independent(L[:2])
        True
        >>> my_is_independent(L[:3])
        True
        >>> my_is_independent(L[1:4])
        True
        >>> my_is_independent(L[0:4])
        False
        >>> my_is_independent(L[2:])
        False
        >>> my_is_independent(L[2:5])
        False
    '''
    basis = []
    for v in L:
        if not basis:
            # A zero vector makes any list dependent.
            if v.is_almost_zero():
                return False
            basis.append(v)
            continue
        rep = solve(coldict2mat(basis), v)
        if (coldict2mat(basis) * rep - v).is_almost_zero():
            return False        # v is in the span of the earlier vectors
        basis.append(v)
    return True
## 8: (Problem 8) My Rank
def my_rank(L):
    '''
    Input:
        - L: a list of Vecs
    Output:
        - the rank of the list of Vecs

    Counts how many vectors survive the Grow algorithm: each vector is kept
    only if it is not in the span of those kept before it.

    Example:
        >>> L = [list2vec(v) for v in [[1,2,3],[4,5,6],[1.1,1.1,1.1]]]
        >>> my_rank(L)
        2
        >>> my_rank([list2vec(v) for v in [[1,1,1],[2,2,2],[3,3,3],[4,4,4],[123,432,123]]])
        2
    '''
    basis = []
    for v in L:
        if not basis:
            if not v.is_almost_zero():
                basis.append(v)
            continue
        rep = solve(coldict2mat(basis), v)
        if not (coldict2mat(basis) * rep - v).is_almost_zero():
            basis.append(v)
    return len(basis)
## 9: (Problem 9) Direct Sum Unique Representation
def direct_sum_decompose(U_basis, V_basis, w):
    '''
    Input:
        - U_basis: a list of Vecs forming a basis for a vector space U
        - V_basis: a list of Vecs forming a basis for a vector space V
        - w: a Vec in the direct sum of U and V
    Output:
        - a pair (u, v) such that u + v = w, u is in U, v is in V

    Because U and V form a direct sum, the concatenated list
    U_basis + V_basis is a basis for U + V, so w has a unique
    representation in it.  The coefficients belonging to U_basis give u
    and the rest give v.

    Example:
        >>> D = {0,1,2,3,4,5}
        >>> U_basis = [Vec(D,{0: 2, 1: 1, 2: 0, 3: 0, 4: 6, 5: 0}), Vec(D,{0: 11, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0}), Vec(D,{0: 3, 1: 1.5, 2: 0, 3: 0, 4: 7.5, 5: 0})]
        >>> V_basis = [Vec(D,{0: 0, 1: 0, 2: 7, 3: 0, 4: 0, 5: 1}), Vec(D,{0: 0, 1: 0, 2: 15, 3: 0, 4: 0, 5: 2})]
        >>> w = Vec(D,{0: 2, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0})
        >>> (u, v) = direct_sum_decompose(U_basis, V_basis, w)
        >>> (u + v - w).is_almost_zero()
        True
    '''
    combined = U_basis + V_basis
    # Coefficients of w in the concatenated basis (domain is 0..len-1).
    rep = solve(coldict2mat(combined), w)
    zero = Vec(w.D, {})
    u = zero
    for i in range(len(U_basis)):
        u = u + rep[i] * combined[i]
    v = zero
    for i in range(len(U_basis), len(combined)):
        v = v + rep[i] * combined[i]
    return (u, v)
## 10: (Problem 10) Is Invertible Function
def is_invertible(M):
'''
input: A matrix, M
outpit: A boolean indicating if M is invertible.
>>> M = Mat(({0, 1, 2, 3}, {0, 1, 2, 3}), {(0, 1): 0, (1, 2): 1, (3, 2): 0, (0, 0): 1, (3, 3): 4, (3, 0): 0, (3, 1): 0, | |
# repo: gandhiy/lipMIP
""" General all-purpose utilities """
import sys
import torch
import torch.nn.functional as F
import numpy as np
import gurobipy as gb
import matplotlib.pyplot as plt
import io
import contextlib
import tempfile
import time
import re
import pickle
import inspect
import glob
import os
COMPLETED_JOB_DIR = os.path.join(os.path.dirname(__file__), 'jobs', 'completed')
# ===============================================================================
# = Helpful all-purpose functions =
# ===============================================================================
class ParameterObject:
    """Attribute bag: every keyword argument becomes an attribute, and
    their names are recorded (in insertion order) in ``attr_list``."""

    def __init__(self, **kwargs):
        self.attr_list = []
        assert 'attr_list' not in kwargs
        for name, value in kwargs.items():
            setattr(self, name, value)
            self.attr_list.append(name)

    def change_attrs(self, **kwargs):
        """Return a new instance of this class with the given attributes
        overridden and all others copied from ``self``."""
        merged = {name: (kwargs[name] if name in kwargs else getattr(self, name))
                  for name in self.attr_list}
        return self.__class__(**merged)
class Factory(ParameterObject):
    """ParameterObject that builds objects on demand: stored attributes act
    as default constructor arguments, overridable per call."""

    def __init__(self, constructor, **kwargs):
        self.constructor = constructor
        super(Factory, self).__init__(**kwargs)

    def __call__(self, **kwargs):
        """Build an object, passing only kwargs the constructor accepts."""
        cons_args = inspect.getfullargspec(self.constructor).args
        # Make default args from attributes
        args = {k: getattr(self, k) for k in self.attr_list if k in cons_args}
        # Update the default args
        for k, v in kwargs.items():
            if k in cons_args:
                args[k] = v
        # Build object
        return self.constructor(**args)

    def __repr__(self):
        # BUG FIX: the original used `self.constructor.__self__.__name__`,
        # which raises AttributeError for plain functions and classes —
        # only bound methods have __self__.  Fall back gracefully.
        owner = getattr(self.constructor, '__self__', self.constructor)
        name = getattr(owner, '__name__', repr(owner))
        return '<Factory: %s>' % name
class DoEvery:
    """Wraps a function so that only every ``freq``-th call actually runs
    it; the other calls hit a no-op.

    ARGS:
        func: function object to be returned every freq iterations
        freq: int - how often to return the function
    """

    @classmethod
    def dummy(cls, *args, **kwargs):
        # No-op stand-in used on the skipped iterations.
        pass

    def __init__(self, func, freq):
        self.func = func
        self.freq = freq
        self.i = 0

    def __call__(self, *args, **kwargs):
        # Run the real function on iterations 0, freq, 2*freq, ...
        target = self.func if self.i % self.freq == 0 else self.dummy
        self.i += 1
        return target(*args, **kwargs)
class Timer:
    """Tiny wall-clock stopwatch built on time.time()."""

    def __init__(self, start_on_init=True):
        if start_on_init:
            self.start()

    def start(self):
        """Record the start timestamp."""
        self.start_time = time.time()

    def stop(self):
        """Record the stop timestamp and return the elapsed seconds."""
        self.stop_time = time.time()
        elapsed = self.stop_time - self.start_time
        return elapsed

    def reset(self):
        """Clear both timestamps."""
        self.start_time = self.stop_time = None
def cpufy(tensor_iter):
    """Takes a list of tensors and safely pushes them back onto the cpu.
    NOTE: this module redefines cpufy/cudafy later with tuple support;
    the later definitions shadow these."""
    return [tensor.cpu() for tensor in tensor_iter]
def cudafy(tensor_iter):
    """Takes a list of tensors and safely converts all of them to cuda.
    Elements whose .cuda() raises AssertionError (e.g. no CUDA build) are
    passed through unchanged."""
    def _maybe_cuda(item):
        try:
            return item.cuda()
        except AssertionError:
            return item
    return [_maybe_cuda(item) for item in tensor_iter]
def prod(num_iter):
    """Return the product of all elements of the iterator (1 if empty)."""
    result = 1
    for factor in num_iter:
        result *= factor
    return result
def partition(n, m):
    """Yield chunks of size m that sum to n; the final chunk is the
    remainder (n % m) when n is not a multiple of m."""
    remaining = n
    while remaining > 0:
        yield min(m, remaining)
        remaining -= m
def flatten_list(lol):
    """Recursively flatten a list of (possibly nested) lists into one
    flat list; non-list elements are kept as-is."""
    flat = []
    for element in lol:
        if isinstance(element, list):
            flat.extend(flatten_list(element))
        else:
            flat.append(element)
    return flat
def partition_by_suffix(iterable, func):
    """Split ``iterable`` into sublists, each ending with an element for
    which ``func`` returns truthy (except possibly the last).

    e.g.
        iterable := [1, 2, 3, 4, 5, 5, 5]
        func     := lambda x: (x % 2) == 0
        returns [[1, 2], [3, 4], [5, 5, 5]]
    """
    groups = []
    current = []
    for element in iterable:
        current.append(element)
        if func(element):
            groups.append(current)
            current = []
    if current:
        groups.append(current)
    return groups
def arraylike(obj):
    """True iff obj is a torch Tensor or a numpy ndarray."""
    return isinstance(obj, (np.ndarray, torch.Tensor))
def as_numpy(tensor_or_array):
    """Return the argument as a numpy array: tensors are detached and
    moved to CPU first; anything else passes through unchanged."""
    if isinstance(tensor_or_array, torch.Tensor):
        return tensor_or_array.cpu().detach().numpy()
    return tensor_or_array
def two_col(l, r):
    """Stack two length-N numpy arrays into an (N, 2) array, with ``l``
    in column 0 and ``r`` in column 1."""
    stacked = np.vstack([l, r])
    return stacked.T
def split_pos_neg(x):
    """Dispatch to the tensor or ndarray splitter depending on x's type."""
    if isinstance(x, torch.Tensor):
        return split_tensor_pos_neg(x)
    return split_ndarray_pos_neg(x)
def split_tensor_pos_neg(x):
    """Split a tensor elementwise into its nonnegative part and its
    nonpositive part, such that pos + neg == x."""
    positive = F.relu(x)
    negative = F.relu(-x).neg()
    return positive, negative
def split_ndarray_pos_neg(x):
    """Split a numpy ndarray elementwise into its nonnegative part and
    its nonpositive part, such that pos + neg == x."""
    nonneg_mask = x >= 0
    nonpos_mask = x <= 0
    return x * nonneg_mask, x * nonpos_mask
def swap_axes(x, source, dest):
    """Swap the ``source`` and ``dest`` dimensions of a torch tensor or
    numpy array.

    ARGS:
        x      : numpy array or tensor
        source : int index
        dest   : int index
    RETURNS:
        object with the same data as x but with the two axes swapped
    """
    if isinstance(x, torch.Tensor):
        return x.transpose(source, dest)
    return np.moveaxis(x, source, dest)
def build_var_namer(k):
    """Return a function mapping an index d to the gurobi-style variable
    name 'k[d]'."""
    def namer(d):
        return '%s[%s]' % (k, d)
    return namer
@contextlib.contextmanager
def silent():
    """Context manager that discards anything written to stdout inside the
    block by redirecting it to a throwaway temp file.

    BUG FIX: the original did not use try/finally, so an exception inside
    the block left sys.stdout permanently redirected and leaked the temp
    file.  Cleanup now always runs.
    """
    save_stdout = sys.stdout
    temp = tempfile.TemporaryFile(mode='w')
    sys.stdout = temp
    try:
        yield
    finally:
        sys.stdout = save_stdout
        temp.close()
def ia_mm(matrix, intervals, lohi_dim, matrix_or_vec='matrix'):
    """ Interval analysis matrix(-vec) multiplication for torch intervals
    ARGS:
        matrix : tensor of shape (m, n)
        intervals : tensor with shape (n1, ..., 2, n_i, ...) -
                    "vector" of intervals to be multiplied by a matrix;
                    one such n_i must be equal to n (from matrix shape)
        lohi_dim : int - which dimension (index) of intervals corresponds
                   to the lo/hi split
        matrix_or_vec : string - must be 'matrix' or 'vec'; whether
                        intervals is treated as a matrix or a vector
    RETURNS:
        tensor of the same type as intervals, with the contracted
        dimension of size m instead of n

    BUG FIXES vs. the original:
      - the vec-shape assert referenced an undefined name `dim`
        (`intervals[dim][-2]`); it now checks `intervals.shape[-2] == n`.
      - the final squeeze referenced `interval` (typo) instead of
        `intervals`, a guaranteed NameError on the vec path.
    """
    # asserts for shapes and things
    assert isinstance(matrix, torch.Tensor)      # TENSOR ONLY FOR NOW
    assert isinstance(intervals, torch.Tensor)
    m, n = matrix.shape
    assert intervals.shape[lohi_dim] == 2
    assert matrix_or_vec in ['matrix', 'vec']

    if matrix_or_vec == 'vec':
        intervals = intervals.unsqueeze(-1)
        assert lohi_dim != intervals.dim() - 2
        assert intervals.shape[-2] == n          # BUG FIX (was intervals[dim][-2])

    # Bring the lo/hi axis to the front so we can unpack it.
    intervals = intervals.transpose(0, lohi_dim)
    # Split the matrix into its nonnegative and nonpositive parts:
    # lows multiply positive entries, highs multiply negative ones.
    matrix_pos = matrix.clamp(min=0)
    matrix_neg = matrix.clamp(max=0)

    los, his = intervals
    new_los = matrix_pos.matmul(los) + matrix_neg.matmul(his)
    new_his = matrix_pos.matmul(his) + matrix_neg.matmul(los)
    intervals = torch.stack([new_los, new_his]).transpose(0, lohi_dim)

    if matrix_or_vec == 'vec':
        intervals = intervals.squeeze(-1)        # BUG FIX (was `interval`)
    return intervals
# =============================================================================
# = Image display functions =
# =============================================================================
def display_images(image_rows, figsize=(8, 8)):
    """ Given either a tensor/np.array (or list of same), will display each
        element in the row or tensor
    ARGS:
        image_rows: tensor or np.array or tensor[], np.array[] -
                    image or list of images to display; each element is
                    assumed to be a batch in NCHW layout — TODO confirm
        figsize: (w, h) matplotlib figure size in inches
    RETURNS: None, but displays images
    """
    # Normalize to a list of "rows" so single inputs and lists share a path.
    if not isinstance(image_rows, list):
        image_rows = [image_rows]
    np_rows = [as_numpy(row) for row in image_rows]
    # Transpose channel to last dimension and stack to make rows
    np_rows = [np.concatenate(_.transpose([0, 2, 3, 1]), axis=1)
               for _ in np_rows]
    # Now stack rows
    full_image = np.concatenate(np_rows, axis=0)
    # And then show image
    imshow_kwargs = {}
    # Single-channel images are shown in grayscale.
    if full_image.shape[-1] == 1:
        full_image = full_image.squeeze()
        imshow_kwargs['cmap'] = 'gray'
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot()
    ax.axis('off')
    ax.imshow(full_image, **imshow_kwargs)
    plt.show()
# ======================================================
# = Pytorch helpers =
# ======================================================
def seq_append(seq, module):
    """ Takes a nn.Sequential and a nn.Module and creates a new
        nn.Sequential with the module appended to it
    ARGS:
        seq: nn.Sequential object
        module: <inherits nn.Module>
    RETURNS:
        nn.Sequential object
    """
    # BUG FIX: this module imports `torch` but never `nn`, so the original
    # raised NameError at call time.  Import locally to keep the fix
    # self-contained.
    from torch import nn
    seq_modules = [seq[i] for i in range(len(seq))] + [module]
    return nn.Sequential(*seq_modules)
def cpufy(tensor_iter):
    """Move every tensor in the iterable to the CPU; tuple elements are
    handled memberwise (returning a tuple of CPU tensors)."""
    moved = []
    for item in tensor_iter:
        if isinstance(item, tuple):
            moved.append(tuple(member.cpu() for member in item))
        else:
            moved.append(item.cpu())
    return moved
def cudafy(tensor_iter):
    """Move every tensor (or tuple of tensors) in the iterable to CUDA;
    elements whose .cuda() raises AssertionError are passed through."""
    def _to_cuda(item):
        try:
            if isinstance(item, tuple):
                return tuple(member.cuda() for member in item)
            return item.cuda()
        except AssertionError:
            return item
    return [_to_cuda(item) for item in tensor_iter]
# =======================================
# = Polytope class =
# =======================================
class Polytope:
    """Represents a polytope of the form {x | Ax <= b}, where A is an
    (k, n) numpy array and b a length-k numpy array.  Feasibility queries
    are answered with gurobi LPs."""
    INPUT_KEY = 'input'
    SLACK_KEY = 'slack'

    def __init__(self, A, b):
        """ Represents a polytope of the form {x | AX <= b}
            (where everything is a numpy array)
        """
        self.A = A
        self.b = b

    def _input_from_model(self, model):
        """Read the optimized input variables out of a solved model."""
        var_namer = build_var_namer(self.INPUT_KEY)
        return np.array([model.getVarByName(var_namer(i)).X
                         for i in range(self.A.shape[1])])

    def _build_model(self, slack=False):
        """ Builds a gurobi model of this object.  With slack=True, a
        slack variable in [0, 1] is added to every constraint so that
        maximizing it finds a strictly interior point. """
        with silent():
            model = gb.Model()
        input_namer = build_var_namer(self.INPUT_KEY)
        input_vars = [model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
                                   name=input_namer(i))
                      for i in range(self.A.shape[1])]
        if slack == True:
            slack_var = model.addVar(lb=0, ub=1.0, name=self.SLACK_KEY)
        else:
            slack_var = 0
        for i, row in enumerate(self.A):
            model.addConstr(gb.LinExpr(row, input_vars) + slack_var <= self.b[i])
        model.update()
        return model

    def contains(self, x, tolerance=1e-6):
        """True iff Ax <= b holds (within tolerance) for every row."""
        return all(self.A @ x <= self.b + tolerance)

    def interior_point(self):
        """Return a strictly interior point by maximizing the slack."""
        model = self._build_model(slack=True)
        slack_var = model.getVarByName(self.SLACK_KEY)
        model.setObjective(slack_var, gb.GRB.MAXIMIZE)
        model.update()
        model.optimize()
        assert model.Status == 2        # 2 == GRB.OPTIMAL
        return self._input_from_model(model)

    def intersects_hbox(self, hbox):
        """ If this intersects a given hyperbox, returns a
            point contained in both
        """
        model = self._build_model(slack=True)
        input_namer = build_var_namer(self.INPUT_KEY)
        for i, (lb, ub) in enumerate(hbox):
            var = model.getVarByName(input_namer(i))
            # BUG FIX: `model.addConstr(lb <= var <= ub)` is a Python
            # chained comparison — the first comparison gets coerced to
            # bool instead of becoming a constraint.  Add the two bound
            # constraints separately.
            model.addConstr(var >= lb)
            model.addConstr(var <= ub)
        slack_var = model.getVarByName(self.SLACK_KEY)
        model.setObjective(slack_var, gb.GRB.MAXIMIZE)
        model.update()
        model.optimize()
        assert model.Status == 2        # 2 == GRB.OPTIMAL
        return self._input_from_model(model)
# =========================================================
# = experiment.Result object helpers =
# =========================================================
def filename_to_epoch(filename):
    """Extract the 4-digit epoch number from a filename containing an
    '_EPOCH####_' tag."""
    tag = re.search(r'_EPOCH\d{4}_', filename).group()
    return int(tag[-5:-1])
def read_result_files(result_files):
    """Unpickle every file in ``result_files`` and return a list of
    (filename, object) pairs; unreadable files are reported and skipped."""
    loaded = []
    for path in result_files:
        try:
            with open(path, 'rb') as handle:
                loaded.append((path, pickle.load(handle)))
        except Exception as err:
            print("Failed on file: ", path, err)
    return loaded
def job_out_series(job_outs, eval_style, method,
                   value_or_time='value', avg_stdev='avg'):
    """ Takes in some result or resultList objects and
    a 'method', and desired object, and returns these objects
    in a list
    ARGS:
        job_outs: dict-like[] mapping eval_style -> Result or ResultList
        eval_style: str - which method of Experiment we look at
        method: str - which Lipschitz-estimation technique to consider
        value_or_time: 'value' or 'time' - which number to return
        avg_stdev: 'avg' or 'stdev' - for ResultList[], we can
                   get average or stdev values
    RETURNS:
        list of floats
    """
    # check everything is the same type
    assert value_or_time in ['value', 'time']
    assert avg_stdev in ['avg', 'stdev']
    assert eval_style in ['do_random_evals', 'do_unit_hypercube_eval',
                          'do_data_evals', 'do_large_radius_evals']
    results = [job_out[eval_style] for job_out in job_outs]
    output = []
    for result in results:
        try:  # Result object case
            if value_or_time == 'value':
                output.append(result.values(method))
            else:
                output.append(result.compute_times(method))
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Fall back to the ResultList interface only on
        # ordinary errors.
        except Exception:
            triple = result.average_stdevs(value_or_time)[method]
            if avg_stdev == 'avg':
                output.append(triple[0])
            else:
                output.append(triple[1])
    return output
def collect_result_outs(filematch):
""" Uses glob to collect and load result objects matching a series
ARGS:
filematch: string with *'s associated with it
e.g. | |
"""helper functions for Microsoft Graph"""
# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license.
# See LICENSE in the project root for license information.
# Note: This started from an ADAL sample and is being customized once I learned
# ADAL Python sucks and MSAL Python is the way to go (but didn't provide samples
# as nice as this one)
import base64
import mimetypes
import os
import sys
import urllib
import webbrowser
import json
import logging
# import adal
import msal
# import pyperclip
import requests
import atexit
import pickle
import datetime
from . import config
def api_endpoint(url):
    """Expand a relative path such as /me/photo/$value into a full URI.

    Uses the current RESOURCE and API_VERSION settings in config.py.
    Absolute http(s) URLs are returned unchanged.
    """
    parsed = urllib.parse.urlparse(url)
    if parsed.scheme in ["http", "https"]:
        # Already a complete URL; nothing to join.
        return url
    base = f"{config.RESOURCE}/{config.API_VERSION}/"
    return urllib.parse.urljoin(base, url.lstrip("/"))
# def refresh_flow_session_adal(client_id, refresh_token):
# """Obtain an access token from Azure AD (via device flow) and create
# a Requests session instance ready to make authenticated calls to
# Microsoft Graph.
# client_id = Application ID for registered "Azure AD only" V1-endpoint app
# refresh_token = existing token stored somewhere that we should try to open
# Returns Requests session object if user signed in successfully. The session
# includes the access token in an Authorization header.
# User identity must be an organizational account (ADAL does not support MSAs).
# """
# ctx = adal.AuthenticationContext(config.AUTHORITY_URL, api_version=None)
# token_response = ctx.acquire_token_with_refresh_token(
# refresh_token, client_id, config.RESOURCE
# )
# if not token_response.get("accessToken", None):
# return None
# session = requests.Session()
# session.headers.update(
# {
# "Authorization": f'Bearer {token_response["accessToken"]}',
# "SdkVersion": "sample-python-adal",
# "x-client-SKU": "sample-python-adal",
# }
# )
# return session
# def device_flow_session_adal(client_id, *, auto=False, secret=None):
# """Obtain an access token from Azure AD (via device flow) and create
# a Requests session instance ready to make authenticated calls to
# Microsoft Graph.
# client_id = Application ID for registered "Azure AD only" V1-endpoint app
# auto = whether to copy device code to clipboard and auto-launch browser
# Returns Requests session object if user signed in successfully. The session
# includes the access token in an Authorization header.
# User identity must be an organizational account (ADAL does not support MSAs).
# """
# ctx = adal.AuthenticationContext(config.AUTHORITY_URL, api_version=None)
# device_code = ctx.acquire_user_code(config.RESOURCE, client_id)
# # display user instructions
# if auto:
# pyperclip.copy(device_code["user_code"]) # copy user code to clipboard
# webbrowser.open(device_code["verification_url"]) # open browser
# print(
# f'The code {device_code["user_code"]} has been copied to your clipboard, '
# f'and your web browser is opening {device_code["verification_url"]}. '
# "Paste the code to sign in."
# )
# else:
# print(device_code["message"])
# token_response = ctx.acquire_token_with_device_code(
# config.RESOURCE, device_code, client_id
# )
# if not token_response.get("accessToken", None):
# return None
# refresh_token = token_response.get("refreshToken", None)
# session = requests.Session()
# session.headers.update(
# {
# "Authorization": f'Bearer {token_response["accessToken"]}',
# "SdkVersion": "sample-python-adal",
# "x-client-SKU": "sample-python-adal",
# }
# )
# return (session, refresh_token)
# def tryLoginAdal(client_id):
# """
# This will try to look for an existing refresh token in a pickle,
# then refresh that login first with ADAL login before
# going into the device flow session.
# """
# refresh_token = None
# path = "microsoft.pickle"
# if os.path.exists(path):
# with open(path, "rb") as token:
# refresh_token = pickle.load(token)
# session = None
# if refresh_token:
# session = refresh_flow_session_adal(client_id, refresh_token)
# if not session:
# session, refresh_token = device_flow_session_adal(client_id, auto=True)
# # Save the credentials for the next run
# with open(path, "wb") as token:
# pickle.dump(refresh_token, token)
# return session
def device_flow_session_msal(client_id, scope):
    """Obtain an access token from Microsoft (via device flow) and create
    a Requests session instance ready to make authenticated calls to
    Microsoft Graph.

    client_id = Application ID
    scope = what we want access to
    Returns Requests session object (with the access token in an
    Authorization header) if user signed in successfully, otherwise None.

    Adapted from ADAL sample and otherwise taken from https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/dev/sample/device_flow_sample.py
    """
    cache = msal.SerializableTokenCache()
    CACHE_FILE = "microsoft.bin"
    if os.path.exists(CACHE_FILE):
        # FIX: close the cache file deterministically (was an unclosed open()).
        with open(CACHE_FILE, "r") as cache_file:
            cache.deserialize(cache_file.read())

    def _persist_cache():
        """Write the token cache at interpreter exit, only if it changed."""
        if cache.has_state_changed:
            # FIX: context manager instead of a leaked open() in a lambda.
            with open(CACHE_FILE, "w") as cache_file:
                cache_file.write(cache.serialize())

    atexit.register(_persist_cache)
    app = msal.PublicClientApplication(
        client_id, authority=config.AUTHORITY_URL, token_cache=cache
    )
    result = None
    accounts = app.get_accounts()
    if accounts:
        logging.info("Account(s) exists in cache, probably with token too. Let's try.")
        # Take the first cached account rather than prompting for a choice.
        chosen = accounts[0]
        # Now let's try to find a token in cache for this account
        result = app.acquire_token_silent(scope, account=chosen)
    if not result:
        logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
        flow = app.initiate_device_flow(scopes=scope)
        if "user_code" not in flow:
            raise ValueError(
                "Fail to create device flow. Err: %s" % json.dumps(flow, indent=4)
            )
        print(flow["message"])
        sys.stdout.flush()  # Some terminal needs this to ensure the message is shown
        result = app.acquire_token_by_device_flow(flow)  # By default it will block
        # See https://msal-python.readthedocs.io/en/latest/#msal.PublicClientApplication.acquire_token_by_device_flow
        # for shortening or disabling the blocking behavior.
    if "access_token" in result:
        session = requests.Session()
        session.headers.update({"Authorization": f'Bearer {result["access_token"]}'})
        return session
    else:
        print(result.get("error"))
        print(result.get("error_description"))
        print(result.get("correlation_id"))  # You may need this when reporting a bug
        return None
def profile_photo(session, *, user_id="me", save_as=None):
    """Get profile photo, and optionally save a local copy.

    session = requests.Session() instance with Graph access token
    user_id = Graph id value for the user, or 'me' (default) for current user
    save_as = optional filename to save the photo locally. Should not include an
              extension - the extension is determined by photo's content type.
    Returns a tuple of the photo (raw data), HTTP status code, content type, saved filename.
    """
    # FIX: the non-'me' branch was missing the /photo segment
    # (was f"users/{user_id}/$value"), so both the photo request and the
    # metadata endpoint derived by stripping "/$value" below hit the wrong
    # resource.
    endpoint = "me/photo/$value" if user_id == "me" else f"users/{user_id}/photo/$value"
    photo_response = session.get(api_endpoint(endpoint), stream=True)
    photo_status_code = photo_response.status_code
    if photo_response.ok:
        photo = photo_response.raw.read()
        # note we remove /$value from endpoint to get metadata endpoint
        metadata_response = session.get(api_endpoint(endpoint[:-7]))
        content_type = metadata_response.json().get("@odata.mediaContentType", "")
    else:
        photo = ""
        content_type = ""
    if photo and save_as:
        extension = content_type.split("/")[1]
        filename = save_as + "." + extension
        with open(filename, "wb") as fhandle:
            fhandle.write(photo)
    else:
        filename = ""
    return (photo, photo_status_code, content_type, filename)
def get_user(session, *, user_id="me"):
    """Fetch the Graph user object as JSON.

    session = requests.Session() instance with Graph access token
    user_id = Graph id value for the user, or 'me' (default) for current user
    Returns the whole JSON for the user request; raises on HTTP errors.
    """
    target = "me" if user_id == "me" else f"users/{user_id}"
    resp = session.get(api_endpoint(target))
    resp.raise_for_status()
    return resp.json()
def get_mail(session, *, user_id="me", mailid):
    """Get a mail message, with the body rendered as plain text.

    session = requests.Session() instance with Graph access token
    user_id = Graph id value for the user, or 'me' (default) for current user
    mailid  = Graph message id
    Returns the whole JSON for the message request; raises on HTTP errors.
    """
    if user_id == "me":
        target = f"me/messages/{mailid}"
    else:
        target = f"users/{user_id}/messages/{mailid}"
    headers = {"Prefer": 'outlook.body-content-type="text"'}
    response = session.get(api_endpoint(target), headers=headers)
    response.raise_for_status()
    return response.json()
def list_mail(
    session, *, user_id="me", folder=None, search=None, filter=None, select=None
):
    """List email from current user.

    session = requests.Session() instance with Graph access token
    user_id = Graph id value for the user, or 'me' (default) for current user
    folder = optional mail folder to restrict the listing to
    search = optional text to search for
    filter = optional filters to apply to search
    select = reduce result to only some columns
    Returns the whole JSON for the message request; raises on HTTP errors.
    """
    # Build the base path, optionally scoped to a folder.
    if folder:
        endpoint = (
            f"me/mailFolders/{folder}/messages"
            if user_id == "me"
            else f"users/{user_id}/mailFolders/{folder}/messages"
        )
    else:
        endpoint = "me/messages" if user_id == "me" else f"users/{user_id}/messages"
    # Collect OData query parameters, then append them in one pass.
    params = []
    if search:
        params.append('$search="%s"' % search)
    if filter:
        params.append('$filter="%s"' % filter)
    if select:
        params.append(f"$select={select}")
    if params:
        endpoint += "?" + "&".join(params)
    response = session.get(
        api_endpoint(endpoint), headers={"Prefer": 'outlook.body-content-type="text"'}
    )
    response.raise_for_status()
    return response.json()
def list_mail_folders(session, *, user_id="me"):
"""List mail folders from current user.
session = requests.Session() instance with Graph access | |
:param is_spot: :code:`<code>true</code>` if App Service plan is for Spot instances; otherwise,
:code:`<code>false</code>`.
:type is_spot: bool
:param capacity: Target capacity of the App Service plan (number of VMs).
:type capacity: int
:param hosting_environment: Name of App Service Environment where app or App Service plan
should be created.
:type hosting_environment: str
:param is_xenon: :code:`<code>true</code>` if App Service plan is running as a windows
container.
:type is_xenon: bool
:param container_registry_base_url: Base URL of the container registry.
:type container_registry_base_url: str
:param container_registry_username: Username for to access the container registry.
:type container_registry_username: str
:param container_registry_password: Password for to access the container registry.
:type container_registry_password: str
:param container_image_repository: Repository name (image name).
:type container_image_repository: str
:param container_image_tag: Image tag.
:type container_image_tag: str
:param container_image_platform: Platform (windows or linux).
:type container_image_platform: str
"""
    # msrest metadata: required fields and numeric bounds enforced at
    # serialization time.
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
        'location': {'required': True},
        'capacity': {'minimum': 1},
    }

    # msrest metadata: Python attribute name -> serialized JSON path and
    # wire type. The 'properties.*' keys nest under the ARM properties bag.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
        'sku_name': {'key': 'properties.skuName', 'type': 'str'},
        'need_linux_workers': {'key': 'properties.needLinuxWorkers', 'type': 'bool'},
        'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
        'capacity': {'key': 'properties.capacity', 'type': 'int'},
        'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
        'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
        'container_registry_base_url': {'key': 'properties.containerRegistryBaseUrl', 'type': 'str'},
        'container_registry_username': {'key': 'properties.containerRegistryUsername', 'type': 'str'},
        'container_registry_password': {'key': 'properties.containerRegistryPassword', 'type': 'str'},
        'container_image_repository': {'key': 'properties.containerImageRepository', 'type': 'str'},
        'container_image_tag': {'key': 'properties.containerImageTag', 'type': 'str'},
        'container_image_platform': {'key': 'properties.containerImagePlatform', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "ValidateResourceTypes"],
        location: str,
        server_farm_id: Optional[str] = None,
        sku_name: Optional[str] = None,
        need_linux_workers: Optional[bool] = None,
        is_spot: Optional[bool] = None,
        capacity: Optional[int] = None,
        hosting_environment: Optional[str] = None,
        is_xenon: Optional[bool] = None,
        container_registry_base_url: Optional[str] = None,
        container_registry_username: Optional[str] = None,
        container_registry_password: Optional[str] = None,
        container_image_repository: Optional[str] = None,
        container_image_tag: Optional[str] = None,
        container_image_platform: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(ValidateRequest, self).__init__(**kwargs)
        self.name = name
        self.type = type
        self.location = location
        self.server_farm_id = server_farm_id
        self.sku_name = sku_name
        self.need_linux_workers = need_linux_workers
        self.is_spot = is_spot
        self.capacity = capacity
        self.hosting_environment = hosting_environment
        self.is_xenon = is_xenon
        self.container_registry_base_url = container_registry_base_url
        self.container_registry_username = container_registry_username
        self.container_registry_password = container_registry_password
        self.container_image_repository = container_image_repository
        self.container_image_tag = container_image_tag
        self.container_image_platform = container_image_platform
class ValidateResponse(msrest.serialization.Model):
    """Describes the result of resource validation.

    :param status: Result of validation.
    :type status: str
    :param error: Error details for the case when validation fails.
    :type error: ~azure.mgmt.web.v2020_06_01.models.ValidateResponseError
    """

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ValidateResponseError'},
    }

    def __init__(
        self,
        *,
        status: Optional[str] = None,
        error: Optional["ValidateResponseError"] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(ValidateResponse, self).__init__(**kwargs)
        self.status = status
        self.error = error
class ValidateResponseError(msrest.serialization.Model):
    """Error details for when validation fails.

    :param code: Validation error code.
    :type code: str
    :param message: Validation error message.
    :type message: str
    """

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(ValidateResponseError, self).__init__(**kwargs)
        self.code = code
        self.message = message
class VirtualApplication(msrest.serialization.Model):
    """Virtual application in an app.

    :param virtual_path: Virtual path.
    :type virtual_path: str
    :param physical_path: Physical path.
    :type physical_path: str
    :param preload_enabled: :code:`<code>true</code>` if preloading is enabled; otherwise,
     :code:`<code>false</code>`.
    :type preload_enabled: bool
    :param virtual_directories: Virtual directories for virtual application.
    :type virtual_directories: list[~azure.mgmt.web.v2020_06_01.models.VirtualDirectory]
    """

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'virtual_path': {'key': 'virtualPath', 'type': 'str'},
        'physical_path': {'key': 'physicalPath', 'type': 'str'},
        'preload_enabled': {'key': 'preloadEnabled', 'type': 'bool'},
        'virtual_directories': {'key': 'virtualDirectories', 'type': '[VirtualDirectory]'},
    }

    def __init__(
        self,
        *,
        virtual_path: Optional[str] = None,
        physical_path: Optional[str] = None,
        preload_enabled: Optional[bool] = None,
        virtual_directories: Optional[List["VirtualDirectory"]] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(VirtualApplication, self).__init__(**kwargs)
        self.virtual_path = virtual_path
        self.physical_path = physical_path
        self.preload_enabled = preload_enabled
        self.virtual_directories = virtual_directories
class VirtualDirectory(msrest.serialization.Model):
    """Directory for virtual application.

    :param virtual_path: Path to virtual application.
    :type virtual_path: str
    :param physical_path: Physical path.
    :type physical_path: str
    """

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'virtual_path': {'key': 'virtualPath', 'type': 'str'},
        'physical_path': {'key': 'physicalPath', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        virtual_path: Optional[str] = None,
        physical_path: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(VirtualDirectory, self).__init__(**kwargs)
        self.virtual_path = virtual_path
        self.physical_path = physical_path
class VirtualIPMapping(msrest.serialization.Model):
    """Virtual IP mapping.

    :param virtual_ip: Virtual IP address.
    :type virtual_ip: str
    :param internal_http_port: Internal HTTP port.
    :type internal_http_port: int
    :param internal_https_port: Internal HTTPS port.
    :type internal_https_port: int
    :param in_use: Is virtual IP mapping in use.
    :type in_use: bool
    :param service_name: name of the service that virtual IP is assigned to.
    :type service_name: str
    """

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
        'internal_http_port': {'key': 'internalHttpPort', 'type': 'int'},
        'internal_https_port': {'key': 'internalHttpsPort', 'type': 'int'},
        'in_use': {'key': 'inUse', 'type': 'bool'},
        'service_name': {'key': 'serviceName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        virtual_ip: Optional[str] = None,
        internal_http_port: Optional[int] = None,
        internal_https_port: Optional[int] = None,
        in_use: Optional[bool] = None,
        service_name: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(VirtualIPMapping, self).__init__(**kwargs)
        self.virtual_ip = virtual_ip
        self.internal_http_port = internal_http_port
        self.internal_https_port = internal_https_port
        self.in_use = in_use
        self.service_name = service_name
class VirtualNetworkProfile(msrest.serialization.Model):
    """Specification for using a Virtual Network.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource id of the Virtual Network.
    :type id: str
    :ivar name: Name of the Virtual Network (read-only).
    :vartype name: str
    :ivar type: Resource type of the Virtual Network (read-only).
    :vartype type: str
    :param subnet: Subnet within the Virtual Network.
    :type subnet: str
    """

    # msrest metadata: fields the server populates; never sent in requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'subnet': {'key': 'subnet', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        subnet: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; keyword-only fields mirror ``_attribute_map``."""
        super(VirtualNetworkProfile, self).__init__(**kwargs)
        self.id = id
        # Read-only, server-populated fields start as None.
        self.name = None
        self.type = None
        self.subnet = subnet
class VnetGateway(ProxyOnlyResource):
    """The Virtual Network gateway contract. This is used to give the Virtual Network gateway access to the VPN package.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param vnet_name: The Virtual Network name.
    :type vnet_name: str
    :param vpn_package_uri: The URI where the VPN package can be downloaded.
    :type vpn_package_uri: str
    """

    # msrest metadata: fields the server populates; never sent in requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # msrest metadata: attribute name -> serialized JSON key and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
        'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        vnet_name: Optional[str] = None,
        vpn_package_uri: Optional[str] = None,
        **kwargs
    ):
        """Initialize the model; ``kind`` is forwarded to ProxyOnlyResource."""
        super(VnetGateway, self).__init__(kind=kind, **kwargs)
        self.vnet_name = vnet_name
        self.vpn_package_uri = vpn_package_uri
class VnetInfo(ProxyOnlyResource):
"""Virtual Network information contract.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_resource_id: The Virtual Network's resource ID.
:type vnet_resource_id: str
:ivar cert_thumbprint: The client certificate thumbprint.
:vartype cert_thumbprint: str
:param cert_blob: A certificate file (.cer) blob containing the public key of the private key
used to authenticate a
Point-To-Site VPN connection.
:type cert_blob: str
:ivar routes: The routes that this Virtual Network connection uses.
:vartype routes: list[~azure.mgmt.web.v2020_06_01.models.VnetRoute]
:ivar resync_required: :code:`<code>true</code>` if a resync is required; otherwise,
:code:`<code>false</code>`.
:vartype resync_required: bool
:param dns_servers: DNS servers to be used by this Virtual Network. This should be a
comma-separated list of IP addresses.
:type dns_servers: str
:param is_swift: Flag that is used to denote if this is VNET injection.
:type is_swift: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'cert_thumbprint': {'readonly': True},
'routes': {'readonly': True},
'resync_required': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_resource_id': {'key': 'properties.vnetResourceId', 'type': 'str'},
'cert_thumbprint': {'key': 'properties.certThumbprint', 'type': 'str'},
'cert_blob': {'key': 'properties.certBlob', 'type': 'str'},
'routes': {'key': 'properties.routes', 'type': '[VnetRoute]'},
'resync_required': {'key': 'properties.resyncRequired', 'type': 'bool'},
'dns_servers': {'key': 'properties.dnsServers', 'type': 'str'},
| |
json.loads(ipam_response.text)['message'].startswith('IP address not in selected subnet'):
# The IP address we're trying to add does not belong to the parent IPAM subnets
print_with_timestamp_and_log("%s. Skipping it" % json.loads(ipam_response.text)['message'], log_file)
else:
# The IPAM server returned a 5xx status code: Error on server side:
print_with_timestamp_and_log("IPAM DB Server side error. Retry later.", log_file)
cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
sys.exit(1)
def sync_from_dnac(time_tag, ipam_token, ipam_addresses_url, log_file, webex_teams_api):
    """Connect to DNA Center, import its hosts, then process them in IPAM using
    the passed ipam token, url and timestamp arguments.
    """
    # Pull the current host inventory from DNAC.
    dnac_hosts = dnac_get_url("host", log_file, EMAIL_CONTENT_FILE)["response"]
    print("\nSyncing hosts from DNA Center...")
    log_file.write("\nSyncing hosts from DNA Center...\n")
    # Register each DNAC host under the globally configured IPAM subnet.
    for host in dnac_hosts:
        host_record = {
            "subnetId": str(PHPIPAM_SUBNET_ID),
            "ip": host["hostIp"],
            "is_gateway": "0",
            "description": "Connected to %s" % host["connectedNetworkDeviceName"],
            "hostname": host["id"],
            "mac": host["hostMac"],
            "owner": TAG_DNAC,
            "note": str(time_tag)
        }
        process_host_in_ipam(host["hostIp"], ipam_token, ipam_addresses_url, host_record,
                             log_file, email_body, EMAIL_CONTENT_FILE, webex_teams_api)
def sync_from_static_csv(csv_file, time_tag, ipam_token, ipam_addresses_url, log_file, email_body,
                         EMAIL_CONTENT_FILE, webex_teams_api):
    """ Reads the host rows from the CSV file and process them in IPAM
    using the passed ipam token, url and timestamp arguments.
    """
    try:
        with open(csv_file, 'r', encoding='utf-8') as handle:
            rows = csv.reader(handle)
            print("\nSyncing static hosts from local CSV file...")
            log_file.write("\nSyncing static hosts from local CSV file...\n")
            for row in rows:
                # Guard clause: rows that do not start with a valid IPv4
                # address are logged and skipped.
                if not is_valid_ipv4_address(row[0]):
                    print_with_timestamp_and_log("Skipping an invalid host entry in CSV file: '%s'"
                                                 % row[0], log_file)
                    continue
                payload = {
                    "subnetId": str(PHPIPAM_SUBNET_ID),
                    "ip": row[0],
                    "is_gateway": "0",
                    "description": "N/A" if row[1] == '' else row[1],
                    "hostname": "N/A" if row[2] == '' else row[2],
                    #"mac": "00:00:00:00:00:00" if not row[3] else row[3],
                    "owner": TAG_STATIC,
                    "note": str(time_tag)
                }
                # Add the host to the IPAM:
                process_host_in_ipam(row[0], ipam_token, ipam_addresses_url, payload,
                                     log_file, email_body, EMAIL_CONTENT_FILE, webex_teams_api)
    except EnvironmentError:
        print_with_timestamp_and_log("Unable to open the CSV file. Please verify the file " \
            "variable in the environment file and retry.", log_file)
        cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
        sys.exit(1)
def sync_from_ms_dhcp_server(time_tag, ipam_token, ipam_addresses_url, log_file, EMAIL_CONTENT_FILE,
webex_teams_api):
"""Connect to DHCP server via PowerShell Remoting, import its dhcp scopes leases,
then process them in IPAM using the passed ipam token, url and timestamp arguments.
"""
client = Client(DHCP_SERVER_FQDN, username=DHCP_SERVER_USERNAME,
password=<PASSWORD>, ssl=DHCP_SERVER_SSL)
# validate that all entered DHCP scopes in the list env variable are valid IP subnet addresses.
# If any invalid entry is found, exit the program.
for scope in DHCP_SERVER_SCOPES:
if not is_valid_ipv4_address(scope):
print("At least one invalid scope is found in MS DHCP Server scopes list. " \
"Please use valid IP subnet DHCP scope entries and retry.")
sys.exit(1)
# All scopes are valid, proceed with the sync from the DHCP server:
for scope in DHCP_SERVER_SCOPES:
command = r"Get-DhcpServerv4Lease -Scopeid %s" % scope
try:
dhcp_server_output, streams, had_errors = client.execute_ps(command)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError,
pypsrp.exceptions.AuthenticationError, requests.exceptions.HTTPError) as error:
print_with_timestamp_and_log("Unable to connect to the DHCP Server. Please verify " \
"settings and reachability.", log_file)
cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
sys.exit(1)
formatted_dhcp_server_output = dhcp_server_output.split("\n")
print("\nSyncing the leased hosts from the MS DHCP Server, scope %s..." % scope)
log_file.write("\nSyncing the leased hosts from the MS DHCP Server, scope %s...\n" % scope)
# Iterate through the list of hosts leases for this scope, starting from index 3
# to skip the empty line, then column names line, then the delimiter line:
for lease in range(3,len(formatted_dhcp_server_output)-2):
lease_list = formatted_dhcp_server_output[lease].split()
payload = ""
# when length of lease_list is 8, this means all the fields are populated
# including the hostname
if (len(lease_list) == 8) & (lease_list[4] == "Active"):
payload = {
"subnetId": str(PHPIPAM_SUBNET_ID),
"ip": lease_list[0],
"is_gateway": "0",
"description": lease_list[3],
"hostname": lease_list[3],
"mac": lease_list[2],
"owner": TAG_MSDHCP,
"note": str(time_tag)
}
# when length of lease_list is 7, this means the hostname field is empty.
# MAC address field is shifted to the left after the string split.
elif (len(lease_list) == 7) & (lease_list[3] == "Active"):
payload = {
"subnetId": str(PHPIPAM_SUBNET_ID),
"ip": lease_list[0],
"is_gateway": "0",
"description": "N/A",
"hostname": "N/A",
"mac": lease_list[2],
"owner": TAG_MSDHCP,
"note": str(time_tag)
}
# Add the host to the IPAM if it's an active lease:
if payload != "":
process_host_in_ipam(lease_list[0], ipam_token, ipam_addresses_url, payload,
log_file, email_body, EMAIL_CONTENT_FILE, webex_teams_api)
def sync_from_ios_dhcp_server(time_tag, ipam_token, ipam_addresses_url, log_file, EMAIL_CONTENT_FILE,
webex_teams_api):
"""Connect to IOS DHCP server via VTY, and import its dhcp binding database into IPAM
"""
#Open the VTY session:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(IOS_DHCP_SWITCH, port=IOS_DHCP_PORT, username=IOS_DHCP_USERNAME,
password=<PASSWORD>, look_for_keys=False, allow_agent=False)
except (socket.error, paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.NoValidConnectionsError, paramiko.ssh_exception.SSHException) as error:
print_with_timestamp_and_log("Unable to connect to IOS DHCP Server %s. Please verify " \
"settings and reachability. %s" % (IOS_DHCP_SWITCH, error), log_file)
cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
sys.exit(1)
print("\nSyncing the leased hosts from the IOS DHCP Server %s..." % IOS_DHCP_SWITCH)
log_file.write("\nSyncing the leased hosts from the IOS DHCP Server %s...\n" % IOS_DHCP_SWITCH)
stdin, stdout, stderr = ssh_client.exec_command("show ip dhcp binding")
# If no errors, parse the CLI output, else return an error:
if str(stderr.read()) == "b''":
cli_output = str(stdout.read()).split("\\n")
for line in cli_output:
# If the line starts with an IP address, create a payload based on it.
# Else, skip this line and do nothing.
if is_valid_ipv4_address(line.split(" ")[0]):
payload = {
"subnetId": str(PHPIPAM_SUBNET_ID),
"ip": line.split()[0],
"is_gateway": "0",
"description": "Added via IOS DHCP Server",
"hostname": "N/A",
"mac": convert_mac_address_format(line.split()[1]),
"owner": TAG_IOSDHCP,
"note": str(time_tag)
}
process_host_in_ipam(line.split()[0], ipam_token, ipam_addresses_url,
payload, log_file, email_body, EMAIL_CONTENT_FILE, webex_teams_api)
else:
print_with_timestamp_and_log("Unable to get the DHCP output from IOS Switch %s. " \
"Please retry later." % IOS_DHCP_SWITCH, log_file)
def delete_stale_hosts(source, time_tag, ipam_token, ipam_addresses_url, log_file, email_body,
                       EMAIL_CONTENT_FILE, webex_teams_api):
    """Delete hosts that have not been added/refreshed by *source* in this run.

    ``source`` is one of the source tag variables defined globally. Staleness is
    detected by comparing the timestamp stored in each host's IPAM "note" field
    with ``time_tag``, the timestamp taken at the start of the script execution:

    * stale host owned solely by ``source``     -> DELETE it from IPAM
    * stale host owned by ``source`` and others -> PATCH it, removing ``source``
      from its owner list
    * any IPAM API failure                      -> log, clean up and exit(1)

    Updates the module-level ``email_flag`` from notify_via_log_email_teams()
    whenever a host is deleted.
    """
    global email_flag
    print("\nDeleting any stale hosts from IPAM server...")
    log_file.write("\nDeleting any stale hosts from IPAM server...\n")
    subnet_addresses_response = ipam_get_url("subnets/%s/addresses/" %(PHPIPAM_SUBNET_ID), log_file,
        EMAIL_CONTENT_FILE)
    if not subnet_addresses_response["success"]:
        # Could not get the addresses from the IPAM subnet
        print_with_timestamp_and_log("Unable to get the subnet addresses from the IPAM. " \
            "Please Retry later.", log_file)
        cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
        sys.exit(1)
    for host in subnet_addresses_response["data"]:
        host_sources_set = set(host["owner"].split(','))
        if host["note"] == str(time_tag):
            continue  # host was refreshed during this run; not stale
        if host_sources_set == {source}:
            # The stale host is owned by this source only: delete it entirely.
            ipam_address_delete_url = "http://%s:%s/api/%s/addresses/%s/" % (PHPIPAM_HOST,
                PHPIPAM_PORT, PHPIPAM_APPID, host["id"])
            try:
                ipam_address_delete_response = requests.request("DELETE", ipam_address_delete_url,
                    headers={'token': ipam_token, 'Content-Type': "application/json"})
                # BUG FIX: verify the HTTP status -- previously a 4xx/5xx reply
                # still fell through and logged a success message.
                ipam_address_delete_response.raise_for_status()
            except requests.exceptions.RequestException as error:
                # BUG FIX: the old bare `except:` formatted the response object,
                # which is unbound (NameError) when the request itself raised.
                print_with_timestamp_and_log("Could not delete Host %s. Returned message from " \
                    "server: %s" %(host["ip"], error), log_file)
                cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
                sys.exit(1)
            msg = "Host %s was deleted from IPAM DB" % host["ip"]
            email_flag = notify_via_log_email_teams(msg, log_file, email_body, webex_teams_api)
        elif source in host_sources_set:
            # Update the host with the new sources set (this source removed).
            host_sources_set.remove(source)
            new_payload = {"owner": ','.join(list(host_sources_set))}
            # Send the update API call:
            ipam_address_update_url = "http://%s:%s/api/%s/addresses/%s/" % (PHPIPAM_HOST,
                PHPIPAM_PORT, PHPIPAM_APPID, host["id"])
            try:
                ipam_address_update_response = requests.request("PATCH", ipam_address_update_url,
                    data=json.dumps(new_payload), headers={'token': ipam_token,
                    'Content-Type': "application/json"})
                ipam_address_update_response.raise_for_status()
            except requests.exceptions.RequestException:
                # Narrowed from a bare `except:` so unrelated errors are not hidden.
                print_with_timestamp_and_log("Error processing IPAM update API request. Please " \
                    "verify settings and reachability.", log_file)
                cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
                sys.exit(1)
def verify_ipam_subnet_usage(log_file, email_body, EMAIL_CONTENT_FILE):
    """Print and log a summary of the current usage of the IPAM subnet.

    Fetches the usage statistics for PHPIPAM_SUBNET_ID and writes a two-row
    table (header + values) to stdout and to ``log_file``. On API failure the
    error is logged and the process exits with status 1 after cleanup.
    """
    print("\nCurrent IPAM server subnet usage:")
    log_file.write("\nCurrent IPAM server subnet usage:\n")
    usage_response = ipam_get_url("subnets/%s/usage/" %(PHPIPAM_SUBNET_ID), log_file,
        EMAIL_CONTENT_FILE)
    if not usage_response["success"]:
        print_with_timestamp_and_log("Unable to get the subnet usage info from the IPAM. " \
            "Please retry later.", log_file)
        cleanup_before_exit(log_file, email_body, EMAIL_CONTENT_FILE)
        sys.exit(1)
    usage = usage_response["data"]
    row_format = "{0:20}{1:20}{2:20}{3:20}{4:20}"
    header_row = row_format.format("Subnet ID", "Used Hosts",
        "Free Hosts", "Used Percent", "Freehosts Percent")
    value_row = row_format.format(str(PHPIPAM_SUBNET_ID),
        usage["used"], usage["freehosts"],
        str(round(usage["Used_percent"], 3)),
        str(round(usage["freehosts_percent"], 3)))
    print(header_row)
    print(value_row)
    log_file.write("%s\n%s" % (header_row, value_row))
def send_email():
    """Send the email notification containing addresses additions/deletions logs.

    The email is sent only when the module-level ``email_flag`` is set, i.e. at
    least one address was learned or deleted during this run. The body is read
    from EMAIL_CONTENT_FILE; subject, sender, recipients and server come from
    the module-level EMAIL_* settings.
    """
    if not email_flag:
        return
    with open(EMAIL_CONTENT_FILE) as fp:
        msg = MIMEText(fp.read())
    msg['Subject'] = EMAIL_SUBJECT
    msg['From'] = EMAIL_FROM_ADDRESS
    msg['To'] = EMAIL_TO_ADDRESS_LIST
    # BUG FIX: close the SMTP connection even if send_message() raises; the
    # previous code only called quit() on the success path. SMTP's context
    # manager issues QUIT on exit.
    with smtplib.SMTP(EMAIL_SERVER) as sl:
        sl.send_message(msg)
#print("\nAn email listing the new | |
= None,
last_modified: Optional[str] = None,
e_tag: Optional[str] = None,
**kwargs
):
"""
:keyword name:
:paramtype name: str
:keyword last_modified:
:paramtype last_modified: str
:keyword e_tag:
:paramtype e_tag: str
"""
super(FileSystem, self).__init__(**kwargs)
self.name = name
self.last_modified = last_modified
self.e_tag = e_tag
class FileSystemList(msrest.serialization.Model):
    """A collection of FileSystem entries returned by a listing operation.

    :ivar filesystems: The file systems in this listing page.
    :vartype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
    """

    _attribute_map = {
        'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'},
    }

    def __init__(self, *, filesystems: Optional[List["FileSystem"]] = None, **kwargs):
        """
        :keyword filesystems: The file systems in this listing page.
        :paramtype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
        """
        super().__init__(**kwargs)
        self.filesystems = filesystems
class LeaseAccessConditions(msrest.serialization.Model):
    """Parameter group carrying lease-based access conditions.

    :ivar lease_id: If specified, the operation only succeeds if the resource's
     lease is active and matches this ID.
    :vartype lease_id: str
    """

    _attribute_map = {
        'lease_id': {'key': 'leaseId', 'type': 'str'},
    }

    def __init__(self, *, lease_id: Optional[str] = None, **kwargs):
        """
        :keyword lease_id: If specified, the operation only succeeds if the
         resource's lease is active and matches this ID.
        :paramtype lease_id: str
        """
        super().__init__(**kwargs)
        self.lease_id = lease_id
class ListBlobsHierarchySegmentResponse(msrest.serialization.Model):
    """An enumeration of blobs returned by a hierarchical listing.

    All required parameters must be populated in order to send to Azure.

    :ivar service_endpoint: Required. The service endpoint URL.
    :vartype service_endpoint: str
    :ivar container_name: Required. The name of the container being listed.
    :vartype container_name: str
    :ivar prefix: Prefix the listed blobs were filtered on.
    :vartype prefix: str
    :ivar marker: Continuation marker used for this page.
    :vartype marker: str
    :ivar max_results: Maximum number of results requested.
    :vartype max_results: int
    :ivar delimiter: Delimiter used for hierarchical listing.
    :vartype delimiter: str
    :ivar segment: Required. The listing segment itself.
    :vartype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
    :ivar next_marker: Marker for retrieving the next page, if any.
    :vartype next_marker: str
    """

    _validation = {
        'service_endpoint': {'required': True},
        'container_name': {'required': True},
        'segment': {'required': True},
    }

    _attribute_map = {
        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
        'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}},
        'prefix': {'key': 'Prefix', 'type': 'str'},
        'marker': {'key': 'Marker', 'type': 'str'},
        'max_results': {'key': 'MaxResults', 'type': 'int'},
        'delimiter': {'key': 'Delimiter', 'type': 'str'},
        'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'},
        'next_marker': {'key': 'NextMarker', 'type': 'str'},
    }

    _xml_map = {'name': 'EnumerationResults'}

    def __init__(
        self,
        *,
        service_endpoint: str,
        container_name: str,
        segment: "BlobHierarchyListSegment",
        prefix: Optional[str] = None,
        marker: Optional[str] = None,
        max_results: Optional[int] = None,
        delimiter: Optional[str] = None,
        next_marker: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword service_endpoint: Required. The service endpoint URL.
        :paramtype service_endpoint: str
        :keyword container_name: Required. The name of the container being listed.
        :paramtype container_name: str
        :keyword prefix: Prefix the listed blobs were filtered on.
        :paramtype prefix: str
        :keyword marker: Continuation marker used for this page.
        :paramtype marker: str
        :keyword max_results: Maximum number of results requested.
        :paramtype max_results: int
        :keyword delimiter: Delimiter used for hierarchical listing.
        :paramtype delimiter: str
        :keyword segment: Required. The listing segment itself.
        :paramtype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
        :keyword next_marker: Marker for retrieving the next page, if any.
        :paramtype next_marker: str
        """
        super().__init__(**kwargs)
        self.service_endpoint = service_endpoint
        self.container_name = container_name
        self.prefix = prefix
        self.marker = marker
        self.max_results = max_results
        self.delimiter = delimiter
        self.segment = segment
        self.next_marker = next_marker
class ModifiedAccessConditions(msrest.serialization.Model):
    """Parameter group carrying conditional-request (precondition) headers.

    :ivar if_modified_since: Operate only on a blob modified since the given
     date/time.
    :vartype if_modified_since: ~datetime.datetime
    :ivar if_unmodified_since: Operate only on a blob not modified since the
     given date/time.
    :vartype if_unmodified_since: ~datetime.datetime
    :ivar if_match: Operate only on blobs whose ETag matches this value.
    :vartype if_match: str
    :ivar if_none_match: Operate only on blobs whose ETag does not match this
     value.
    :vartype if_none_match: str
    """

    _attribute_map = {
        'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'},
        'if_match': {'key': 'ifMatch', 'type': 'str'},
        'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        if_modified_since: Optional[datetime.datetime] = None,
        if_unmodified_since: Optional[datetime.datetime] = None,
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword if_modified_since: Operate only on a blob modified since the
         given date/time.
        :paramtype if_modified_since: ~datetime.datetime
        :keyword if_unmodified_since: Operate only on a blob not modified since
         the given date/time.
        :paramtype if_unmodified_since: ~datetime.datetime
        :keyword if_match: Operate only on blobs whose ETag matches this value.
        :paramtype if_match: str
        :keyword if_none_match: Operate only on blobs whose ETag does not match
         this value.
        :paramtype if_none_match: str
        """
        super().__init__(**kwargs)
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
        self.if_match = if_match
        self.if_none_match = if_none_match
class Path(msrest.serialization.Model):
    """A single path (file or directory) entry in a Data Lake listing.

    :ivar name: The path name.
    :vartype name: str
    :ivar is_directory: Whether this path is a directory.
    :vartype is_directory: bool
    :ivar last_modified: Last-modified timestamp, as a string.
    :vartype last_modified: str
    :ivar e_tag: ETag of the path.
    :vartype e_tag: str
    :ivar content_length: Size of the path content in bytes.
    :vartype content_length: long
    :ivar owner: Owning user.
    :vartype owner: str
    :ivar group: Owning group.
    :vartype group: str
    :ivar permissions: POSIX-style permission string.
    :vartype permissions: str
    :ivar encryption_scope: The name of the encryption scope under which the
     blob is encrypted.
    :vartype encryption_scope: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'is_directory': {'key': 'isDirectory', 'type': 'bool'},
        'last_modified': {'key': 'lastModified', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'content_length': {'key': 'contentLength', 'type': 'long'},
        'owner': {'key': 'owner', 'type': 'str'},
        'group': {'key': 'group', 'type': 'str'},
        'permissions': {'key': 'permissions', 'type': 'str'},
        'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        is_directory: Optional[bool] = False,
        last_modified: Optional[str] = None,
        e_tag: Optional[str] = None,
        content_length: Optional[int] = None,
        owner: Optional[str] = None,
        group: Optional[str] = None,
        permissions: Optional[str] = None,
        encryption_scope: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: The path name.
        :paramtype name: str
        :keyword is_directory: Whether this path is a directory.
        :paramtype is_directory: bool
        :keyword last_modified: Last-modified timestamp, as a string.
        :paramtype last_modified: str
        :keyword e_tag: ETag of the path.
        :paramtype e_tag: str
        :keyword content_length: Size of the path content in bytes.
        :paramtype content_length: long
        :keyword owner: Owning user.
        :paramtype owner: str
        :keyword group: Owning group.
        :paramtype group: str
        :keyword permissions: POSIX-style permission string.
        :paramtype permissions: str
        :keyword encryption_scope: The name of the encryption scope under which
         the blob is encrypted.
        :paramtype encryption_scope: str
        """
        super().__init__(**kwargs)
        self.name = name
        self.is_directory = is_directory
        self.last_modified = last_modified
        self.e_tag = e_tag
        self.content_length = content_length
        self.owner = owner
        self.group = group
        self.permissions = permissions
        self.encryption_scope = encryption_scope
class PathHTTPHeaders(msrest.serialization.Model):
    """Parameter group carrying standard blob HTTP headers.

    :ivar cache_control: Optional. Sets the blob's cache control. If specified,
     this property is stored with the blob and returned with a read request.
    :vartype cache_control: str
    :ivar content_encoding: Optional. Sets the blob's content encoding. If
     specified, this property is stored with the blob and returned with a read
     request.
    :vartype content_encoding: str
    :ivar content_language: Optional. Set the blob's content language. If
     specified, this property is stored with the blob and returned with a read
     request.
    :vartype content_language: str
    :ivar content_disposition: Optional. Sets the blob's Content-Disposition
     header.
    :vartype content_disposition: str
    :ivar content_type: Optional. Sets the blob's content type. If specified,
     this property is stored with the blob and returned with a read request.
    :vartype content_type: str
    :ivar content_md5: Specify the transactional md5 for the body, to be
     validated by the service.
    :vartype content_md5: bytearray
    :ivar transactional_content_hash: Specify the transactional md5 for the
     body, to be validated by the service.
    :vartype transactional_content_hash: bytearray
    """

    _attribute_map = {
        'cache_control': {'key': 'cacheControl', 'type': 'str'},
        'content_encoding': {'key': 'contentEncoding', 'type': 'str'},
        'content_language': {'key': 'contentLanguage', 'type': 'str'},
        'content_disposition': {'key': 'contentDisposition', 'type': 'str'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'content_md5': {'key': 'contentMD5', 'type': 'bytearray'},
        'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'},
    }

    def __init__(
        self,
        *,
        cache_control: Optional[str] = None,
        content_encoding: Optional[str] = None,
        content_language: Optional[str] = None,
        content_disposition: Optional[str] = None,
        content_type: Optional[str] = None,
        content_md5: Optional[bytearray] = None,
        transactional_content_hash: Optional[bytearray] = None,
        **kwargs
    ):
        """
        :keyword cache_control: Optional. Sets the blob's cache control. If
         specified, this property is stored with the blob and returned with a
         read request.
        :paramtype cache_control: str
        :keyword content_encoding: Optional. Sets the blob's content encoding.
         If specified, this property is stored with the blob and returned with
         a read request.
        :paramtype content_encoding: str
        :keyword content_language: Optional. Set the blob's content language.
         If specified, this property is stored with the blob and returned with
         a read request.
        :paramtype content_language: str
        :keyword content_disposition: Optional. Sets the blob's
         Content-Disposition header.
        :paramtype content_disposition: str
        :keyword content_type: Optional. Sets the blob's content type. If
         specified, this property is stored with the blob and returned with a
         read request.
        :paramtype content_type: str
        :keyword content_md5: Specify the transactional md5 for the body, to be
         validated by the service.
        :paramtype content_md5: bytearray
        :keyword transactional_content_hash: Specify the transactional md5 for
         the body, to be validated by the service.
        :paramtype transactional_content_hash: bytearray
        """
        super().__init__(**kwargs)
        self.cache_control = cache_control
        self.content_encoding = content_encoding
        self.content_language = content_language
        self.content_disposition = content_disposition
        self.content_type = content_type
        self.content_md5 = content_md5
        self.transactional_content_hash = transactional_content_hash
class PathList(msrest.serialization.Model):
"""PathList.
:ivar paths:
:vartype paths: list[~azure.storage.filedatalake.models.Path]
"""
_attribute_map = {
'paths': {'key': 'paths', 'type': '[Path]'},
}
| |
tol = 1e-4, by_param = False, rev = False, add = True, infa = False, dim = 3 ):
"""
Description:
Creates a tip viscous layer volume following a foil edge.
Arguments:
# dist
Description: The offset distance normal to the wing tip.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
# offset
Description: The edge describing the offset of the viscous layer in the wing tip plane.
Type: Edge
GUI selection: -
Selection by name: yes
Recursive: -
Default value: -
# foil
Description: The edge touching the wing tip.
Type: Edge
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: None
# style
Description: See here.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "smooth"
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 40
# curv
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-4
# by_param
Description: Defines if the function has to create two points at the same position on the foil edge and on the offset edge respectively by using a same distance from the edge start (True) or the same parameter on the edge (False). In some cases, switch this parameter can give better results.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# rev
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 3
Returned Values:
"dim" value: 1
"single" value: -
Type: Compound of Edge
Number: 2
Name: "TipViscousLayer (Edges)"
"dim" value: 2
"single" value: -
Type: Compound of Faces
Number: 1
Name: "TipViscousLayer (Faces)"
"dim" value: 3
"single" value: -
Type: Solid
Number: 1
Name: "TipViscousLayer"
Conditions of use:
The input edges have to be open.
"""
if dim == 0: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
foil = GetGUISelection(foil, uniq = True)
[foil, offset] = GetObject([foil, offset])
#-
# Check the input shape existence
if "error" in [foil, offset] or None in [foil, offset]: return
#-
# Set father object
father = None
if infa == True: father = foil
#-
if False: pass
else:# All checks done
# Get the sub-shapes
[foil, offset] = GetSubShapes([foil, offset])
#-
# Get the edge lengths
foil_length = geompy.BasicProperties(foil[-1])[0]
offset_length = geompy.BasicProperties(offset[-1])[0]
#-
# Get the offset edge sense
linking_edge_1 = geompy.MakeEdge(foil[0][0], offset[0][0])
linking_edge_2 = geompy.MakeEdge(foil[0][0], offset[0][1])
linking_edge_1_length = geompy.BasicProperties(linking_edge_1)[0]
linking_edge_2_length = geompy.BasicProperties(linking_edge_2)[0]
reverse_length = False
if linking_edge_1_length > linking_edge_2_length:
reverse_length = True
#-
# Get the foil normal vector
face = geompy.MakeQuad2Edges(foil[-1], offset[-1])
normal_vector = geompy.GetNormal(face)
#-
filling_edges_3d = []
filling_edges_2d = []
boundary_faces = []
if rev == True:
dist *= -1.0
if curv == True:
parameter_list = DiscretizeEdgeByCurvature(foil[-1], np, dim = -1)
else:
parameter_list = [n / float(np) for n in range(np + 1)]
#-
# Create the offset vertexes
for parameter in parameter_list:# For each position on the foil edge...
#for parameter in [1 - n / float(np - 1) for n in range(np)]:# For each position on the foil edge...
# Create the vertexes
if by_param == True:
foil_vertex = geompy.MakeVertexOnCurve(foil[-1], parameter)
else:
foil_vertex = geompy.MakeVertexOnCurveByLength(foil[-1], parameter * foil_length, foil[0][0])
if reverse_length == True:
parameter = 1.0 - parameter
if by_param == True:
offset_vertex = geompy.MakeVertexOnCurve(offset[-1], parameter)
else:
offset_vertex = geompy.MakeVertexOnCurveByLength(offset[-1], parameter * offset_length, offset[0][0])
translated_vertex = geompy.MakeTranslationVectorDistance(foil_vertex, normal_vector, dist)
#-
# Create the 2D filling edge
filling_edge_2d = geompy.MakeEdge(foil_vertex, offset_vertex)
filling_edges_2d.append(filling_edge_2d)
#-
# Create the 3D filling edge
if style == "smooth":
filling_edge_3d = geompy.MakeArcOfEllipse(foil_vertex, offset_vertex, translated_vertex)
filling_edges_3d.append(filling_edge_3d)
else:
filling_edge_3d = geompy.MakeEdge(offset_vertex, translated_vertex)
filling_edges_3d.append(filling_edge_3d)
#-
if dim >= 2:
if parameter == 0 or parameter == 1:# If it is the first or the last position...
# Create the boundary face
third_edge = geompy.MakeEdge(foil_vertex, translated_vertex)
boundary_faces.append(geompy.MakeFaceWires([filling_edge_3d, filling_edge_2d, third_edge], True))
#-
# Put the filling edges into compounds
filling_edge_compound_2d = geompy.MakeCompound(filling_edges_2d)
filling_edge_compound_3d = geompy.MakeCompound(filling_edges_3d)
#-
# Add and return the resulting shape(s)
if dim == 1:
if add == True:
AddToStudy(filling_edge_compound_2d, "TipViscousLayer (Edges)", father)
AddToStudy(filling_edge_compound_3d, "TipViscousLayer (Edges)", father)
return [filling_edge_compound_2d, filling_edge_compound_3d]
#-
else:
# Create the fillings
filling_2d = geompy.MakeFilling(filling_edge_compound_2d, theMinDeg = 15, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
filling_3d = geompy.MakeFilling(filling_edge_compound_3d, theMinDeg = 15, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
#-
# Extrude the foil edge
foil_extension = geompy.MakePrismVecH(foil[-1], normal_vector, dist)
#-
# Create the compound from faces
face_compound = geompy.MakeCompound([filling_2d, filling_3d, foil_extension, boundary_faces[0], boundary_faces[1]])
#-
# Add and return the resulting shape(s)
if dim == 2:
if add == True:
AddToStudy(face_compound, "TipViscousLayer (Faces)", father)
return face_compound
#-
else:
# Glue the edges
gluing_tolerance = tol
while True:
free_boundaries = geompy.GetFreeBoundary(face_compound)[1]
if len(free_boundaries) == 0:
break
face_compound = geompy.MakeGlueEdges(face_compound, gluing_tolerance)
gluing_tolerance *= 2
#-
# Create the shell form the compound
shell = geompy.MakeShell([face_compound])
#-
# Create the solid from the shell
solid = geompy.MakeSolid([shell])
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(solid, "TipViscousLayer", father)
return solid
#-
mtvl = MakeTipViscousLayer  # Short alias for MakeTipViscousLayer.
def ExtendTipViscousLayer( shell_and_compound = [None], np = 40, tol = 1e-7, add = True, infa = False, dim = 3 ):
"""
Description:
Extends a tip viscous layer.
Arguments:
# shell_and_compound
Description: the input shell to extend and its guiding edge compound.
Type: List of 1 Shell + 1 Compound of Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 40
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 3
Returned Values:
"dim" value: 1
"single" value: -
Type: Compound of Edges
Number: 5 or 8
Name: "TipViscousLayerExtension (Edges)"
"dim" value: 2
"single" value: -
Type: Compound of Faces
Number: 2 or 3
Name: "TipViscousLayerExtension (Faces)"
"dim" value: 3
"single" value: -
Type: Compound of Solids
Number: 1
Name: "TipViscousLayerExtension"
Conditions of use:
The input shell has to contain 2 faces having the shape of triangles or ellipse quarters and an optional middle face being a quadrangle. The edge compound has to have all the characteristics of a compound build with the ExtendViscousLayer function.
"""
if isinstance(shell_and_compound, list) == False: print "[X] The first argument (shell_and_compound) should be an array."; return
if isinstance(np, str): print "[X] The second argument (np) should be an integer ."; return
if dim == 0: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
shell_and_compound = GetGUISelection(shell_and_compound)
shell_and_compound = GetObject(shell_and_compound)
#-
# Check the input shape existence
if "error" in shell_and_compound or None in shell_and_compound: return
#-
# Check the number of selected objects
if len(shell_and_compound) != 2:
print "[X] Two objects should be selected."
return
#-
# Distinguish input shapes
shell = None
compound = None
for object in shell_and_compound:
nb_faces = geompy.NumberOfFaces(object)
if nb_faces > 0:
shell = object
else:
compound = object
#-
# Set father object
father = None
if infa == True: father = compound
#-
if False: pass
else:# All checks done
# Check if the input shape is "shell-shaped"
shell_faces = GetSubShapes(shell)[2]
try:
shell = geompy.MakeShell(shell_faces)
except:
print "[X] The input 2D shape should be \"shell-shaped\"."; return
#-
# Keep edges touching the input shell
compound_edges = GetSubShapes(compound)[1]
edges_to_keep = []
for edge in compound_edges:
edge_vertexes = GetSubShapes(edge)[0]
distance_1 = geompy.MinDistance(edge_vertexes[0], shell)
distance_2 = geompy.MinDistance(edge_vertexes[1], shell)
distances = [distance_1, distance_2]
if min(distances) <= tol and max(distances) > tol:
edges_to_keep.append(edge)
compound = geompy.MakeCompound(edges_to_keep)
#-
# Get the sub - geometries
[shell, compound] = GetSubShapes([shell, compound])
#-
# Get the normal direction
compound_vertex_compound = geompy.MakeCompound(compound[0])
shell_vertex_compound = geompy.MakeCompound(shell[0])
top_vertex_compound = geompy.MakeCut(shell_vertex_compound, compound_vertex_compound)
top_vertex = None
for vertex in shell[0]:
distance = geompy.MinDistance(vertex, compound[-1])
if distance > tol:
top_vertex = vertex
break
bottom_vertex = geompy.GetShapesNearPoint(compound[-1], top_vertex, geompy.ShapeType["VERTEX"])
normal = geompy.MakeVector(bottom_vertex, top_vertex)
#-
# Get root normal thickness
root_normal_thickness = geompy.BasicProperties(normal)[0]
#-
# Distinguish inside and outside edges
inside_edges = []
outside_edges = []
for edge in compound[1]:
edge_vertexes = geompy.SubShapeAll(edge, geompy.ShapeType["VERTEX"])
for edge_vertex in edge_vertexes:
min_distance = geompy.MinDistance(edge_vertex, shell[-1])
if min_distance <= tol:
nb_contacts = 0
for face in shell[2]:
min_distance = | |
self.getConfigureOption('sp executable') + ' --print-log'
# PROBLEM, SOLVER
prob_name = self.getConfigureOption('sensor placement')
solver_name = self.getConfigureOption('solver')
# OBJECTIVE
for i in range(len(self.getProblemOption('objective'))):
obj_name = self.getProblemOption('objective', prob_name)
# IMPACT objective
if lower(self.getObjectiveOption('goal', obj_name)) in self.impact_goals:
impact_name = self.getObjectiveOption('goal', obj_name)
impactFile = self.getImpactOption('original impact file',impact_name)
impactName = impactFile.rsplit('.',1)[0]
impactNetwork = impactName.rsplit('_',1)[0]
impactMetric = impactName.rsplit('_',1)[-1]
if network_opt:
cmd = cmd + " --network="+impactNetwork
#if self.getImpactOption('directory', impact_name) not in none_list:
# cmd = cmd + " --impact-dir="+self.getImpactOption('original directory', impact_name)
if self.getImpactOption('weight file', impact_name) not in none_list:
cmd = cmd + " --incident-weights="+str(self.getImpactOption('weight file', impact_name))
#if self.getImpactOption('response time', impact_name) not in none_list:
#cmd = cmd + " --responseTime="+str(self.getImpactOption('response time', impact_name))
network_opt = False
cmd = cmd + " --objective="+impactMetric.lower()+"_"+self.getObjectiveOption('statistic', obj_name).lower()
# COST objective
elif lower(self.getObjectiveOption('goal', obj_name)) in self.cost_goals:
cmd = cmd + " --objective=cost" # statistic = total
if cost_opt:
#cost_name = self.getObjectiveOption('goal', obj_name)
#if self.getCostOption('directory', cost_name) not in none_list:
# cmd = cmd + " --costs="+str(os.path.join(self.getCostOption('directory', cost_name),self.getCostOption('cost file', cost_name)))
#else:
# cmd = cmd + " --costs="+str(self.getCostOption('cost file', cost_name))
cmd = cmd + " --costs="+self.single_cost_file
cost_opt = False
# NFD, NS objective
elif lower(self.getObjectiveOption('goal', obj_name)) in self.other_goals:
cmd = cmd + " --objective="+self.getObjectiveOption('goal', obj_name).lower()+"_"+self.getObjectiveOption('statistic', obj_name).lower()
else:
raise RuntimeError, "Invalid objective goal '%s'. Valid goals = %s, cost name, NS, NFD" % (self.getObjectiveOption('goal', obj_name), ', '.join(self.impact_goals))
if gamma_opt:
if lower(self.getObjectiveOption('statistic', obj_name)) in ['var', 'cvar']:
#cmd = cmd + " --gamma="+str(self.getObjectiveOption('gamma', obj_name))
cmd = cmd + " --gamma="+str(self.single_gamma)
gamma_opt = False
# CONSTRAINT
for const_name in self.getProblemOption('constraint', prob_name):
# IMPACT constraint
if lower(self.getConstraintOption('goal', const_name)) in self.impact_goals:
impact_name = self.getConstraintOption('goal', const_name)
impactFile = self.getImpactOption('original impact file',impact_name)
impactName = impactFile.rsplit('.',1)[0]
impactNetwork = impactName.rsplit('_',1)[0]
impactMetric = impactName.rsplit('_',1)[-1]
if network_opt:
cmd = cmd + " --network="+impactNetwork
#if self.getImpactOption('directory', impact_name) not in none_list:
# cmd = cmd + " --impact-dir="+self.getImpactOption('original directory', impact_name)
if self.getImpactOption('weight file', impact_name) not in none_list:
cmd = cmd + " --incident-weights="+str(self.getImpactOption('weight file', impact_name))
#if self.getImpactOption('response time', impact_name) not in none_list:
# cmd = cmd + " --responseTime="+str(self.getImpactOption('response time', impact_name))
network_opt = False
else:
print "Inconsistent network name"
cmd = cmd + " --ub="+impactMetric.lower()+"_"+self.getConstraintOption('statistic', const_name).lower()+","+str(self.getConstraintOption('bound', const_name))
# COST constraint
elif lower(self.getConstraintOption('goal', const_name)) in self.cost_goals:
cmd = cmd + " --ub=cost,"+str(self.getConstraintOption('bound', const_name)) # statistic = total
if cost_opt:
#cost_name = self.getConstraintOption('goal', const_name)
#if self.getCostOption('directory', cost_name) not in none_list:
# scmd = cmd + str(os.path.join(self.getCostOption('directory', cost_name),self.getCostOption('cost file', cost_name)))
#else:
# cmd = cmd + str(self.getCostOption('cost file', cost_name))
cmd = cmd + " --costs="+self.single_cost_file
cost_opt = False
# NFD, NS constraint
elif lower(self.getConstraintOption('goal', const_name)) in self.other_goals:
cmd = cmd + " --ub="+self.getConstraintOption('goal', const_name).lower()+","+str(self.getConstraintOption('bound', const_name))
else:
raise RuntimeError, "Invalid constraint goal '%s'. Valid goals = %s, %s, NS, NFD" % (self.getConstraintOption('goal', const_name), ', '.join(self.impact_goals), ', '.join(self.cost_goals))
if gamma_opt:
if lower(self.getConstraintOption('statistic', const_name)) in ['var', 'cvar']:
#cmd = cmd + " --gamma="+str(self.getConstraintOption('gamma', const_name))
cmd = cmd + " --gamma="+self.single_gamma
gamma_opt = False
# SOLVER
cmd = cmd + " --solver="+self.getSolverOption('type')
if self.getSolverOption('seed') not in none_list:
cmd = cmd + " --seed="+str(self.getSolverOption('seed'))
# Are these solver specific?
if self.getSolverOption('representation') not in none_list:
cmd = cmd + " --grasp-representation="+str(self.getSolverOption('representation'))
if self.getSolverOption('number of samples') not in none_list:
cmd = cmd + " --numsamples="+str(self.getSolverOption('number of samples'))
if self.getSolverOption('timelimit') not in none_list:
cmd = cmd + " --runtime="+str(self.getSolverOption('timelimit'))
if self.getSolverOption('notify') not in none_list:
cmd = cmd + " --notify="+str(self.getSolverOption('notify'))
if self.getProblemOption('compute bound'):
cmd = cmd + " --compute-bound"
if self.getConfigureOption('print log'):
cmd = cmd + " --print-log"
# PATH options (required for now)
sp_problem_dir = os.path.dirname(os.path.abspath(__file__))
wst_dir = sp_problem_dir+'/../../../..'
cmd = cmd + " --path="+wst_dir+"/bin/"
cmd = cmd + " --path="+wst_dir+"/etc/mod/"
# TODO the above 4 lines should be replaced by
#for path in self.getConfigureOption('path')
# cmd = cmd + " --path="+path
# LOCATION options
cmd = cmd + " --sensor-locations="+self.getConfigureOption('output prefix')+'_location'
# AGGREGATE options
if self.getProblemOption('aggregate') not in none_list:
if len(self.getProblemOption('aggregate')) > 1:
print "Warning: Multiple aggregate blocks selected. Using aggregate block " + self.getProblemOption('aggregate')[0]
agg_name = self.getProblemOption('aggregate')
# IMPACT
if lower(self.getAggregateOption('goal', agg_name)) in self.impact_goals:
impact_name = self.getAggregateOption('goal', agg_name)
impactFile = self.getImpactOption('original impact file',impact_name)
impactName = impactFile.rsplit('.',1)[0]
impactNetwork = impactName.rsplit('_',1)[0]
impactMetric = impactName.rsplit('_',1)[-1]
type = lower(self.getAggregateOption('type', agg_name))
value = self.getAggregateOption('value', agg_name)
if type == 'threshold':
cmd = cmd + " --aggregation-threshold="+impactMetric.lower()+','+str(value)
if type == 'percent':
cmd = cmd + " --aggregation-percent="+impactMetric.lower()+','+str(value)
if type == 'ratio':
cmd = cmd + " --aggregation-ratio="+impactMetric.lower()+','+str(value)
if self.getAggregateOption('conserve memory', agg_name) not in none_list:
cmd = cmd + " --conserve-memory="+str(self.getAggregateOption('conserve memory', agg_name))
if self.getAggregateOption('distinguish detection', agg_name) not in none_list:
cmd = cmd + " --distinguish-detection="+str(self.getAggregateOption('distinguish detection', agg_name))
if self.getAggregateOption('disable aggregation', agg_name) not in none_list:
cmd = cmd + " --disable-aggregation="+str(self.getAggregateOption('disable aggregation', agg_name))
# IMPERFECT options
# TODO: modify sc and jc files based on fixed/infeasible data
if self.getProblemOption('imperfect') not in none_list:
if len(self.getProblemOption('imperfect')) > 1:
print "Warning: Multiple imperfect blocks selected. Using imperfect block " + self.getProblemOption('imperfect')[0]
imperf_name = self.getProblemOption('imperfect')[0]
if self.getImperfectOption('sensor class file', imperf_name) not in none_list:
cmd = cmd + " --imperfect-scfile="+self.getImperfectOption('sensor class file', imperf_name)
if self.getImperfectOption('junction class file', imperf_name) not in none_list:
cmd = cmd + " --imperfect-jcfile="+self.getImperfectOption('junction class file', imperf_name)
# CONFIGURE options
cmd = cmd + " --tmp-file="+str(self.getConfigureOption('output prefix'))+"_tmp"
cmd = cmd + " --output="+str(self.getConfigureOption('output prefix'))+".sensors"
#cmd = cmd + " --summary="+str(self.getConfigureOption('output prefix', 0))+".summary"
if self.getConfigureOption('memmon'):
cmd = cmd + " --memmon"
if self.getConfigureOption('memcheck') not in none_list:
cmd = cmd + " --memcheck="+str(self.getConfigureOption('memcheck'))
if self.getConfigureOption('format') not in none_list:
cmd = cmd + " --format="+str(self.getConfigureOption('format'))
#if self.getConfigureOption('path') not in none_list:
# cmd = cmd + " --path="+str(self.getConfigureOption('path'))
#if self.getConfigureOption('ampl cplex path') not in none_list:
# cmd = cmd + " --amplcplexpath="+str(self.getConfigureOption('ampl cplex path'))
#if self.getConfigureOption('pico path',) not in none_list:
# cmd = cmd + " --picopath="+str(self.getConfigureOption('pico path'))
#if self.getConfigureOption('glpk path') not in none_list:
# cmd = cmd + " --glpkpath="+str(self.getConfigureOption('glpk path'))
if self.getConfigureOption('ampl') not in none_list:
cmd = cmd + " --ampl="+str(self.getConfigureOption('ampl'))
if self.getConfigureOption('ampl data') not in none_list:
cmd = cmd + " --ampldata="+str(self.getConfigureOption('ampl data'))
if self.getConfigureOption('ampl model') not in none_list:
cmd = cmd + " --amplmodel="+str(self.getConfigureOption('ampl model'))
if self.getConfigureOption('debug'):
cmd = cmd + " --debug"
if self.getConfigureOption('gap') not in none_list:
cmd = cmd + " --gap="+str(self.getConfigureOption('gap'))
if self.getProblemOption('compute greedy ranking'):
cmd = cmd + " --compute-greedy-ranking"
if self.getConfigureOption('evaluate all'):
cmd = cmd + " --eval-all"
if self.getConfigureOption('version'):
cmd = cmd + " --version"
logger.info("Running spold")
logger.debug(cmd)
tmpdir = os.path.dirname(self.opts['configure']['output prefix'])
out = os.path.join(tmpdir, 'spold.out') #pyutilib.services.TempfileManager.create_tempfile(dir=tmpdir, prefix='tmp_', suffix='spold.out')
sim_timelimit = None
sub_logger = logging.getLogger('wst.sp.spold.exec')
sub_logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(out, mode='w')
sub_logger.addHandler(fh)
try:
p = pyutilib.subprocess.run(cmd,timelimit=sim_timelimit,stdout=pywst.common.problem.LoggingFile(sub_logger))
#pyutilib.subprocess.run(cmd, self.getConfigureOption('output prefix')+".out")
#rc, output = pyutilib.subprocess.run(cmd, tee=True)
if p[0]:
msg = "ERROR executing the 'sp' script"
logger.error(msg)
raise RuntimeError(msg)
# remove temporary files if debug = 0
if self.opts['configure']['debug'] == 0:
pyutilib.services.TempfileManager.clear_tempfiles()
# write output file
prefix = os.path.basename(self.opts['configure']['output prefix'])
logfilename = logger.parent.handlers[0].baseFilename
outfilename = logger.parent.handlers[0].baseFilename.replace('.log','.yml')
# YAML output
config = wst_config.output_config()
module_blocks = ("general", "sensor placement")
template_options = {
'general':{
'cpu time': time.time() - startTime,
'log file': None},
'sensor placement': {} }
if outfilename != None:
self.saveOutput(outfilename, config, module_blocks, template_options)
# print solution to screen
logger.info("\nWST normal termination")
logger.info("---------------------------")
dir_ = os.path.dirname(logfilename)
if dir_ == "":
dir_ = '.'
logger.info("Directory: " + dir_)
logger.info("Results file: "+os.path.basename(outfilename))
logger.info("Log file: "+os.path.basename(logfilename)+'\n')
finally:
sub_logger.removeHandler(fh)
fh.close()
def get_index(self, block, name):
    """Return the position of the entry called *name* inside self.opts[block].

    Each entry of ``self.opts[block]`` is a mapping carrying a ``'name'`` key;
    the first match wins.

    Raises:
        RuntimeError: if no entry in the block has the requested name.
    """
    for index, entry in enumerate(self.opts[block]):
        if entry['name'] == name:
            return index
    # Call-form raise works on both Python 2 and 3 (the old comma form is
    # Python-2-only syntax).
    raise RuntimeError("There is no '%s' with name '%s'" % (block, name))
def preprocess(self, tmpdata_prefix, spold=False):
# Preprocess step includes:
# Read in nodemap file, create junction map
# Read in cost file, create cost map
# Remove comments from impact file
# Remove fixed and infeasible nodes from | |
(pointA[0] + self.backpackSlot9ItemTypeW, pointA[1] + self.backpackSlot9ItemTypeH)
cv2.rectangle(self.image, pointA, pointB, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot9ItemAmount(self):
    # Outline the backpack slot 9 item-amount region on the debug image.
    left, top = self.backpackSlot9ItemAmountX1, self.backpackSlot9ItemAmountY1
    corner = (left + self.backpackSlot9ItemAmountW, top + self.backpackSlot9ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot9ItemBars(self):
    # Outline the backpack slot 9 item-bars region on the debug image.
    left, top = self.backpackSlot9ItemBarsX1, self.backpackSlot9ItemBarsY1
    corner = (left + self.backpackSlot9ItemBarsW, top + self.backpackSlot9ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot10(self):
    # Outline backpack slot 10 on the debug image.
    left, top = self.backpackSlot10X1, self.backpackSlot10Y1
    corner = (left + self.backpackSlot10W, top + self.backpackSlot10H)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot10ItemType(self):
    # Outline the backpack slot 10 item-type region on the debug image.
    left, top = self.backpackSlot10ItemTypeX1, self.backpackSlot10ItemTypeY1
    corner = (left + self.backpackSlot10ItemTypeW, top + self.backpackSlot10ItemTypeH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot10ItemAmount(self):
    # Outline the backpack slot 10 item-amount region on the debug image.
    left, top = self.backpackSlot10ItemAmountX1, self.backpackSlot10ItemAmountY1
    corner = (left + self.backpackSlot10ItemAmountW, top + self.backpackSlot10ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot10ItemBars(self):
    # Outline the backpack slot 10 item-bars region on the debug image.
    left, top = self.backpackSlot10ItemBarsX1, self.backpackSlot10ItemBarsY1
    corner = (left + self.backpackSlot10ItemBarsW, top + self.backpackSlot10ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot11(self):
    # Outline backpack slot 11 on the debug image.
    left, top = self.backpackSlot11X1, self.backpackSlot11Y1
    corner = (left + self.backpackSlot11W, top + self.backpackSlot11H)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot11ItemType(self):
    # Outline the backpack slot 11 item-type region on the debug image.
    left, top = self.backpackSlot11ItemTypeX1, self.backpackSlot11ItemTypeY1
    corner = (left + self.backpackSlot11ItemTypeW, top + self.backpackSlot11ItemTypeH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot11ItemAmount(self):
    # Outline the backpack slot 11 item-amount region on the debug image.
    left, top = self.backpackSlot11ItemAmountX1, self.backpackSlot11ItemAmountY1
    corner = (left + self.backpackSlot11ItemAmountW, top + self.backpackSlot11ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot11ItemBars(self):
    # Outline the backpack slot 11 item-bars region on the debug image.
    left, top = self.backpackSlot11ItemBarsX1, self.backpackSlot11ItemBarsY1
    corner = (left + self.backpackSlot11ItemBarsW, top + self.backpackSlot11ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot12(self):
    # Outline backpack slot 12 on the debug image.
    left, top = self.backpackSlot12X1, self.backpackSlot12Y1
    corner = (left + self.backpackSlot12W, top + self.backpackSlot12H)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot12ItemType(self):
    # Outline the backpack slot 12 item-type region on the debug image.
    left, top = self.backpackSlot12ItemTypeX1, self.backpackSlot12ItemTypeY1
    corner = (left + self.backpackSlot12ItemTypeW, top + self.backpackSlot12ItemTypeH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot12ItemAmount(self):
    # Outline the backpack slot 12 item-amount region on the debug image.
    left, top = self.backpackSlot12ItemAmountX1, self.backpackSlot12ItemAmountY1
    corner = (left + self.backpackSlot12ItemAmountW, top + self.backpackSlot12ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot12ItemBars(self):
    # Outline the backpack slot 12 item-bars region on the debug image.
    left, top = self.backpackSlot12ItemBarsX1, self.backpackSlot12ItemBarsY1
    corner = (left + self.backpackSlot12ItemBarsW, top + self.backpackSlot12ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot13(self):
    # Outline backpack slot 13 on the debug image.
    left, top = self.backpackSlot13X1, self.backpackSlot13Y1
    corner = (left + self.backpackSlot13W, top + self.backpackSlot13H)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot13ItemType(self):
    # Outline the backpack slot 13 item-type region on the debug image.
    left, top = self.backpackSlot13ItemTypeX1, self.backpackSlot13ItemTypeY1
    corner = (left + self.backpackSlot13ItemTypeW, top + self.backpackSlot13ItemTypeH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot13ItemAmount(self):
    # Outline the backpack slot 13 item-amount region on the debug image.
    left, top = self.backpackSlot13ItemAmountX1, self.backpackSlot13ItemAmountY1
    corner = (left + self.backpackSlot13ItemAmountW, top + self.backpackSlot13ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot13ItemBars(self):
    # Outline the backpack slot 13 item-bars region on the debug image.
    left, top = self.backpackSlot13ItemBarsX1, self.backpackSlot13ItemBarsY1
    corner = (left + self.backpackSlot13ItemBarsW, top + self.backpackSlot13ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot14(self):
    # Outline backpack slot 14 on the debug image.
    left, top = self.backpackSlot14X1, self.backpackSlot14Y1
    corner = (left + self.backpackSlot14W, top + self.backpackSlot14H)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot14ItemType(self):
    # Outline the backpack slot 14 item-type region on the debug image.
    left, top = self.backpackSlot14ItemTypeX1, self.backpackSlot14ItemTypeY1
    corner = (left + self.backpackSlot14ItemTypeW, top + self.backpackSlot14ItemTypeH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot14ItemAmount(self):
    # Outline the backpack slot 14 item-amount region on the debug image.
    left, top = self.backpackSlot14ItemAmountX1, self.backpackSlot14ItemAmountY1
    corner = (left + self.backpackSlot14ItemAmountW, top + self.backpackSlot14ItemAmountH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot14ItemBars(self):
    # Outline the backpack slot 14 item-bars region on the debug image.
    left, top = self.backpackSlot14ItemBarsX1, self.backpackSlot14ItemBarsY1
    corner = (left + self.backpackSlot14ItemBarsW, top + self.backpackSlot14ItemBarsH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBannerBoundingBox(self):
    # Outline the banner bounding-box region on the debug image.
    left, top = self.bannerBoundingBoxX1, self.bannerBoundingBoxY1
    corner = (left + self.bannerBoundingBoxW, top + self.bannerBoundingBoxH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionLegendIcon(self):
    # Outline the legend-icon region on the debug image.
    left, top = self.legendIconX1, self.legendIconY1
    corner = (left + self.legendIconW, top + self.legendIconH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionUsername(self):
    # Outline the username region on the debug image.
    left, top = self.usernameX1, self.usernameY1
    corner = (left + self.usernameW, top + self.usernameH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionShieldBar(self):
    # Outline the shield-bar region on the debug image.
    left, top = self.shieldBarX1, self.shieldBarY1
    corner = (left + self.shieldBarW, top + self.shieldBarH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionHealthBar(self):
    # Outline the health-bar region on the debug image.
    left, top = self.healthBarX1, self.healthBarY1
    corner = (left + self.healthBarW, top + self.healthBarH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionHelmetSlot(self):
    # Outline the helmet-slot region on the debug image.
    left, top = self.helmetSlotX1, self.helmetSlotY1
    corner = (left + self.helmetSlotW, top + self.helmetSlotH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionArmorSlot(self):
    # Outline the armor-slot region on the debug image.
    left, top = self.armorSlotX1, self.armorSlotY1
    corner = (left + self.armorSlotW, top + self.armorSlotH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionKnockdownSlot(self):
    # Outline the knockdown-shield slot region on the debug image.
    left, top = self.knockdownShieldSlotX1, self.knockdownShieldSlotY1
    corner = (left + self.knockdownShieldSlotW, top + self.knockdownShieldSlotH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackpackSlot(self):
    # Outline the backpack-slot region on the debug image.
    left, top = self.backpackSlotX1, self.backpackSlotY1
    corner = (left + self.backpackSlotW, top + self.backpackSlotH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionAbilityIcon(self):
    # Outline the ability-icon region on the debug image.
    left, top = self.abilityIconX1, self.abilityIconY1
    corner = (left + self.abilityIconW, top + self.abilityIconH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionAbilityPercentText(self):
    # Outline the ability-percent text region on the debug image.
    left, top = self.abilityPercentTextX1, self.abilityPercentTextY1
    corner = (left + self.abilityPercentTextW, top + self.abilityPercentTextH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackButtonKeyBind(self):
    # Outline the back-button key-bind region on the debug image.
    left, top = self.backButtonKeyBindX1, self.backButtonKeyBindY1
    corner = (left + self.backButtonKeyBindW, top + self.backButtonKeyBindH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionBackButtonText(self):
    # Outline the back-button text region on the debug image.
    left, top = self.backButtonTextX1, self.backButtonTextY1
    corner = (left + self.backButtonTextW, top + self.backButtonTextH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def drawRegionGameMenuText(self):
    # Outline the game-menu text region on the debug image.
    left, top = self.gameMenuTextX1, self.gameMenuTextY1
    corner = (left + self.gameMenuTextW, top + self.gameMenuTextH)
    cv2.rectangle(self.image, (left, top), corner, self.regionColor, self.regionThickness)
def getTopMenuInventoryTextSimilarity(self):
    # OCR the top-menu "Inventory" tab label and score it against "inventory".
    x1, y1 = self.topMenuInventoryTextX1, self.topMenuInventoryTextY1
    crop = self.image[y1:y1 + self.topMenuInventoryTextH, x1:x1 + self.topMenuInventoryTextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "inventory")
def getTopMenuSquadTextSimilarity(self):
    # OCR the top-menu "Squad" tab label and score it against "squad".
    x1, y1 = self.topMenuSquadTextX1, self.topMenuSquadTextY1
    crop = self.image[y1:y1 + self.topMenuSquadTextH, x1:x1 + self.topMenuSquadTextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "squad")
def getTopMenuLegendTextSimilarity(self):
    # OCR the top-menu "Legend" tab label and score it against "legend".
    # NOTE(review): the coordinate attributes are spelled "Lengend" elsewhere
    # in the class; the misspelled names must be kept to match them.
    x1, y1 = self.topMenuLengendTextX1, self.topMenuLengendTextY1
    crop = self.image[y1:y1 + self.topMenuLengendTextH, x1:x1 + self.topMenuLengendTextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "legend")
def getBackButtonTextSimilarity(self):
    # OCR the back-button label and score it against "back".
    x1, y1 = self.backButtonTextX1, self.backButtonTextY1
    crop = self.image[y1:y1 + self.backButtonTextH, x1:x1 + self.backButtonTextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "back")
def getGameMenuTextSimilarity(self):
    # OCR the game-menu label and score it against "game menu".
    x1, y1 = self.gameMenuTextX1, self.gameMenuTextY1
    crop = self.image[y1:y1 + self.gameMenuTextH, x1:x1 + self.gameMenuTextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "game menu")
def getWeapon1TextSimilarity(self):
    # OCR the first weapon label and score it against "weapon".
    x1, y1 = self.weapon1TextX1, self.weapon1TextY1
    crop = self.image[y1:y1 + self.weapon1TextH, x1:x1 + self.weapon1TextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "weapon")
def getWeapon2TextSimilarity(self):
    # OCR the second weapon label and score it against "weapon".
    x1, y1 = self.weapon2TextX1, self.weapon2TextY1
    crop = self.image[y1:y1 + self.weapon2TextH, x1:x1 + self.weapon2TextW]
    ocr_text = str(pytesseract.image_to_string(crop)).lower()
    return self.getSimilarity(ocr_text, "weapon")
def isInInventoryUI(self):
    """Heuristically decide whether the inventory UI is currently on screen.

    Each OCR similarity score that clears 0.80 contributes votes (the two
    weapon labels count double); the screen is judged to be the inventory
    when the vote total reaches self.confidenceLevelRequirement.
    """
    topMenuInventoryTextSimilarity = self.getTopMenuInventoryTextSimilarity()
    topMenuSquadTextSimilarity = self.getTopMenuSquadTextSimilarity()
    topMenuLegendTextSimilarity = self.getTopMenuLegendTextSimilarity()
    weapon1TextSimilarity = self.getWeapon1TextSimilarity()
    weapon2TextSimilarity = self.getWeapon2TextSimilarity()
    backButtonTextSimilarity = self.getBackButtonTextSimilarity()
    gameMenuTextSimilarity = self.getGameMenuTextSimilarity()
    weighted_scores = (
        (topMenuInventoryTextSimilarity, 1),
        (topMenuSquadTextSimilarity, 1),
        (topMenuLegendTextSimilarity, 1),
        (weapon1TextSimilarity, 2),
        (weapon2TextSimilarity, 2),
        (backButtonTextSimilarity, 1),
        (gameMenuTextSimilarity, 1),
    )
    confidenceLevel = sum(weight for score, weight in weighted_scores if score >= .80)
    if self.confidenceLevelOutput:
        print("confidenceLevel: ", confidenceLevel)
        print("confidenceLevelRequirement: ", self.confidenceLevelRequirement)
        print("topMenuInventoryTextSimilarity: ", topMenuInventoryTextSimilarity)
        print("topMenuSquadTextSimilarity: ", topMenuSquadTextSimilarity)
        print("topMenuLegendTextSimilarity: ", topMenuLegendTextSimilarity)
        print("weapon1TextSimilarity: ", weapon1TextSimilarity)
        print("weapon2TextSimilarity: ", weapon2TextSimilarity)
        print("backButtonTextSimilarity: ", backButtonTextSimilarity)
        print("gameMenuTextSimilarity: ", gameMenuTextSimilarity)
    return confidenceLevel >= self.confidenceLevelRequirement
def isBackpackSlot5Locked(self):
    # Template-match backpack slot 5 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot5X1, self.backpackSlot5Y1
    slot = self.image[y1:y1 + self.backpackSlot5H, x1:x1 + self.backpackSlot5W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def isBackpackSlot6Locked(self):
    # Template-match backpack slot 6 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot6X1, self.backpackSlot6Y1
    slot = self.image[y1:y1 + self.backpackSlot6H, x1:x1 + self.backpackSlot6W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def isBackpackSlot7Locked(self):
    # Template-match backpack slot 7 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot7X1, self.backpackSlot7Y1
    slot = self.image[y1:y1 + self.backpackSlot7H, x1:x1 + self.backpackSlot7W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def isBackpackSlot12Locked(self):
    # Template-match backpack slot 12 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot12X1, self.backpackSlot12Y1
    slot = self.image[y1:y1 + self.backpackSlot12H, x1:x1 + self.backpackSlot12W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def isBackpackSlot13Locked(self):
    # Template-match backpack slot 13 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot13X1, self.backpackSlot13Y1
    slot = self.image[y1:y1 + self.backpackSlot13H, x1:x1 + self.backpackSlot13W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def isBackpackSlot14Locked(self):
    # Template-match backpack slot 14 and report whether the "locked"
    # template fits better than the "empty" one.
    x1, y1 = self.backpackSlot14X1, self.backpackSlot14Y1
    slot = self.image[y1:y1 + self.backpackSlot14H, x1:x1 + self.backpackSlot14W]
    gray = cv2.cvtColor(slot, cv2.COLOR_BGR2GRAY)
    locked_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotLockedTemplate, cv2.TM_CCOEFF))[1]
    empty_score = cv2.minMaxLoc(cv2.matchTemplate(gray, self.backpackSlotEmptyTemplate, cv2.TM_CCOEFF))[1]
    return locked_score > empty_score
def getHealthAmount(self):
pointA = (self.healthBarX1, self.healthBarY1)
pointB = (pointA[0] + self.healthBarW, pointA[1] + self.healthBarH)
healthBarImage = self.image[pointA[1]:pointB[1], pointA[0]:pointB[0]]
grayHealthBarImage = cv2.cvtColor(healthBarImage, cv2.COLOR_BGR2GRAY)
binaryHealthBarImage = cv2.threshold(grayHealthBarImage, 200, 255, cv2.THRESH_BINARY)[1]
# cv2.imwrite("../output/healthBar.jpg", healthBarImage)
# cv2.imwrite("../output/grayHealthBar.jpg", grayHealthBarImage)
# cv2.imwrite("../output/binaryHealthBar.jpg", | |
except Exception:
return None
else:
raise PreventUpdate
@app.callback(
    ServersideOutput('UploadedDF','data'),
    Output('filename_append','children'),
    Output('dataset-info-open','disabled'),
    Input('model_select','value'),
    Input('upload_data','filename'),
    Input('upload_data','contents'),
    Input('DOE','data')
)
def ParsedData(model, filename, content, DOE):
    """Parse an uploaded results file (offline mode only).

    Accepts the upload when its row count matches the DOE; otherwise flags
    an error and keeps the dataset-info button disabled.
    """
    if model != 'offline':
        raise PreventUpdate
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'upload_data' not in changed_id:
        raise PreventUpdate
    parsed = ParseData(content, filename)
    if parsed.shape[0] == DOE.shape[0]:
        return parsed, [filename], False
    return None, 'Error', True
@app.callback(
    Output("dataset-info", "is_open"),
    [Input("dataset-info-open", "n_clicks"), Input("dataset-info-close", "n_clicks")],
    [State("dataset-info", "is_open")],
)
def toggle_modal(n1, n2, is_open):
    """Flip the dataset-info modal state when either button has been clicked."""
    if not (n1 or n2):
        return is_open
    return not is_open
@app.callback(
    Output('dataset_filename','children'),
    Output('upload_data_table','data'),
    Output('upload_data_table','columns'),
    Input('filename_append','children'),
    Input("dataset-info", "is_open"),
    Input("UploadedDF",'data'),
    Input("DOE",'data')
)
def DatasetInfo(filename, is_open, df, DOE):
    """Fill the dataset-info modal table.

    Each table row pairs one model evaluation (first column) with its DOE
    coordinates (remaining columns). Only reacts when the modal is opened.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'dataset-info' not in changed_id or not is_open:
        raise PreventUpdate
    vals = np.column_stack((df, DOE))
    # Column 0 is the model evaluation; columns 1..n-1 are DOE coordinates.
    data = [
        {('model_evaluations' if j == 0 else 'DOE_{}'.format(j)): row[j]
         for j in range(vals.shape[1])}
        for row in vals
    ]
    # Guard: the original indexed data[0] unconditionally and crashed on an
    # empty upload; also removed a leftover debug print of the whole table.
    if not data:
        raise PreventUpdate
    columns = [
        {'name': i, 'id': i, 'deletable': False, 'type': 'numeric', 'format': Format(precision=4)}
        for i in data[0].keys()]
    return filename, data, columns
###################################################################
# Callback for disabling Compute Uncertainty button
###################################################################
@app.callback(
    Output('CU_button','disabled'),
    [
        Input('basis_button','n_clicks'),
        Input('input_func','value'),
        Input('AP_button','n_clicks'),
        Input('model_select','value'),
        Input('UploadedDF','data')
    ],
)
def CheckifCCClickd(n_clicks, input_val, ap, model, uploaded):
    """Decide whether the Compute Uncertainty button is disabled.

    Disabled (True) unless the basis has been built and, depending on the
    model mode, either an analytical input function or an uploaded dataset
    is available. Pressing AP always disables it again.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'AP_button' in changed_id:
        return True
    # BUG FIX: the original tested `'basis_button' or 'input_func' in changed_id`,
    # which is always truthy; each membership test must be written out.
    if 'basis_button' in changed_id or 'input_func' in changed_id:
        clicked = bool(n_clicks)  # n_clicks is None before the first click; `None > 0` raised
        if model == 'analytical':
            return not (clicked and input_val is not None)
        return not (clicked and uploaded is not None)
    return True
###################################################################
# Callback to map input boxes to distributions
###################################################################
@app.callback(
    Output({'type': 'params', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'params', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'disabled'),
    [Input({'type': 'drop-1', 'index': dash.dependencies.MATCH}, 'value')],
    prevent_initial_call=True,  # was misspelled "prevent_initial_callback" and had no effect
)
def UpdateInputField(value):
    """Set the placeholders and enabled state of the four parameter input
    boxes according to the selected distribution.

    Returns 8 values: four placeholders, then four 'disabled' flags.
    """
    show = False  # 'disabled' flag: box editable
    hide = True   # 'disabled' flag: box greyed out
    if value is None:
        # BUG FIX: the original returned only 7 values for the 8 declared outputs.
        return '...', '...', '...', '...', hide, hide, hide, hide
    if value in MEAN_VAR_DIST:
        return 'Mean...', 'Variance...', ' ', ' ', show, show, hide, hide
    if value in LOWER_UPPER_DIST:
        return '', '', 'Lower bound...', 'Upper bound...', hide, hide, show, show
    if value in SHAPE_PARAM_DIST:
        return 'Shape parameter...', ' ', '', '', show, hide, hide, hide
    if value in ALL_4:
        return 'Shape param. A...', 'Shape param. B...', 'Lower bound...', 'Upper bound...', show, show, show, show
    # Unknown selection: the original implicitly returned None (a Dash error);
    # leave the fields untouched instead.
    raise PreventUpdate
# @app.callback(
# Output({'type':'radio_pdf','index': dash.dependencies.ALL},'disabled'),
# Input('AP_button','n_clicks'),
# prevent_intial_call=True
# )
# def Toggle(n_clicks):
# changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
# print(changed_id)
# if 'basis_button' in changed_id:
# return [{'disabled':False}]
# else:
# val={'disabled':True}
# return [val]*n_clicks
###################################################################
# Callback to create EQ Param Objects
###################################################################
@app.callback(
    ServersideOutput('ParamsObject', 'data'),
    Output('ndims','data'),
    [
        Input({'type': 'params', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'order', 'index': dash.dependencies.ALL}, 'value'),
        Input('basis_button','n_clicks'),
    ],
    prevent_initial_call=True  # was misspelled "prevent_intial_call"
)
def ParamListUpload(shape_parameter_A, shape_parameter_B, distribution, max_val, min_val, order, basis_click):
    """Build the list of eq.Parameter objects when the basis button fires.

    Returns (param_list, ndims) on success, or (None, None) when any row is
    incomplete or invalid so downstream callbacks stay disabled.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'basis_button' not in changed_id:
        raise PreventUpdate
    if len(distribution) == 0:
        # Original fell off the end here, returning a bare None for two outputs.
        raise PreventUpdate
    param_list = []
    for j in range(len(distribution)):
        dist = distribution[j]
        # BUG FIX: the original validated with `(a and b and c) is None`, which
        # misses legitimate falsy values such as 0; test each field explicitly.
        if dist in MEAN_VAR_DIST:
            if any(v is None for v in (shape_parameter_A[j], shape_parameter_B[j], order[j])):
                return None, None
            if order[j] < 0:
                return None, None
            param = eq.Parameter(distribution=dist, shape_parameter_A=shape_parameter_A[j],
                                 shape_parameter_B=shape_parameter_B[j], lower=min_val[j],
                                 upper=max_val[j], order=order[j])
        elif dist in ALL_4:
            if any(v is None for v in (shape_parameter_A[j], shape_parameter_B[j],
                                       min_val[j], max_val[j], order[j])):
                return None, None
            if min_val[j] > max_val[j]:
                return None, None
            param = eq.Parameter(distribution=dist, shape_parameter_A=shape_parameter_A[j],
                                 shape_parameter_B=shape_parameter_B[j], lower=min_val[j],
                                 upper=max_val[j], order=order[j])
        elif dist in SHAPE_PARAM_DIST:
            if shape_parameter_A[j] is None or order[j] is None:
                return None, None
            param = eq.Parameter(distribution=dist, shape_parameter_A=shape_parameter_A[j],
                                 order=order[j])
        elif dist in LOWER_UPPER_DIST:
            if any(v is None for v in (min_val[j], max_val[j], order[j])):
                return None, None
            param = eq.Parameter(distribution=dist, lower=min_val[j], upper=max_val[j],
                                 order=order[j])
        else:
            # BUG FIX: an unselected dropdown left `param` unbound (NameError)
            # or re-appended the previous row's parameter.
            return None, None
        param_list.append(param)
    return param_list, len(param_list)
###################################################################
# Function to compute s_values and pdf
###################################################################
def CreateParam(distribution, shape_parameter_A, shape_parameter_B, min, max, order):
    """Build an eq.Parameter and return it together with its PDF samples.

    Note: ``min``/``max`` shadow the builtins, but callers pass them by
    keyword so the names are part of the interface and kept unchanged.

    Returns:
        tuple: (parameter object, support values, pdf values)
    """
    param = eq.Parameter(
        distribution=distribution,
        shape_parameter_A=shape_parameter_A,
        shape_parameter_B=shape_parameter_B,
        lower=min,
        upper=max,
        order=order,
    )
    support, density = param.get_pdf()
    return param, support, density
###################################################################
# Misc Callbacks
###################################################################
# More info collapsable
@app.callback(
    Output("data-info", "is_open"),
    [Input("data-info-open", "n_clicks"), Input("data-info-close", "n_clicks")],
    [State("data-info", "is_open")],
)
def toggle_modal(n1, n2, is_open):
    """Flip the data-info modal whenever either button has been clicked."""
    clicked = bool(n1) or bool(n2)
    return (not is_open) if clicked else is_open
###################################################################
# Callback to plot pdf
###################################################################
@app.callback(
    Output('plot_pdf', 'figure'),
    Input({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    [State({'type': 'params', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'order', 'index': dash.dependencies.ALL}, 'value'),
     ],
    prevent_initial_call=True
)
def PlotPdf(pdf_val, param1_val, params2_val, drop1_val, max_val, min_val, order):
    """Plot the PDF of the parameter whose 'plot pdf' toggle just fired."""
    layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
              'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True,
              "xaxis": {"title": r'$x$'}, "yaxis": {"title": 'PDF'}}
    fig = go.Figure(layout=layout)
    fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
    fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
    ctx = dash.callback_context
    # Identify which pattern-matched toggle triggered this callback.
    id = ctx.triggered[0]['prop_id'].split('.')[0]  # NOTE(review): shadows builtin id()
    idx = ast.literal_eval(id)['index']
    # A toggled-on switch reports the value [0, 'val_<idx>'].
    elem = [0, 'val_{}'.format(idx)]
    check = elem in pdf_val
    if check:
        i = pdf_val.index(elem)
        # NOTE(review): operator precedence makes this read as
        # `param1_val and (params2_val is None)`, NOT
        # `(param1_val is None) and (params2_val is None)`. Both branches call
        # CreateParam with identical arguments and differ only in trace
        # styling, so the intended condition is unclear — confirm with author.
        if param1_val and params2_val is None:
            param, s_values, pdf = CreateParam(distribution=drop1_val[i], shape_parameter_A=param1_val[i],
                                               shape_parameter_B=params2_val[i], min=min_val[i], max=max_val[i], order=order[i])
            fig.add_trace(go.Scatter(x=s_values, y=pdf, line=dict(color='rgb(0,176,246)'), fill='tonexty', mode='lines',
                                     name='Polyfit', line_width=4, line_color='black')),
        else:
            param, s_values, pdf = CreateParam(distribution=drop1_val[i], shape_parameter_A=param1_val[i],
                                               shape_parameter_B=params2_val[i], min=min_val[i], max=max_val[i], order=order[i])
            fig.add_trace(go.Scatter(x=s_values, y=pdf, line=dict(color='rgb(0,176,246)'), fill='tonexty')),
    # If the toggle was switched off, the empty styled figure is returned.
    return fig
###################################################################
# Callback to handle toggle switch in param definition card
###################################################################
@app.callback(
    Output({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    Input({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    prevent_initial_call=True
)
def setToggles(pdf_val):
    """Enforce that at most one 'plot pdf' toggle is on at a time.

    When toggle `idx` is switched on, every other toggle is reset to [0]
    (off). When the triggering toggle was switched off, the values are
    returned unchanged.
    """
    ctx = dash.callback_context
    # Identify which pattern-matched toggle triggered this callback.
    prop_id = ctx.triggered[0]['prop_id'].split('.')[0]
    idx = ast.literal_eval(prop_id)['index']
    # A toggled-on switch reports the value [0, 'val_<idx>'].
    elem = [0, 'val_{}'.format(idx)]
    if elem not in pdf_val:
        # Triggering toggle was switched off: nothing to reset.
        return pdf_val
    i = pdf_val.index(elem)
    # BUG FIX: removed the dead `test = [...]` expression; also build a new
    # list instead of mutating the callback input in place (Dash inputs
    # should be treated as read-only).
    return [elem if j == i else [0] for j in range(len(pdf_val))]
###################################################################
# Callback to disable basis card input boxes based on basis selection
###################################################################
@app.callback(
    Output('q_val', 'disabled'),
    Output('levels', 'disabled'),
    Output('basis_growth_rule', 'disabled'),
    [Input('drop_basis', 'value')],
    prevent_initial_call=True
)
def BasisShow(value):
    """Enable only the basis-card inputs relevant to the selected basis.

    Returns the `disabled` flags for (q_val, levels, basis_growth_rule).
    """
    enabled, disabled = False, True
    flags_by_basis = {
        'sparse-grid': (disabled, enabled, enabled),
        'hyperbolic-basis': (enabled, disabled, disabled),
    }
    # Any other selection (including None) disables all three inputs.
    return flags_by_basis.get(value, (disabled, disabled, disabled))
def Set_Basis(basis_val, order, level, q_val, growth_rule):
    """Thin wrapper constructing an eq.Basis from the UI selections."""
    return eq.Basis('{}'.format(basis_val), orders=order, level=level,
                    q=q_val, growth_rule=growth_rule)
def Set_Polynomial(parameters, basis, method):
    """Thin wrapper constructing an eq.Poly for the given parameters/basis/solver."""
    return eq.Poly(parameters=parameters, basis=basis, method=method)
###################################################################
# Callback for automatic selection of solver method based on basis selection
###################################################################
@app.callback(
    Output('solver_method', 'value'),
    Input('drop_basis', 'value'),
    prevent_initial_call=True
)
def SetMethod(drop_basis):
    """Pick the default solver for the selected basis type."""
    return 'least-squares' if drop_basis == 'total-order' else 'numerical-integration'
###################################################################
# Callback for setting basis
###################################################################
@app.callback(
    Output('op_box', 'value'),
    ServersideOutput('BasisObject', 'data'),
    Output('compute-warning', 'is_open'),
    Output('compute-warning', 'children'),
    Input('ParamsObject', 'data'),
    Input('basis_button', 'n_clicks'),
    State('drop_basis', 'value'),
    State('q_val', 'value'),
    State('levels', 'value'),
    State('basis_growth_rule', 'value'),
    prevent_initial_call=True
)
def SetBasis(param_obj, n_clicks, basis_select, q_val, levels, growth_rule):
    """Create the eq.Basis when the compute button is pressed.

    Returns (cardinality, basis, warning_open, warning_text); on invalid
    input the first two values are an error marker / None and the warning
    is shown.
    """
    # Only act on an actual button press, not on upstream store changes.
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'basis_button' not in changed_id:
        raise PreventUpdate
    if param_obj is None:
        return 'ERROR...', None, True, 'Incorrect parameter values'
    if basis_select is None:
        return 'Error...', None, True, 'No basis value selected'
    # BUG FIX: was `(levels or growth_rule) is None`, which is True only when
    # BOTH values are missing; a sparse grid requires both the level and the
    # growth rule, so either one missing must be rejected.
    if basis_select == 'sparse-grid' and (levels is None or growth_rule is None):
        return 'ERROR...', None, True, 'Enter the required values'
    # Per-parameter polynomial orders define the basis orders.
    basis_ord = [elem.order for elem in param_obj]
    mybasis = Set_Basis(basis_val=basis_select, order=basis_ord, level=levels,
                        q_val=q_val, growth_rule=growth_rule)
    return mybasis.get_cardinality(), mybasis, False, None
###################################################################
# Plotting Function: To plot basis 1D/2D/3D
###################################################################
@app.callback(
    Output('plot_basis', 'figure'),
    ServersideOutput('DOE', 'data'),
    Input('ParamsObject', 'data'),
    Input('BasisObject', 'data'),
    Input('solver_method', 'value'),
    Input('ndims', 'data')
)
def PlotBasis(params, mybasis, method, ndims):
    """Plot the design-of-experiments points implied by the current basis.

    Returns (figure, DOE point array). Fires only when the ParamsObject
    store changed AND a basis exists; every other path raises PreventUpdate.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'ParamsObject' in changed_id:
        if mybasis is not None:
            # Fit a poly just to get points (this isn't used elsewhere)
            mypoly = Set_Polynomial(params, mybasis, method)
            DOE = mypoly.get_points()
            layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
                      'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True,
                      "xaxis": {"title": r'$x_1$'}, "yaxis": {"title": r'$x_2$'}}
            fig = go.Figure(layout=layout)
            fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
            fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
            if ndims == 1:
                # 1-D: scatter the points along y=0 and hide the y axis.
                fig.add_trace(go.Scatter(x=DOE[:, 0], y=np.zeros_like(DOE[:, 0]), mode='markers', marker=dict(size=8, color="rgb(144, 238, 144)", opacity=1,
                              line=dict(color='rgb(0,0,0)', width=1))))
                fig.update_yaxes(visible=False)
                return fig, DOE
            elif ndims == 2:
                fig.add_trace(go.Scatter(x=DOE[:, 0], y=DOE[:, 1], mode='markers', marker=dict(size=8, color="rgb(144, 238, 144)", opacity=0.6,
                              line=dict(color='rgb(0,0,0)', width=1))))
                return fig, DOE
            elif ndims >= 3:
                # NOTE(review): for ndims > 3 only the first three dimensions
                # are plotted.
                fig.update_layout(dict(margin={'t': 0, 'r': 0, 'l': 0, 'b': 0, 'pad': 10}, autosize=True,
                    scene=dict(
                        aspectmode='cube',
                        xaxis=dict(
                            title=r'$x_1$',
                            gridcolor="white",
                            showbackground=False,
                            linecolor='black',
                            tickcolor='black',
                            ticks='outside',
                            zerolinecolor="white", ),
                        yaxis=dict(
                            title=r'$x_2$',
                            gridcolor="white",
                            showbackground=False,
                            linecolor='black',
                            tickcolor='black',
                            ticks='outside',
                            zerolinecolor="white"),
                        zaxis=dict(
                            title=r'$x_3$',
                            backgroundcolor="rgb(230, 230,200)",
                            gridcolor="white",
                            showbackground=False,
                            linecolor='black',
                            tickcolor='black',
                            ticks='outside',
                            zerolinecolor="white", ),
                    ),
                ))
                fig.add_trace(go.Scatter3d(x=DOE[:, 0], y=DOE[:, 1], z=DOE[:, 2], mode='markers',
                              marker=dict(size=8, color="rgb(144, 238, 144)", opacity=0.6, line=dict(color='rgb(0,0,0)', width=1))))
                return fig, DOE
            else:
                raise PreventUpdate
        else:
            raise PreventUpdate
    else:
        raise PreventUpdate
###################################################################
# Callback to set Poly object, calculate mean, variance, r2_score
###################################################################
@app.callback(
ServersideOutput('PolyObject', 'data'),
Output('mean', 'value'),
Output('variance', 'value'),
Output('r2_score', 'value'),
Output('input-warning','is_open'),
Output('input-warning','children'),
Output('poly-warning','is_open'),
Output('poly-warning','children'),
Trigger('CU_button', 'n_clicks'),
Input('ParamsObject', 'data'),
Input('BasisObject', 'data'),
Input('solver_method', 'value'),
Input('model_select','value'),
Input('UploadedDF','data'),
State('input_func', 'value'),
State('ndims', 'data'),
prevent_initial_call=True
)
def SetModel(params,mybasis,method,model,data,expr,ndims):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'CU_button' in changed_id:
mypoly = Set_Polynomial(params, mybasis, method)
if | |
16,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19,
19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13], dtype=int64))
>>> a2
(array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15], dtype=int64))
>>> b2
(array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, | |
color: #444;
text-decoration: none;
/* 2 */
outline: none;
box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.1);
}
/*
* Active
* `li` needed for higher specificity to override hover
*/
.uk-subnav-pill > .uk-active > * {
background: #009dd8;
color: #fff;
box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.2);
}
/* Disabled state
========================================================================== */
.uk-subnav > .uk-disabled > * {
background: none;
color: #999;
text-decoration: none;
cursor: text;
box-shadow: none;
}
/* ========================================================================
Component: Breadcrumb
========================================================================== */
/*
* 1. Remove default list style
* 2. Remove whitespace between child elements when using `inline-block`
*/
.uk-breadcrumb {
/* 1 */
padding: 0;
list-style: none;
/* 2 */
font-size: 0.001px;
}
/* Items
========================================================================== */
/*
* Reset whitespace hack
*/
.uk-breadcrumb > li {
font-size: 1rem;
vertical-align: top;
}
.uk-breadcrumb > li,
.uk-breadcrumb > li > a,
.uk-breadcrumb > li > span {
display: inline-block;
}
.uk-breadcrumb > li:nth-child(n+2):before {
content: "/";
display: inline-block;
margin: 0 8px;
}
/*
* Disabled
*/
.uk-breadcrumb > li:not(.uk-active) > span {
color: #999;
}
/* ========================================================================
Component: Pagination
========================================================================== */
/*
* 1. Remove default list style
* 2. Center pagination by default
* 3. Remove whitespace between child elements when using `inline-block`
*/
.uk-pagination {
/* 1 */
padding: 0;
list-style: none;
/* 2 */
text-align: center;
/* 3 */
font-size: 0.001px;
}
/*
* Micro clearfix
* Needed if `uk-pagination-previous` or `uk-pagination-next` sub-objects are used
*/
.uk-pagination:before,
.uk-pagination:after {
content: "";
display: table;
}
.uk-pagination:after {
clear: both;
}
/* Items
========================================================================== */
/*
* 1. Reset whitespace hack
* 2. Remove the gap at the bottom of it container
*/
.uk-pagination > li {
display: inline-block;
/* 1 */
font-size: 1rem;
/* 2 */
vertical-align: top;
}
.uk-pagination > li:nth-child(n+2) {
margin-left: 5px;
}
/*
* 1. Makes pagination more robust against different box-sizing use
* 2. Reset text-align to center if alignment modifier is used
*/
.uk-pagination > li > a,
.uk-pagination > li > span {
display: inline-block;
min-width: 16px;
padding: 3px 5px;
line-height: 20px;
text-decoration: none;
/* 1 */
box-sizing: content-box;
/* 2 */
text-align: center;
border-radius: 4px;
}
/*
* Links
*/
.uk-pagination > li > a {
background: #f7f7f7;
color: #444;
border: 1px solid rgba(0, 0, 0, 0.2);
border-bottom-color: rgba(0, 0, 0, 0.3);
background-origin: border-box;
background-image: -webkit-linear-gradient(top, #fff, #eee);
background-image: linear-gradient(to bottom, #fff, #eee);
text-shadow: 0 1px 0 #fff;
}
/*
* Hover
* 1. Apply hover style also to focus state
* 2. Remove default focus style
*/
.uk-pagination > li > a:hover,
.uk-pagination > li > a:focus {
background-color: #fafafa;
color: #444;
/* 2 */
outline: none;
background-image: none;
}
/* OnClick */
.uk-pagination > li > a:active {
background-color: #f5f5f5;
color: #444;
border-color: rgba(0, 0, 0, 0.2);
border-top-color: rgba(0, 0, 0, 0.3);
background-image: none;
box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
}
/*
* Active
*/
.uk-pagination > .uk-active > span {
background: #009dd8;
color: #fff;
border: 1px solid rgba(0, 0, 0, 0.2);
border-bottom-color: rgba(0, 0, 0, 0.4);
background-origin: border-box;
background-image: -webkit-linear-gradient(top, #00b4f5, #008dc5);
background-image: linear-gradient(to bottom, #00b4f5, #008dc5);
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
}
/*
* Disabled
*/
.uk-pagination > .uk-disabled > span {
background-color: #fafafa;
color: #999;
border: 1px solid rgba(0, 0, 0, 0.2);
text-shadow: 0 1px 0 #fff;
}
/* Previous and next navigation
========================================================================== */
.uk-pagination-previous {
float: left;
}
.uk-pagination-next {
float: right;
}
/* Alignment modifiers
========================================================================== */
.uk-pagination-left {
text-align: left;
}
.uk-pagination-right {
text-align: right;
}
/* ========================================================================
Component: Tab
========================================================================== */
.uk-tab {
margin: 0;
padding: 0;
list-style: none;
border-bottom: 1px solid #ddd;
}
/*
* Micro clearfix on the deepest container
*/
.uk-tab:before,
.uk-tab:after {
content: "";
display: table;
}
.uk-tab:after {
clear: both;
}
/*
* Items
* 1. Create position context for dropdowns
*/
.uk-tab > li {
margin-bottom: -1px;
float: left;
/* 1 */
position: relative;
}
.uk-tab > li > a {
display: block;
padding: 8px 12px 8px 12px;
border: 1px solid transparent;
border-bottom-width: 0;
color: #07D;
text-decoration: none;
border-radius: 4px 4px 0 0;
text-shadow: 0 1px 0 #fff;
}
.uk-tab > li:nth-child(n+2) > a {
margin-left: 5px;
}
/*
* Hover
* 1. Apply hover style also to focus state
* 2. Also apply if dropdown is opened
* 3. Remove default focus style
*/
.uk-tab > li > a:hover,
.uk-tab > li > a:focus,
.uk-tab > li.uk-open > a {
border-color: #ddd;
background: #fafafa;
color: #059;
/* 2 */
outline: none;
}
.uk-tab > li:not(.uk-active) > a:hover,
.uk-tab > li:not(.uk-active) > a:focus,
.uk-tab > li.uk-open:not(.uk-active) > a {
margin-bottom: 1px;
padding-bottom: 7px;
}
/* Active */
.uk-tab > li.uk-active > a {
border-color: #ddd;
border-bottom-color: transparent;
background: #fff;
color: #444;
}
/* Disabled */
.uk-tab > li.uk-disabled > a {
color: #999;
cursor: text;
}
.uk-tab > li.uk-disabled > a:hover,
.uk-tab > li.uk-disabled > a:focus,
.uk-tab > li.uk-disabled.uk-active > a {
background: none;
border-color: transparent;
}
/* Modifier: 'tab-flip'
========================================================================== */
.uk-tab-flip > li {
float: right;
}
.uk-tab-flip > li:nth-child(n+2) > a {
margin-left: 0;
margin-right: 5px;
}
/* Modifier: 'tab-responsive'
========================================================================== */
.uk-tab > li.uk-tab-responsive > a {
margin-left: 0;
margin-right: 0;
}
/*
* Icon
*/
.uk-tab-responsive > a:before {
content: "\\f0c9\\00a0";
font-family: FontAwesome;
}
/* Modifier: 'tab-center'
========================================================================== */
.uk-tab-center {
border-bottom: 1px solid #ddd;
}
.uk-tab-center-bottom {
border-bottom: none;
border-top: 1px solid #ddd;
}
.uk-tab-center:before,
.uk-tab-center:after {
content: "";
display: table;
}
.uk-tab-center:after {
clear: both;
}
/*
* 1. Using `right` to prevent vertical scrollbar caused by centering if to many tabs
*/
.uk-tab-center .uk-tab {
position: relative;
right: 50%;
border: none;
float: right;
}
.uk-tab-center .uk-tab > li {
position: relative;
right: -50%;
}
.uk-tab-center .uk-tab > li > a {
text-align: center;
}
/* Modifier: 'tab-bottom'
========================================================================== */
.uk-tab-bottom {
border-top: 1px solid #ddd;
border-bottom: none;
}
.uk-tab-bottom > li {
margin-top: -1px;
margin-bottom: 0;
}
.uk-tab-bottom > li > a {
padding-top: 8px;
padding-bottom: 8px;
border-bottom-width: 1px;
border-top-width: 0;
}
.uk-tab-bottom > li:not(.uk-active) > a:hover,
.uk-tab-bottom > li:not(.uk-active) > a:focus,
.uk-tab-bottom > li.uk-open:not(.uk-active) > a {
margin-bottom: 0;
margin-top: 1px;
padding-bottom: 8px;
padding-top: 7px;
}
.uk-tab-bottom > li.uk-active > a {
border-top-color: transparent;
border-bottom-color: #ddd;
}
/* Modifier: 'tab-grid'
========================================================================== */
/*
* 1. Create position context to prevent hidden border because of negative `z-index`
*/
.uk-tab-grid {
margin-left: -5px;
border-bottom: none;
/* 1 */
position: relative;
z-index: 0;
}
.uk-tab-grid:before {
display: block;
position: absolute;
left: 5px;
right: 0;
bottom: -1px;
border-top: 1px solid #ddd;
/* 1 */
z-index: -1;
}
.uk-tab-grid > li:first-child > a {
margin-left: 5px;
}
.uk-tab-grid > li > a {
text-align: center;
}
/*
* If `uk-tab-bottom`
*/
.uk-tab-grid.uk-tab-bottom {
border-top: none;
}
.uk-tab-grid.uk-tab-bottom:before {
top: -1px;
bottom: auto;
}
/* Modifier: 'tab-left', 'tab-right'
========================================================================== */
/* Tablet and bigger */
@media (min-width: 768px) {
.uk-tab-left,
.uk-tab-right {
border-bottom: none;
}
.uk-tab-left > li,
.uk-tab-right > li {
margin-bottom: 0;
float: none;
}
.uk-tab-left > li > a,
.uk-tab-right > li > a {
padding-top: 8px;
padding-bottom: 8px;
}
.uk-tab-left > li:nth-child(n+2) > a,
.uk-tab-right > li:nth-child(n+2) > a {
margin-left: 0;
margin-top: 5px;
}
.uk-tab-left > li.uk-active > a,
.uk-tab-right > li.uk-active > a {
border-color: #ddd;
}
/*
* Modifier: 'tab-left'
*/
.uk-tab-left {
border-right: 1px solid #ddd;
}
.uk-tab-left > li {
margin-right: -1px;
}
.uk-tab-left > li > a {
border-bottom-width: 1px;
border-right-width: 0;
}
.uk-tab-left > li:not(.uk-active) > a:hover,
.uk-tab-left > li:not(.uk-active) > a:focus {
margin-bottom: 0;
margin-right: 1px;
padding-bottom: 8px;
padding-right: 11px;
}
.uk-tab-left > li.uk-active > a {
border-right-color: transparent;
}
/*
* Modifier: 'tab-right'
*/
.uk-tab-right {
border-left: 1px solid #ddd;
}
.uk-tab-right > li {
margin-left: -1px;
}
.uk-tab-right > li > a {
border-bottom-width: 1px;
border-left-width: 0;
}
.uk-tab-right > li:not(.uk-active) > a:hover,
.uk-tab-right > li:not(.uk-active) > a:focus {
margin-bottom: 0;
margin-left: 1px;
padding-bottom: 8px;
padding-left: 11px;
}
.uk-tab-right > li.uk-active > a {
border-left-color: transparent;
}
}
/* Modifier: `uk-tab-bottom'
========================================================================== */
.uk-tab-bottom > li > a {
border-radius: 0 0 4px 4px;
}
/* Modifier: `uk-tab-left', `uk-tab-right'
========================================================================== */
/* Tablet and bigger */
@media (min-width: 768px) {
/*
* Modifier: `uk-tab-left'
*/
.uk-tab-left > li > a {
border-radius: 4px 0 0 4px;
}
/*
* Modifier: `uk-tab-right'
*/
.uk-tab-right > li > a {
border-radius: 0 4px 4px 0;
}
}
/* ========================================================================
Component: Thumbnav
========================================================================== */
/*
* 1. Gutter
* 2. Remove default list style
*/
.uk-thumbnav {
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-ms-flex-wrap: wrap;
-webkit-flex-wrap: wrap;
flex-wrap: wrap;
/* 1 */
margin-left: -10px;
margin-top: -10px;
/* 2 */
padding: 0;
list-style: none;
}
/*
* 1. Space is allocated solely based on content dimensions
* 2. Horizontal gutter is using `padding` so `uk-width-*` classes can be applied
*/
.uk-thumbnav > * {
| |
<gh_stars>0
# File: volatility_connector.py
#
# Copyright (c) 2014-2016 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from phantom.vault import Vault
# THIS Connector imports
from volatility_consts import *
import uuid
import os
import glob
import re
import shutil
import sys
import fnmatch
# Volatility imports
# pylint: disable=E0611
import volatility.conf as vol_conf
import volatility.registry as registry
import volatility.commands as vol_commands
import volatility.addrspace as addrspace
# import volatility.plugins.filescan as filescan
import volatility.plugins.vadinfo as vadinfo
import volatility.utils as vol_utils
import volatility.plugins.malware.malfind as malfind
import volatility.protos as protos
# Module-level bootstrap so Volatility can be used as a library (rather than
# via its CLI). TODO: Move these to initialize()
registry.PluginImporter()
vol_config = vol_conf.ConfObject()
registry.register_global_options(vol_config, vol_commands.Command)
registry.register_global_options(vol_config, addrspace.BaseAddressSpace)
cmds = registry.get_plugin_classes(vol_commands.Command, lower=True)
# The following argv 'work around' keeps Volatility happy (its option parser
# reads sys.argv) and _also_ allows debugging the connector as a script via pudb.
try:
    argv_temp = list(sys.argv)
except:
    # NOTE(review): bare except kept as-is; presumably guards environments
    # where sys.argv is absent — confirm before narrowing.
    pass
sys.argv = ['']
vol_config.parse_options()
class VolatilityConnector(BaseConnector):
# Identifiers for the actions this connector supports. NOTE(review): these
# presumably match the action names declared in the app's JSON metadata —
# the dispatch code is not visible in this chunk.
ACTION_ID_GET_PSINFO = "get_psinfo"
ACTION_ID_EXTRACT_PROCESS = "get_process_image"
ACTION_ID_RUN_EXHAUSTIVE_CMDS = "run_exhaustive_commands"
ACTION_ID_RUN_DRIVERSCAN = "run_driverscan"
ACTION_ID_RUN_MUTANTSCAN = "run_mutantscan"
ACTION_ID_RUN_FILESCAN = "run_filescan"
ACTION_ID_RUN_HIVELIST = "run_hivelist"
ACTION_ID_RUN_MALFIND = "run_malfind"
ACTION_ID_RUN_SHELLBAGS = "run_shellbags"
ACTION_ID_RUN_TIMELINER = "run_timeliner"
ACTION_ID_RUN_CMDSCAN = "run_cmdscan"
ACTION_ID_RUN_PRINTKEY = "run_printkey"
ACTION_ID_RUN_MFTPARSER = "run_mftparser"
ACTION_ID_RUN_SOCKSCAN = "run_sockscan"
ACTION_ID_RUN_IEHISTORY = "run_iehistory"
ACTION_ID_LIST_CONNECTIONS = "list_connections"
def __init__(self):
    """Construct the connector; no state beyond what BaseConnector sets up."""
    # Call the BaseConnectors init first
    super(VolatilityConnector, self).__init__()
def initialize(self):
    """Phantom lifecycle hook: locate the bundled vol.py before any action runs.

    NOTE(review): `self` is passed explicitly as the `result` argument; this
    works because the connector itself exposes set_status(), but a dedicated
    result object would be clearer.
    """
    return self._get_vol_py_path(self)
def _get_vol_py_path(self, result):
    """Locate the bundled vol.py under the app's dependencies folder.

    Stores the first match in ``self._vol_py_path``. ``result`` only needs a
    ``set_status()`` method and is used to report failure.
    """
    app_dir = os.path.dirname(os.path.abspath(__file__))
    dep_root = "{0}/dependencies".format(app_dir)
    found = []
    for root, dirnames, filenames in os.walk(dep_root):
        found.extend(os.path.join(root, name)
                     for name in fnmatch.filter(filenames, 'vol.py'))
    if (not found):
        return result.set_status(phantom.APP_ERROR, "Unable to find vol.py in app directory")
    # The first instance that matches is good
    self._vol_py_path = found[0]
    return (phantom.APP_SUCCESS)
def _get_profile(self, vol_config, cmds, action_result):
    """Run the 'imageinfo' plugin and return (status, suggested profile name).

    The first entry of the 'Suggested Profile(s)' row is used. On failure
    (status, None) is returned with an error recorded on action_result.
    """
    imageinfo = cmds['imageinfo'](vol_config)
    # Pre-set the failure status; overwritten if a profile is found.
    action_result.set_status(phantom.APP_ERROR, VOL_ERR_UNABLE_TO_CHOOSE_A_PROFILE)
    try:
        for label, row_type, value in imageinfo.calculate():
            if (not re.search('.*Suggested.*Profile.*', label)):
                continue
            m = re.search('(.*?),.*', value)
            if (not m):
                continue
            profile = m.group(1)
            return (action_result.set_status(phantom.APP_SUCCESS), profile)
    except Exception as e:
        action_result.set_status(phantom.APP_ERROR, VOL_ERR_GET_PROFILE, e)
    return (action_result.get_status(), None)
def _handle_psinfo(self, vault_id, vol_config, cmds, action_result):
    """Collect process info via psscan (falling back to pslist), merged with dlllist.

    Adds one dict per process to action_result and records the process count
    in the summary. ``vault_id`` is unused here; presumably kept for a common
    handler signature — confirm against the dispatcher.
    """
    # First execute the dlllist plugin to pick up command lines and image paths
    dlllist = cmds['dlllist'](vol_config)
    # the dlllist dictionary where the pid is the key
    dll_list = {}
    for obj in dlllist.calculate():
        pid = "{}".format(obj.UniqueProcessId)
        if (obj.Peb):
            curr_dict = {}
            curr_dict['command_line'] = "{}".format(str(obj.Peb.ProcessParameters.CommandLine or ''))
            dll_list[pid] = curr_dict
            modules = obj.get_load_modules()
            try:
                path = next(modules)
            except StopIteration:
                continue
            curr_dict['path'] = str(path.FullDllName)

    def _add_process(obj):
        # Build the result dict for one process object and add it to the
        # result. (FIX: this 20+ line construction was previously duplicated
        # verbatim for the psscan and pslist code paths.)
        pid = "{}".format(obj.UniqueProcessId)
        curr_dict = {
            "offset": "{}".format(hex(int(obj.obj_offset))),
            "name": "{}".format(obj.ImageFileName),
            "pid": "{}".format(obj.UniqueProcessId),
            "ppid": "{}".format(obj.InheritedFromUniqueProcessId),
            "pdb": "{}".format(hex(int(obj.Pcb.DirectoryTableBase))),
            "time_created": "{}".format(obj.CreateTime or ''),
            "time_exited": "{}".format(obj.ExitTime or ''),
            "command_line": "",
            "path": ""}
        # get info from dll list if present
        if (pid in dll_list):
            if ('command_line' in dll_list[pid]):
                curr_dict['command_line'] = dll_list[pid]['command_line']
            if ('path' in dll_list[pid]):
                curr_dict['path'] = dll_list[pid]['path']
        action_result.add_data(curr_dict)

    # Now run the psscan plugin
    psscan = cmds['psscan'](vol_config)
    num_of_processes = 0
    for obj in psscan.calculate():
        num_of_processes += 1
        _add_process(obj)
    if (not action_result.get_data_size()):
        # psscan did not complete successfully, try pslist
        self.debug_print("psscan did not yield any results, trying pslist")
        pslist = cmds['pslist'](vol_config)
        num_of_processes = 0
        for obj in pslist.calculate():
            num_of_processes += 1
            _add_process(obj)
    action_result.update_summary({VOL_JSON_NUM_PROCESSES: num_of_processes})
    return action_result.set_status(phantom.APP_SUCCESS)
def _move_file_to_vault(self, container_id, file_size, type_str, contains, local_file_path, action_result):
self.save_progress(phantom.APP_PROG_ADDING_TO_VAULT)
# lets move the data into the vault
vault_details = action_result.add_data({})
if (not file_size):
file_size = os.path.getsize(local_file_path)
vault_details[phantom.APP_JSON_SIZE] = file_size
vault_details[phantom.APP_JSON_TYPE] = type_str
vault_details[phantom.APP_JSON_CONTAINS] = contains
vault_details[phantom.APP_JSON_ACTION_NAME] = self.get_action_name()
vault_details[phantom.APP_JSON_APP_RUN_ID] = self.get_app_run_id()
file_name = os.path.basename(local_file_path)
vault_ret_dict = Vault.add_attachment(local_file_path, container_id, file_name, vault_details)
if (vault_ret_dict.get('succeeded')):
vault_details[phantom.APP_JSON_VAULT_ID] = vault_ret_dict[phantom.APP_JSON_HASH]
vault_details[phantom.APP_JSON_NAME] = file_name
action_result.set_status(phantom.APP_SUCCESS, VOL_SUCC_FILE_ADD_TO_VAULT,
vault_id=vault_ret_dict[phantom.APP_JSON_HASH])
else:
# print vault_ret_dict['message']
action_result.set_status(phantom.APP_ERROR, phantom.APP_ERR_FILE_ADD_TO_VAULT)
action_result.append_to_message('. ' + vault_ret_dict['message'])
return vault_details
    def _handle_process_extraction(self, vault_id, vault_file, profile, param):
        """Dump the executable images of the requested pids from a memory image.

        Runs volatility's ``dumpfiles`` plugin once for all pids, then adds one
        ActionResult per pid: success (file moved into the vault) or failure
        (no matching dump file found on disk).

        Returns the status of the LAST pid's action result (earlier results
        are still recorded on the connector individually).
        """
        # Create a unique temp directory to receive this vault_file's dumps
        temp_dir = "/vault/tmp/{}".format(str(uuid.uuid4()))
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if not os.path.exists(temp_dir):
            # NOTE(review): sets status on the connector (self), not on an
            # action result, unlike the other handlers — confirm intended.
            return self.set_status(phantom.APP_ERROR, VOL_ERR_CANNOT_MAKE_TEMP_FOLDER)
        # Get the comma separated pid list (required parameter)
        pid_comma_separated = phantom.get_req_value(param, phantom.APP_JSON_PID)
        # Create an array of pid, get_list_from_string will remove blanks, empty elements and duplicates
        pid_list = phantom.get_list_from_string(pid_comma_separated)
        # Create the comma separated list again without the spaces, dumpfiles spits an error
        # if the pids are anything but comma separated
        pid_comma_separated = ','.join(pid_list)
        # The volatility command, run as an external python2.7 process
        vol_command = "python2.7 {0} --filename={1} --profile={2} dumpfiles -n ".format(self._vol_py_path, vault_file, profile)
        vol_command += " --dump-dir {} -p {}".format(temp_dir, pid_comma_separated)
        # self.debug_print('vol_command', vol_command)
        # Execute it
        try:
            sout, serr, cmd_ret_code = phantom.run_ext_command(vol_command)
        except Exception as e:
            self.debug_print("Failed to execute '{0}'".format(vol_command), e)
            action_result = self.add_action_result(ActionResult(dict(param)))
            return action_result.set_status(phantom.APP_ERROR, "Failed to execute volatility command")
        # We ignore the return values of this command because it silently fails, the only
        # way to find out if the pid was extracted is to check for its presence on disk
        # and fail if not found
        for pid in pid_list:
            # Create an action result to store this pid's status
            action_result = self.add_action_result(ActionResult(dict(param)))
            # Set the parameter
            action_result.update_param({phantom.APP_JSON_VAULT_ID: vault_id, phantom.APP_JSON_PID: pid})
            # Update the summary with the profile used
            action_result.update_summary({VOL_JSON_PROFILE_USED: profile})
            # Glob pattern for the dump written by dumpfiles for this pid
            image_filename = '{}/file.{}.*.exe.img'.format(temp_dir, pid)
            # Check if it exists
            files_matched = glob.glob(image_filename)
            # Only one should match since we are giving a pid
            if (len(files_matched) == 1):
                out_file_name = files_matched[0]
                self.debug_print('File Name', out_file_name)
                self._move_file_to_vault(self.get_container_id(), os.path.getsize(out_file_name),
                        VOL_CONST_EXTRACTED_PROCESS_FILE_TYPE, [VOL_CONST_EXTRACTED_PROCESS_FILE_TYPE, 'hash'],
                        out_file_name, action_result)
            else:
                # Zero or multiple matches: treat as extraction failure for this pid
                action_result.set_status(phantom.APP_ERROR, VOL_ERR_EXTRACTED_PROCESS,
                        files_matched=len(files_matched),
                        should_match='1')
        # TODO: Write a util function to delete a non-empty directory.
        # os.rmdir or shutil.rmtree will not work
        # os.rmdir(temp_dir)
        return action_result.get_status()
def _run_vol_cmd_shell(self, vol_plugin_cmd, vault_id, vault_file, profile, action_result, additional_switch=[]):
temp_dir = "/vault/tmp/{}".format(str(uuid.uuid4()))
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if not os.path.exists(temp_dir):
return action_result.set_status(phantom.APP_ERROR, VOL_ERR_CANNOT_MAKE_TEMP_FOLDER)
out_file_name = "{0}/{1}.txt".format(temp_dir,
vol_plugin_cmd.replace(' ', '_'))
vol_command = []
vol_command.append('python2.7')
vol_command.append(self._vol_py_path)
vol_command.append("--filename={}".format(vault_file))
vol_command.append("--profile={}".format(profile))
vol_command.append(vol_plugin_cmd)
vol_command.extend(additional_switch)
self.debug_print('vol_command', vol_command)
try:
sout, serr, cmd_ret_code = phantom.run_ext_command(vol_command)
except Exception as e:
self.debug_print("Failed to execute '{0}'".format(vol_command), e)
return action_result.set_status(phantom.APP_ERROR, "Failed to execute volatility command")
if (cmd_ret_code != 0):
action_result.set_status(phantom.APP_ERROR, VOL_ERR_COMMAND, command=vol_plugin_cmd)
action_result.append_to_message('. ' + serr.strip('\r\n '))
return action_result.get_status()
# write the stdout to the file
with open(out_file_name, "w") as out_fp:
out_fp.write(sout)
# Add the name of the input vault_id file to the output file, it looks better, shows some relationship
vault_file_info = Vault.get_file_info(container_id=self.get_container_id(), vault_id=vault_id)
self.debug_print('vault_file_info: {0}'.format(vault_file_info))
if (len(vault_file_info) > 0):
generate_name = "{0}/{1}-{2}.txt".format(temp_dir,
vault_file_info[0][phantom.APP_JSON_NAME],
vol_plugin_cmd.replace(' ', '_'))
shutil.move(out_file_name, generate_name)
out_file_name = generate_name
type_str = VOL_CONST_FORENSIC_FILE_TYPE.format(vol_plugin_cmd)
self._move_file_to_vault(self.get_container_id(), os.path.getsize(out_file_name),
type_str, [type_str], out_file_name, action_result)
# TODO: Write a util function to delete a non-empty directory.
# os.rmdir or shutil.rmtree will not work
# os.rmdir(temp_dir)
return action_result.get_status()
    def _run_mftparser_cmd(self, vault_id, vault_file, profile, action_result):
        """Run the volatility 'mftparser' plugin via the shell runner and vault its output."""
        return self._run_vol_cmd_shell('mftparser', vault_id, vault_file, profile, action_result)
    def _run_timeliner_cmd(self, vault_id, vault_file, profile, action_result):
        """Run the volatility 'timeliner' plugin via the shell runner and vault its output."""
        return self._run_vol_cmd_shell('timeliner', vault_id, vault_file, profile, action_result)
    def _run_cmdscan_cmd(self, vault_id, vault_file, profile, action_result):
        """Run the volatility 'cmdscan' plugin via the shell runner and vault its output."""
        return self._run_vol_cmd_shell('cmdscan', vault_id, vault_file, profile, action_result)
def _run_printkey_cmd(self, vault_id, vault_file, profile, action_result, param):
additional_switch = []
additional_switch.append('-K')
additional_switch.append(str(param[VOL_JSON_KEY]))
if (VOL_JSON_HIVE_ADDRESS in param):
additional_switch.append('-o')
additional_switch.append(str(param[VOL_JSON_HIVE_ADDRESS]))
return self._run_vol_cmd_shell('printkey', vault_id, vault_file, profile, action_result, additional_switch)
    def _run_shellbags_cmd(self, vault_id, vault_file, profile, action_result):
        """Run the volatility 'shellbags' plugin via the shell runner and vault its output."""
        return self._run_vol_cmd_shell('shellbags', vault_id, vault_file, profile, action_result)
def _run_iehistory_cmd(self, vol_config, cmds, action_result):
iehistory = cmds['iehistory'](vol_config)
for process, hist_record in iehistory.calculate():
location = "{}".format(hist_record.Url)
# strip all the data before http if present
url_location = location.find("http")
url = location[url_location:] if (url_location != -1) else location
curr_data = {
"offset": "{}".format(hex(int(hist_record.obj_offset))),
"pid": "{}".format(process.UniqueProcessId),
"image_filename": "{}".format(process.ImageFileName),
"cache_type": "{}".format(hist_record.Signature),
"record_length": "{}".format(hist_record.Length),
"location": "{}".format(location),
"url": "{}".format(url),
}
if (hist_record.obj_name == '_URL_RECORD'):
curr_data['last_modified'] = "{}".format(hist_record.LastModified)
curr_data['last_accessed'] = "{}".format(hist_record.LastAccessed)
curr_data['file_offset'] = "{}".format(hist_record.FileOffset)
curr_data['data_offset'] = "{}".format(hist_record.DataOffset)
curr_data['data_length'] = "{}".format(hist_record.DataSize)
if (hist_record.FileOffset > 0):
curr_data['file'] = "{}".format(hist_record.File)
if (hist_record.has_data()):
curr_data['data'] = "{}".format(hist_record.Data)
action_result.add_data(curr_data)
return action_result.set_status(phantom.APP_SUCCESS)
def _list_connections(self, vol_config, cmds, action_result):
if (vol_config.PROFILE.find('WinXP') != -1):
return self._run_connscan_cmd(vol_config, cmds, action_result)
return self._run_netscan_cmd(vol_config, cmds, action_result)
| |
0x1b7c),
Register('spr2e0', 4, 0x1b80),
Register('spr2e1', 4, 0x1b84),
Register('spr2e2', 4, 0x1b88),
Register('spr2e3', 4, 0x1b8c),
Register('spr2e4', 4, 0x1b90),
Register('spr2e5', 4, 0x1b94),
Register('spr2e6', 4, 0x1b98),
Register('spr2e7', 4, 0x1b9c),
Register('spr2e8', 4, 0x1ba0),
Register('spr2e9', 4, 0x1ba4),
Register('spr2ea', 4, 0x1ba8),
Register('spr2eb', 4, 0x1bac),
Register('spr2ec', 4, 0x1bb0),
Register('spr2ed', 4, 0x1bb4),
Register('spr2ee', 4, 0x1bb8),
Register('spr2ef', 4, 0x1bbc),
Register('spr2f0', 4, 0x1bc0),
Register('spr2f1', 4, 0x1bc4),
Register('spr2f2', 4, 0x1bc8),
Register('spr2f3', 4, 0x1bcc),
Register('spr2f4', 4, 0x1bd0),
Register('spr2f5', 4, 0x1bd4),
Register('spr2f6', 4, 0x1bd8),
Register('spr2f7', 4, 0x1bdc),
Register('spr2f8', 4, 0x1be0),
Register('spr2f9', 4, 0x1be4),
Register('spr2fa', 4, 0x1be8),
Register('spr2fb', 4, 0x1bec),
Register('spr2fc', 4, 0x1bf0),
Register('spr2fd', 4, 0x1bf4),
Register('spr2fe', 4, 0x1bf8),
Register('spr2ff', 4, 0x1bfc),
Register('spr300', 4, 0x1c00),
Register('spr301', 4, 0x1c04),
Register('spr302', 4, 0x1c08),
Register('spr303', 4, 0x1c0c),
Register('spr304', 4, 0x1c10),
Register('spr305', 4, 0x1c14),
Register('spr306', 4, 0x1c18),
Register('spr307', 4, 0x1c1c),
Register('spr308', 4, 0x1c20),
Register('spr309', 4, 0x1c24),
Register('spr30a', 4, 0x1c28),
Register('spr30b', 4, 0x1c2c),
Register('spr30c', 4, 0x1c30),
Register('spr30d', 4, 0x1c34),
Register('spr30e', 4, 0x1c38),
Register('spr30f', 4, 0x1c3c),
Register('spr310', 4, 0x1c40),
Register('spr311', 4, 0x1c44),
Register('spr312', 4, 0x1c48),
Register('spr313', 4, 0x1c4c),
Register('spr314', 4, 0x1c50),
Register('spr315', 4, 0x1c54),
Register('spr316', 4, 0x1c58),
Register('spr317', 4, 0x1c5c),
Register('spr318', 4, 0x1c60),
Register('spr319', 4, 0x1c64),
Register('spr31a', 4, 0x1c68),
Register('spr31b', 4, 0x1c6c),
Register('spr31c', 4, 0x1c70),
Register('spr31d', 4, 0x1c74),
Register('spr31e', 4, 0x1c78),
Register('spr31f', 4, 0x1c7c),
Register('spr320', 4, 0x1c80),
Register('spr321', 4, 0x1c84),
Register('spr322', 4, 0x1c88),
Register('spr323', 4, 0x1c8c),
Register('spr324', 4, 0x1c90),
Register('spr325', 4, 0x1c94),
Register('spr326', 4, 0x1c98),
Register('spr327', 4, 0x1c9c),
Register('spr328', 4, 0x1ca0),
Register('spr329', 4, 0x1ca4),
Register('spr32a', 4, 0x1ca8),
Register('spr32b', 4, 0x1cac),
Register('spr32c', 4, 0x1cb0),
Register('spr32d', 4, 0x1cb4),
Register('spr32e', 4, 0x1cb8),
Register('tar', 4, 0x1cbc),
Register('spr330', 4, 0x1cc0),
Register('spr331', 4, 0x1cc4),
Register('spr332', 4, 0x1cc8),
Register('spr333', 4, 0x1ccc),
Register('spr334', 4, 0x1cd0),
Register('spr335', 4, 0x1cd4),
Register('spr336', 4, 0x1cd8),
Register('spr337', 4, 0x1cdc),
Register('spr338', 4, 0x1ce0),
Register('spr339', 4, 0x1ce4),
Register('spr33a', 4, 0x1ce8),
Register('spr33b', 4, 0x1cec),
Register('spr33c', 4, 0x1cf0),
Register('spr33d', 4, 0x1cf4),
Register('spr33e', 4, 0x1cf8),
Register('spr33f', 4, 0x1cfc),
Register('spr340', 4, 0x1d00),
Register('spr341', 4, 0x1d04),
Register('spr342', 4, 0x1d08),
Register('spr343', 4, 0x1d0c),
Register('spr344', 4, 0x1d10),
Register('spr345', 4, 0x1d14),
Register('spr346', 4, 0x1d18),
Register('spr347', 4, 0x1d1c),
Register('spr348', 4, 0x1d20),
Register('spr349', 4, 0x1d24),
Register('spr34a', 4, 0x1d28),
Register('spr34b', 4, 0x1d2c),
Register('spr34c', 4, 0x1d30),
Register('spr34d', 4, 0x1d34),
Register('spr34e', 4, 0x1d38),
Register('spr34f', 4, 0x1d3c),
Register('spr350', 4, 0x1d40),
Register('spr351', 4, 0x1d44),
Register('spr352', 4, 0x1d48),
Register('spr353', 4, 0x1d4c),
Register('spr354', 4, 0x1d50),
Register('spr355', 4, 0x1d54),
Register('spr356', 4, 0x1d58),
Register('spr357', 4, 0x1d5c),
Register('spr358', 4, 0x1d60),
Register('spr359', 4, 0x1d64),
Register('spr35a', 4, 0x1d68),
Register('spr35b', 4, 0x1d6c),
Register('spr35c', 4, 0x1d70),
Register('spr35d', 4, 0x1d74),
Register('spr35e', 4, 0x1d78),
Register('spr35f', 4, 0x1d7c),
Register('spr360', 4, 0x1d80),
Register('spr361', 4, 0x1d84),
Register('spr362', 4, 0x1d88),
Register('spr363', 4, 0x1d8c),
Register('spr364', 4, 0x1d90),
Register('spr365', 4, 0x1d94),
Register('spr366', 4, 0x1d98),
Register('spr367', 4, 0x1d9c),
Register('spr368', 4, 0x1da0),
Register('spr369', 4, 0x1da4),
Register('spr36a', 4, 0x1da8),
Register('spr36b', 4, 0x1dac),
Register('spr36c', 4, 0x1db0),
Register('spr36d', 4, 0x1db4),
Register('spr36e', 4, 0x1db8),
Register('spr36f', 4, 0x1dbc),
Register('spr370', 4, 0x1dc0),
Register('spr371', 4, 0x1dc4),
Register('spr372', 4, 0x1dc8),
Register('spr373', 4, 0x1dcc),
Register('spr374', 4, 0x1dd0),
Register('spr375', 4, 0x1dd4),
Register('spr376', 4, 0x1dd8),
Register('spr377', 4, 0x1ddc),
Register('spr378', 4, 0x1de0),
Register('spr379', 4, 0x1de4),
Register('spr37a', 4, 0x1de8),
Register('spr37b', 4, 0x1dec),
Register('spr37c', 4, 0x1df0),
Register('spr37d', 4, 0x1df4),
Register('spr37e', 4, 0x1df8),
Register('spr37f', 4, 0x1dfc),
Register('spr380', 4, 0x1e00),
Register('spr381', 4, 0x1e04),
Register('spr382', 4, 0x1e08),
Register('spr383', 4, 0x1e0c),
Register('spr384', 4, 0x1e10),
Register('spr385', 4, 0x1e14),
Register('spr386', 4, 0x1e18),
Register('spr387', 4, 0x1e1c),
Register('spr388', 4, 0x1e20),
Register('spr389', 4, 0x1e24),
Register('spr38a', 4, 0x1e28),
Register('spr38b', 4, 0x1e2c),
Register('spr38c', 4, 0x1e30),
Register('spr38d', 4, 0x1e34),
Register('spr38e', 4, 0x1e38),
Register('spr38f', 4, 0x1e3c),
Register('spr390', 4, 0x1e40),
Register('spr391', 4, 0x1e44),
Register('spr392', 4, 0x1e48),
Register('spr393', 4, 0x1e4c),
Register('spr394', 4, 0x1e50),
Register('spr395', 4, 0x1e54),
Register('spr396', 4, 0x1e58),
Register('spr397', 4, 0x1e5c),
Register('spr398', 4, 0x1e60),
Register('spr399', 4, 0x1e64),
Register('spr39a', 4, 0x1e68),
Register('spr39b', 4, 0x1e6c),
Register('spr39c', 4, 0x1e70),
Register('spr39d', 4, 0x1e74),
Register('spr39e', 4, 0x1e78),
Register('spr39f', 4, 0x1e7c),
Register('spr3a0', 4, 0x1e80),
Register('spr3a1', 4, 0x1e84),
Register('spr3a2', 4, 0x1e88),
Register('spr3a3', 4, 0x1e8c),
Register('spr3a4', 4, 0x1e90),
Register('spr3a5', 4, 0x1e94),
Register('spr3a6', 4, 0x1e98),
Register('spr3a7', 4, 0x1e9c),
Register('spr3a8', 4, 0x1ea0),
Register('spr3a9', 4, 0x1ea4),
Register('spr3aa', 4, 0x1ea8),
Register('spr3ab', 4, 0x1eac),
Register('spr3ac', 4, 0x1eb0),
Register('spr3ad', 4, 0x1eb4),
Register('spr3ae', 4, 0x1eb8),
Register('spr3af', 4, 0x1ebc),
Register('spr3b0', 4, 0x1ec0),
Register('spr3b1', 4, 0x1ec4),
Register('spr3b2', 4, 0x1ec8),
Register('spr3b3', 4, 0x1ecc),
Register('spr3b4', 4, 0x1ed0),
Register('spr3b5', 4, 0x1ed4),
Register('spr3b6', 4, 0x1ed8),
Register('spr3b7', 4, 0x1edc),
Register('spr3b8', 4, 0x1ee0),
Register('spr3b9', 4, 0x1ee4),
Register('spr3ba', 4, 0x1ee8),
Register('spr3bb', 4, 0x1eec),
Register('spr3bc', 4, 0x1ef0),
Register('spr3bd', 4, 0x1ef4),
Register('spr3be', 4, 0x1ef8),
Register('spr3bf', 4, 0x1efc),
Register('spr3c0', 4, 0x1f00),
Register('spr3c1', 4, 0x1f04),
Register('spr3c2', 4, 0x1f08),
Register('spr3c3', 4, 0x1f0c),
Register('spr3c4', 4, 0x1f10),
Register('spr3c5', 4, 0x1f14),
Register('spr3c6', 4, 0x1f18),
Register('spr3c7', 4, 0x1f1c),
Register('spr3c8', 4, 0x1f20),
Register('spr3c9', 4, 0x1f24),
Register('spr3ca', 4, 0x1f28),
Register('spr3cb', 4, 0x1f2c),
Register('spr3cc', 4, 0x1f30),
Register('spr3cd', 4, 0x1f34),
Register('spr3ce', 4, 0x1f38),
Register('spr3cf', 4, 0x1f3c),
Register('spr3d0', 4, 0x1f40),
Register('spr3d1', 4, 0x1f44),
Register('spr3d2', 4, 0x1f48),
Register('spr3d3', 4, 0x1f4c),
Register('spr3d4', 4, 0x1f50),
Register('spr3d5', 4, 0x1f54),
Register('spr3d6', 4, 0x1f58),
Register('spr3d7', 4, 0x1f5c),
Register('spr3d8', 4, 0x1f60),
Register('spr3d9', 4, 0x1f64),
Register('spr3da', 4, 0x1f68),
Register('spr3db', 4, 0x1f6c),
Register('spr3dc', 4, 0x1f70),
Register('spr3dd', 4, 0x1f74),
Register('spr3de', 4, 0x1f78),
Register('spr3df', 4, 0x1f7c),
Register('spr3e0', 4, 0x1f80),
Register('spr3e1', 4, 0x1f84),
Register('spr3e2', 4, 0x1f88),
Register('spr3e3', 4, 0x1f8c),
Register('spr3e4', 4, 0x1f90),
Register('spr3e5', 4, 0x1f94),
Register('spr3e6', 4, 0x1f98),
Register('spr3e7', 4, 0x1f9c),
Register('spr3e8', 4, 0x1fa0),
Register('spr3e9', 4, 0x1fa4),
Register('spr3ea', 4, 0x1fa8),
Register('spr3eb', 4, 0x1fac),
Register('spr3ec', 4, 0x1fb0),
Register('spr3ed', 4, 0x1fb4),
Register('spr3ee', 4, 0x1fb8),
Register('spr3ef', 4, 0x1fbc),
Register('spr3f0', 4, 0x1fc0),
Register('spr3f1', 4, 0x1fc4),
Register('spr3f2', 4, 0x1fc8),
Register('spr3f3', 4, 0x1fcc),
Register('spr3f4', 4, 0x1fd0),
Register('spr3f5', 4, 0x1fd4),
Register('spr3f6', 4, 0x1fd8),
Register('spr3f7', 4, 0x1fdc),
Register('spr3f8', 4, 0x1fe0),
Register('spr3f9', 4, 0x1fe4),
Register('spr3fa', 4, 0x1fe8),
Register('spr3fb', 4, 0x1fec),
Register('spr3fc', 4, 0x1ff0),
Register('spr3fd', 4, 0x1ff4),
Register('spr3fe', 4, 0x1ff8),
Register('spr3ff', 4, 0x1ffc),
Register('vs0', 16, 0x4000),
Register('f0', 8, 0x4008),
Register('vs1', 16, 0x4010),
Register('f1', 8, 0x4018),
Register('vs2', 16, 0x4020),
Register('f2', 8, 0x4028),
Register('vs3', 16, 0x4030),
Register('f3', 8, 0x4038),
Register('vs4', 16, 0x4040),
Register('f4', 8, 0x4048),
Register('vs5', 16, 0x4050),
Register('f5', 8, 0x4058),
Register('vs6', 16, 0x4060),
Register('f6', 8, 0x4068),
Register('vs7', 16, 0x4070),
Register('f7', 8, 0x4078),
Register('vs8', 16, 0x4080),
Register('f8', 8, 0x4088),
Register('vs9', 16, 0x4090),
Register('f9', 8, 0x4098),
Register('vs10', 16, 0x40a0),
Register('f10', 8, 0x40a8),
Register('vs11', 16, 0x40b0),
Register('f11', 8, 0x40b8),
Register('vs12', 16, 0x40c0),
Register('f12', 8, 0x40c8),
Register('vs13', 16, 0x40d0),
Register('f13', 8, 0x40d8),
Register('vs14', 16, 0x40e0),
Register('f14', 8, 0x40e8),
Register('vs15', 16, 0x40f0),
Register('f15', 8, 0x40f8),
Register('vs16', 16, 0x4100),
Register('f16', 8, 0x4108),
Register('vs17', 16, 0x4110),
Register('f17', 8, 0x4118),
Register('vs18', 16, 0x4120),
Register('f18', 8, 0x4128),
Register('vs19', 16, 0x4130),
Register('f19', 8, 0x4138),
Register('vs20', 16, 0x4140),
Register('f20', 8, 0x4148),
Register('vs21', 16, 0x4150),
Register('f21', 8, 0x4158),
Register('vs22', 16, 0x4160),
Register('f22', 8, 0x4168),
Register('vs23', 16, 0x4170),
Register('f23', 8, 0x4178),
Register('vs24', 16, 0x4180),
Register('f24', 8, 0x4188),
Register('vs25', 16, 0x4190),
Register('f25', 8, 0x4198),
Register('vs26', 16, 0x41a0),
Register('f26', 8, 0x41a8),
Register('vs27', 16, 0x41b0),
Register('f27', 8, 0x41b8),
Register('vs28', 16, 0x41c0),
Register('f28', 8, 0x41c8),
Register('vs29', 16, 0x41d0),
Register('f29', 8, 0x41d8),
Register('vs30', 16, 0x41e0),
Register('f30', 8, 0x41e8),
Register('vs31', 16, 0x41f0),
Register('f31', 8, 0x41f8),
Register('vs32', 16, 0x4200),
Register('vr0_64_1', 8, 0x4200),
Register('vr0_32_3', 4, 0x4200),
Register('vr0_16_7', 2, 0x4200),
Register('vr0_8_15', 1, 0x4200),
Register('vr0_8_14', 1, 0x4201),
Register('vr0_16_6', 2, 0x4202),
Register('vr0_8_13', 1, 0x4202),
Register('vr0_8_12', 1, 0x4203),
Register('vr0_32_2', 4, 0x4204),
Register('vr0_16_5', 2, 0x4204),
Register('vr0_8_11', 1, 0x4204),
Register('vr0_8_10', 1, 0x4205),
Register('vr0_16_4', 2, 0x4206),
Register('vr0_8_9', 1, 0x4206),
Register('vr0_8_8', 1, 0x4207),
Register('vr0_64_0', 8, 0x4208),
Register('vr0_32_1', 4, 0x4208),
Register('vr0_16_3', 2, 0x4208),
Register('vr0_8_7', 1, 0x4208),
Register('vr0_8_6', 1, 0x4209),
Register('vr0_16_2', 2, 0x420a),
Register('vr0_8_5', 1, 0x420a),
Register('vr0_8_4', 1, 0x420b),
Register('vr0_32_0', 4, 0x420c),
Register('vr0_16_1', 2, 0x420c),
Register('vr0_8_3', 1, 0x420c),
Register('vr0_8_2', 1, 0x420d),
Register('vr0_16_0', 2, 0x420e),
Register('vr0_8_1', 1, 0x420e),
Register('vr0_8_0', 1, 0x420f),
Register('vs33', 16, 0x4210),
Register('vr1_64_1', 8, 0x4210),
Register('vr1_32_3', 4, 0x4210),
Register('vr1_16_7', 2, 0x4210),
Register('vr1_8_15', 1, 0x4210),
Register('vr1_8_14', 1, 0x4211),
Register('vr1_16_6', 2, 0x4212),
Register('vr1_8_13', 1, 0x4212),
Register('vr1_8_12', 1, 0x4213),
Register('vr1_32_2', 4, 0x4214),
Register('vr1_16_5', 2, 0x4214),
Register('vr1_8_11', 1, 0x4214),
Register('vr1_8_10', 1, 0x4215),
Register('vr1_16_4', 2, 0x4216),
Register('vr1_8_9', 1, 0x4216),
Register('vr1_8_8', 1, 0x4217),
Register('vr1_64_0', 8, 0x4218),
Register('vr1_32_1', 4, 0x4218),
Register('vr1_16_3', 2, 0x4218),
Register('vr1_8_7', 1, 0x4218),
Register('vr1_8_6', 1, 0x4219),
Register('vr1_16_2', 2, 0x421a),
Register('vr1_8_5', 1, 0x421a),
Register('vr1_8_4', 1, 0x421b),
Register('vr1_32_0', 4, 0x421c),
Register('vr1_16_1', 2, 0x421c),
Register('vr1_8_3', 1, 0x421c),
Register('vr1_8_2', 1, 0x421d),
Register('vr1_16_0', 2, 0x421e),
Register('vr1_8_1', 1, 0x421e),
Register('vr1_8_0', 1, 0x421f),
Register('vs34', 16, 0x4220),
Register('vr2_64_1', 8, 0x4220),
Register('vr2_32_3', 4, 0x4220),
Register('vr2_16_7', 2, 0x4220),
Register('vr2_8_15', 1, 0x4220),
Register('vr2_8_14', 1, 0x4221),
Register('vr2_16_6', 2, 0x4222),
Register('vr2_8_13', 1, 0x4222),
Register('vr2_8_12', 1, 0x4223),
Register('vr2_32_2', 4, 0x4224),
Register('vr2_16_5', 2, 0x4224),
Register('vr2_8_11', 1, 0x4224),
Register('vr2_8_10', | |
=\n{}'.format(ub.repr2(result.cpu().numpy(), precision=2)))
>>> print(result.shape)
Ignore:
import xdev
globals().update(xdev.get_func_kwargs(warp_tensor))
>>> # xdoctest: +REQUIRES(module:torch)
>>> import cv2
>>> inputs = torch.arange(9).view(1, 1, 3, 3).float() + 2
>>> input_dims = inputs.shape[2:]
>>> #output_dims = (6, 6)
>>> def fmt(a):
>>> return ub.repr2(a.numpy(), precision=2)
>>> s = 2.5
>>> output_dims = tuple(np.round((np.array(input_dims) * s)).astype(int).tolist())
>>> mat = torch.FloatTensor([[s, 0, 0], [0, s, 0], [0, 0, 1]])
>>> inv = mat.inverse()
>>> warp_tensor(inputs, mat, output_dims)
>>> print('## INPUTS')
>>> print(fmt(inputs))
>>> print('\nalign_corners=True')
>>> print('----')
>>> print('## warp_tensor, align_corners=True')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=True)))
>>> print('## interpolate, align_corners=True')
>>> print(fmt(F.interpolate(inputs, output_dims, mode='bilinear', align_corners=True)))
>>> print('\nalign_corners=False')
>>> print('----')
>>> print('## warp_tensor, align_corners=False, new_mode=False')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=False)))
>>> print('## warp_tensor, align_corners=False, new_mode=True')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=False, new_mode=True)))
>>> print('## interpolate, align_corners=False')
>>> print(fmt(F.interpolate(inputs, output_dims, mode='bilinear', align_corners=False)))
>>> print('## interpolate (scale), align_corners=False')
>>> print(ub.repr2(F.interpolate(inputs, scale_factor=s, mode='bilinear', align_corners=False).numpy(), precision=2))
>>> cv2_M = mat.cpu().numpy()[0:2]
>>> src = inputs[0, 0].cpu().numpy()
>>> dsize = tuple(output_dims[::-1])
>>> print('\nOpen CV warp Result')
>>> result2 = (cv2.warpAffine(src, cv2_M, dsize=dsize, flags=cv2.INTER_LINEAR))
>>> print('result2 =\n{}'.format(ub.repr2(result2, precision=2)))
"""
if mode == 'linear':
mode = 'bilinear'
output_dims = tuple(map(int, output_dims))
# Determine the number of space-time dimensions
ndims = len(output_dims)
# https://discuss.pytorch.org/t/affine-transformation-matrix-paramters-conversion/19522
input_dims = inputs.shape[-ndims:]
prefix_dims = inputs.shape[:-ndims]
# Normalize the inputs so they are in 4D or 5D standard form
# I.e. either [B, C, H, W] or [B, C, D, H, W]
# We need exactly two non-spacetime (prefix) dims
if len(prefix_dims) < 2:
# Create a dummy batch / channel dimension
_part1 = [1] * (2 - len(prefix_dims))
_part2 = [-1] * len(inputs.shape)
_input_expander = _part1 + _part2
inputs_ = inputs.expand(*_input_expander)
elif len(prefix_dims) > 2:
fake_b = np.prod(prefix_dims[:-1])
fake_c = prefix_dims[-1]
# Consolodate leading dimensions into the batch dim
inputs_ = inputs.view(fake_b, fake_c, *input_dims)
else:
inputs_ = inputs
device = inputs.device
input_size = torch.Tensor(np.array(input_dims[::-1]))[None, :, None]
input_size = input_size.to(device) # [1, ndims, 1]
if len(mat.shape) not in [2, 3]:
raise ValueError('Invalid mat shape')
if mat.shape[-1] not in [3, 4] or mat.shape[-1] not in [2, 3, 4]:
# if tuple(mat.shape) != (2, 2):
raise ValueError(
'mat must have shape: '
# '(..., 2, 2) or '
'(..., 2, 3) or (..., 3, 3)'
' or (..., 3, 4) or (..., 4, 4)'
)
# Ensure that mat is a 3x3 matrix, and check if it is affine or projective
if mat.shape[-2] != mat.shape[-1]:
_homog_row = [0] * (mat.shape[-1] - 1) + [1]
homog_row = torch.Tensor(_homog_row).to(mat.device)
homog_row = homog_row.expand_as(mat[..., 0:1, :])
mat = torch.cat([homog_row, mat], dim=len(mat.shape) - 2)
ishomog = False
if ishomog is None:
ishomog = False # set to true for non-affine
if mat.shape[-2] == 3:
if not torch.all(mat[-2] != torch.Tensor([0, 0, 1])):
ishomog = True
inv = mat if isinv else mat.inverse()
if len(inv.shape) == 2:
inv = inv[None, :]
if inv.device != device:
inv = inv.to(device)
# Construct a homogenous coordinate system in the output frame where the
# input is aligned with the top left corner.
# X = ndims + 1 if ishomog else ndims
X = ndims + 1
if not TORCH_GRID_SAMPLE_HAS_ALIGN:
import warnings
warnings.warn('cannot use new mode in warp_tensor when torch < 1.3')
new_mode = False
# NOTE: grid_sample in torch<1.3 does not support align_corners=False correctly
unwarped_coords = _coordinate_grid(output_dims, align_corners=align_corners) # [X, *DIMS]
unwarped_coords = unwarped_coords.to(device)
unwarped_coords_ = unwarped_coords.view(1, X, -1) # [1, X, prod(DIMS)]
warped_coords = inv.matmul(unwarped_coords_)
if ishomog:
# If we had a projective projective transform we unhomogenize
warped_coords = warped_coords[:, 0:ndims] / warped_coords[:, ndims]
else:
# For affine we can simply discard the homogenous component
warped_coords = warped_coords[:, 0:ndims]
# Normalized the warped coordinates that align with the input to [-1, +1]
# Anything outside of the input range is mapped outside of [-1, +1]
if align_corners:
grid_coords = warped_coords * (2.0 / (input_size)) # normalize from [0, 2]
grid_coords -= 1.0 # normalize from [-1, +1]
else:
grid_coords = warped_coords * (2.0 / (input_size - 1.0)) # normalize from [0, 2]
grid_coords -= 1.0 # normalize from [-1, +1]
if new_mode:
# HACK: For whatever reason the -1,+1 extremes doesn't point to the
# extreme pixels, but applying this squish factor seems to help.
# The idea seems to be that if the input dims are D x D the
# ((D - 1) / D)-th value is what points to the middle of the bottom
# right input pixel and not (+1, +1).
# Need to figure out what's going on in a more principled way.
input_dims_ = torch.FloatTensor(list(input_dims))
squish = ((input_dims_ - 1.0) / (input_dims_))
grid_coords = grid_coords * squish[None, :, None]
if False:
# Debug output coords
print('### unwarped')
print(unwarped_coords[0:2])
print('### warped')
print(warped_coords.view(2, *output_dims))
print('### grid')
print(grid_coords.view(2, *output_dims))
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-1.0, -1.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-2 / 3, -2 / 3]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[0.0, 0.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[2 / 3, 2 / 3]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[1.0, 1.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_[:, :, 0:2, 0:2], torch.FloatTensor(
[[[[-1 / 2, -1 / 2]]]]), mode='bilinear', align_corners=False)
inputs_ = torch.arange(16).view(1, 1, 4, 4).float() + 1
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-3 / 4, -3 / 4]]]]), mode='bilinear', align_corners=False)
for f in np.linspace(0.5, 1.0, 10):
print('f = {!r}'.format(f))
print(F.grid_sample(inputs_, torch.FloatTensor(
[[[[f, f]]]]), mode='bilinear', align_corners=False))
# The warped coordinate [-1, -1] will references to the left-top pixel of
# the input, analgously [+1, +1] references the right-bottom pixel of the
# input.
# Note: that -1, -1 refers to the center of the first pixel, not the edge.
# See:
# https://github.com/pytorch/pytorch/issues/20785
# https://github.com/pytorch/pytorch/pull/23923
# https://github.com/pytorch/pytorch/pull/24929
# https://user-images.githubusercontent.com/9757500/58150486-c5315900-7c34-11e9-9466-24f2bd431fa4.png
# # Note: Was unable to quite figure out how to use F.affine_grid
# gride_shape = torch.Size((B, C,) + tuple(output_dims))
# grid = F.affine_grid(inv[None, 0:2], gride_shape)
# outputs = F.grid_sample(inputs, grid)
# return outputs
# Reshape to dimensions compatible with grid_sample
grid_coords = grid_coords.transpose(1, 2) # swap space/coord dims
_reshaper = [1] + list(output_dims) + [ndims]
grid_coords = grid_coords.reshape(*_reshaper) # Unpack dims
_expander = [inputs_.shape[0]] + list(output_dims) + [ndims]
grid_coords = grid_coords.expand(*_expander)
# grid_coords = grid_coords.to(device)
# TODO: pass align_corners when supported in torch 1.3
# Note: enabling this breaks tests and backwards compat, so
# verify there are no problems before enabling this.
if new_mode and TORCH_GRID_SAMPLE_HAS_ALIGN:
# the new grid sample allows you to set align_corners, but I don't
# remember if the previous logic depends on the old behavior.
outputs_ = F.grid_sample(inputs_, grid_coords, mode=mode,
padding_mode=padding_mode,
align_corners=bool(align_corners))
else:
# The old grid sample always had align_corners=True
outputs_ = F.grid_sample(inputs_, grid_coords, mode=mode,
padding_mode=padding_mode,
align_corners=True)
# Unpack outputs to match original input shape
final_dims = list(prefix_dims) + list(output_dims)
outputs = outputs_.view(*final_dims)
return outputs
def subpixel_align(dst, src, index, interp_axes=None):
"""
Returns an aligned version of the source tensor and destination index.
Used as the backend to implement other subpixel functions like:
subpixel_accum, subpixel_maximum.
"""
if interp_axes is None:
# Assume spatial dimensions are trailing
interp_axes = len(dst.shape) + np.arange(-min(2, len(index)), 0)
raw_subpixel_starts = np.array([0 if sl.start is None else sl.start
for sl in index])
raw_subpixel_stops = np.array([dst.shape[i] if sl.stop is None else sl.stop
for i, sl in enumerate(index)])
raw_extent = raw_subpixel_stops - raw_subpixel_starts
if not ub.iterable(src):
# Broadcast scalars
impl = kwarray.ArrayAPI.impl(dst)
shape = tuple(raw_extent.astype(int).tolist())
src = impl.full(shape, dtype=dst.dtype, fill_value=src)
if not np.all(np.isclose(src.shape, raw_extent, atol=0.3)):
raise ValueError(
'Got src.shape = {}, but the raw slice extent was {}'.format(
tuple(src.shape), tuple(raw_extent)))
if True:
# check that all non interp slices are integral
noninterp_axes = np.where(~kwarray.boolmask(interp_axes, len(dst.shape)))[0]
for i in noninterp_axes:
assert raw_subpixel_starts[i] % 1 == 0
assert raw_subpixel_stops[i] % 1 == 0
# Clip off any out of bounds
subpixel_st, extra_padding = _rectify_slice(
dst.shape, raw_subpixel_starts, raw_subpixel_stops)
subpixel_starts = np.array([a[0] for a in subpixel_st])
subpixel_stops = np.array([a[1] for a in subpixel_st])
subpixel_pad_left = np.array([a[0] for a in extra_padding])
# subpixel_pad_right = np.array([a[1] for a in extra_padding])
# Any fractional start dimension will be a positive translate
translation | |
# Repository: woolfson-group/isambard (GitHub stars: 1-10)
"""Base class for bio-inspired optimizers."""
import operator
import random
from deap import creator, tools
import numpy
from optimisation.base_evo_opt import BaseOptimizer, Parameter, default_build
class DE(BaseOptimizer):
    """Differential evolution optimisation algorithm.

    Notes
    -----
    Can use neighbourhood model to reduce chance of getting stuck
    in local optima. This is a very versatile algorithm, and its use
    is recommended.

    Parameters
    ----------
    specification : ampal.specification.assembly_specification
        An `Assembly` level specification to be optimised.
    sequences : [str]
        A list of sequences, one for each polymer.
    parameters : [base_ev_opt.Parameter]
        A list of `Parameter` objects in the same order as the
        function signature expects.
    build_fn : function((spec, seq, params)) -> ampal
        A function for building a model using parameters supplied
        by the optimizer.
    eval_fn : function(ampal) -> float
        An evaluation function that assesses an AMPAL object and
        returns a float. This float will be used to compare models.
        The optimizer uses the thermodynamic convention that lower
        numbers are better.
    cxpb : float
        The probability of crossing two individuals.
    diff_weight : float
        A scaling factor for crossing.
    neighbours : int or None
        If not `None`, uses a neighbourhood model to reduce the
        likelihood of the optimisation getting stuck in a local
        optima. The number of particles to use as neighbours can
        be provided as an int.
    """

    def __init__(self, specification, sequences, parameters, build_fn, eval_fn,
                 cxpb=0.75, diff_weight=1, neighbours=None, **kwargs):
        super().__init__(
            specification, sequences, parameters,
            build_fn=build_fn, eval_fn=eval_fn, **kwargs)
        self.cxpb = cxpb
        self.diff_weight = diff_weight
        self.neighbours = neighbours
        # Individuals are plain lists of internal coordinates in [-1, 1].
        # NOTE(review): assumes creator.FitnessMin is already registered
        # (presumably by BaseOptimizer) -- confirm.
        creator.create("Individual", list, fitness=creator.FitnessMin)

    def _generate(self):
        """Generates an individual using the creator function.

        Notes
        -----
        The position is uniformly randomly seeded within the allowed
        internal bounds of [-1, 1] per dimension. `ident` and
        `neighbours` are filled in later by `_initialize_pop`.

        Returns
        -------
        individual object
        """
        ind = creator.Individual(
            [random.uniform(-1, 1)
             for _ in range(len(self.value_means))])
        ind.ident = None
        ind.neighbours = None
        return ind

    def _initialize_pop(self, pop_size):
        """Assigns indices to individuals in population."""
        self.toolbox.register("individual", self._generate)
        self.toolbox.register("population", tools.initRepeat,
                              list, self.toolbox.individual)
        self.population = self.toolbox.population(n=pop_size)
        if self.neighbours:
            # Ring topology: each individual sees `self.neighbours` indices
            # on either side (wrapping around).  Unlike PSO below, the
            # individual itself is not part of its own neighbourhood.
            for i in range(len(self.population)):
                self.population[i].ident = i
                self.population[i].neighbours = list(
                    set(
                        [(i - x) % len(self.population)
                         for x in range(1, self.neighbours + 1)] +
                        [(i + x) % len(self.population)
                         for x in range(1, self.neighbours + 1)]
                    ))
        self.assign_fitnesses(self.population)
        return

    def _crossover(self, ind):
        """Used by the evolution process to generate a new individual.

        Notes
        -----
        This is a tweaked version of the classical DE crossover
        algorithm, the main difference that candidate parameters are
        generated using a lognormal distribution. Bound handling is
        achieved by resampling where the candidate solution exceeds +/-1

        Parameters
        ----------
        ind : deap individual

        Returns
        -------
        y : deap individual
            An individual representing a candidate solution, to be
            assigned a fitness.
        """
        # Pick three distinct donors, either from ind's neighbourhood
        # or from the whole population.
        if self.neighbours:
            a, b, c = random.sample([self.population[i]
                                     for i in ind.neighbours], 3)
        else:
            a, b, c = random.sample(self.population, 3)
        y = self.toolbox.clone(a)
        y.ident = ind.ident
        y.neighbours = ind.neighbours
        del y.fitness.values
        # y is a clone of donor `a` carrying ind's identity; selected
        # elements are perturbed below by the weighted difference (b - c).
        # Forcing index `ident` guarantees at least one element changes.
        ident = random.randrange(len(self.value_means))
        for i, value in enumerate(y):
            if i == ident or random.random() < self.cxpb:
                entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
                    self.diff_weight * (b[i] - c[i])
                # Resample out-of-bounds candidates; after 10000 failed
                # tries fall back to the (in-bounds) donor value.
                tries = 0
                while abs(entry) > 1.0:
                    tries += 1
                    entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
                        self.diff_weight * (b[i] - c[i])
                    if tries > 10000:
                        entry = a[i]
                y[i] = entry
        return y

    def _update_pop(self, pop_size):
        """Updates population according to crossover and fitness criteria."""
        candidates = []
        for ind in self.population:
            candidates.append(self._crossover(ind))
        self._model_count += len(candidates)
        self.assign_fitnesses(candidates)
        # deap Fitness comparison respects the weights, so `>` means
        # "better" (i.e. a lower raw score under FitnessMin).
        for i in range(len(self.population)):
            if candidates[i].fitness > self.population[i].fitness:
                self.population[i] = candidates[i]
        return
class PSO(BaseOptimizer):
    """A particle swarm optimization algorithm.

    Notes
    -----
    This is good for avoiding bias and premature minimization, though
    it may struggle to find the ultimate optimum solution. Supports
    the neighbourhood model. Bound handling is achieved by allowing
    particles to exceed permitted bounds, but not assigning them a
    fitness in this case.

    Parameters
    ----------
    specification : ampal.specification.assembly_specification
        An `Assembly` level specification to be optimised.
    sequences : [str]
        A list of sequences, one for each polymer.
    parameters : [base_ev_opt.Parameter]
        A list of `Parameter` objects in the same order as the
        function signature expects.
    build_fn : function((spec, seq, params)) -> ampal
        A function for building a model using parameters supplied
        by the optimizer.
    eval_fn : function(ampal) -> float
        An evaluation function that assesses an AMPAL object and
        returns a float. This float will be used to compare models.
        The optimizer uses the thermodynamic convention that lower
        numbers are better.
    max_speed : float
        The maximum speed that a particle can have in the swarm.
    neighbours : int or None
        If not `None`, uses a neighbourhood model to reduce the
        likelihood of the optimisation getting stuck in a local
        optima. The number of particles to use as neighbours can
        be provided as an int.
    """

    def __init__(self, specification, sequences, parameters, build_fn, eval_fn,
                 max_speed=0.75, neighbours=None, **kwargs):
        super().__init__(
            specification, sequences, parameters,
            build_fn=build_fn, eval_fn=eval_fn, **kwargs)
        # BUG FIX: these were previously hard-coded to 0.75 and None,
        # silently discarding the caller-supplied arguments.
        self.max_speed = max_speed
        self.neighbours = neighbours
        creator.create("Particle", list, fitness=creator.FitnessMin,
                       speed=list, smin=None, smax=None, best=None)
        self.toolbox.register("particle", self._generate)
        # can this pick up the global fitness?
        creator.create("Swarm", list, gbest=None, gbestfit=creator.FitnessMin)
        self.toolbox.register("swarm", tools.initRepeat,
                              creator.Swarm, self.toolbox.particle)

    def _initialize_pop(self, pop_size):
        """Generates initial population with random positions and speeds."""
        self.population = self.toolbox.swarm(n=pop_size)
        if self.neighbours:
            # Ring topology: each particle sees `self.neighbours` indices
            # on either side, plus itself.
            for i in range(len(self.population)):
                self.population[i].ident = i
                self.population[i].neighbours = list(
                    set(
                        [(i - x) % len(self.population)
                         for x in range(1, self.neighbours + 1)] +
                        [i] +
                        [(i + x) % len(self.population)
                         for x in range(1, self.neighbours + 1)]
                    ))
        else:
            # Fully-connected topology: everyone neighbours everyone.
            for i in range(len(self.population)):
                self.population[i].ident = i
                self.population[i].neighbours = [
                    x for x in range(len(self.population))]
        self.assign_fitnesses(self.population)
        # Seed each particle's personal best with its starting state,
        # copying the fitness *values* (not the Fitness object).
        for part in self.population:
            part.best = creator.Particle(part)
            part.best.fitness.values = part.fitness.values
        return

    def _generate(self):
        """Generates a particle using the creator function.

        Notes
        -----
        Position and speed are uniformly randomly seeded within
        allowed bounds. The particle also has speed limit settings
        taken from global values.

        Returns
        -------
        part : particle object
            A particle used during optimisation.
        """
        part = creator.Particle(
            [random.uniform(-1, 1)
             for _ in range(len(self.value_means))])
        part.speed = [
            random.uniform(-self.max_speed, self.max_speed)
            for _ in range(len(self.value_means))]
        part.smin = -self.max_speed
        part.smax = self.max_speed
        part.ident = None
        part.neighbours = None
        return part

    def update_particle(self, part, chi=0.729843788, c=2.05):
        """Constriction factor update particle method.

        Notes
        -----
        Looks for a list of neighbours attached to a particle and
        uses the particle's best position and that of the best
        neighbour.  Implements Clerc's constriction update
        v' = chi * (v + c1*r1*(pbest - x) + c2*r2*(nbest - x)),
        expressed below as v' = v + [chi*(c1.. + c2..) - (1 - chi)*v].
        """
        neighbour_pool = [self.population[i] for i in part.neighbours]
        # deap Fitness comparison respects the weights, so max() selects
        # the best (lowest raw score) neighbour under FitnessMin.
        best_neighbour = max(neighbour_pool, key=lambda x: x.best.fitness)
        ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
        ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
        ce1_p = map(operator.mul, ce1, map(operator.sub, part.best, part))
        ce2_g = map(operator.mul, ce2, map(
            operator.sub, best_neighbour.best, part))
        chi_list = [chi] * len(part)
        chi_list2 = [1 - chi] * len(part)
        a = map(operator.sub,
                map(operator.mul, chi_list, map(operator.add, ce1_p, ce2_g)),
                map(operator.mul, chi_list2, part.speed))
        part.speed = list(map(operator.add, part.speed, a))
        # Clamp each velocity component to the particle's speed limits.
        for i, speed in enumerate(part.speed):
            if speed < part.smin:
                part.speed[i] = part.smin
            elif speed > part.smax:
                part.speed[i] = part.smax
        part[:] = list(map(operator.add, part, part.speed))
        return

    def _update_pop(self, pop_size):
        """Assigns fitnesses to particles that are within bounds."""
        valid_particles = []
        invalid_particles = []
        for part in self.population:
            if any(x > 1 or x < -1 for x in part):
                invalid_particles.append(part)
            else:
                valid_particles.append(part)
        self._model_count += len(valid_particles)
        # NOTE(review): particles are moved *before* fitness assignment, so
        # a fitness is evaluated at the post-move position -- confirm this
        # ordering is intentional.
        for part in valid_particles:
            self.update_particle(part)
        self.assign_fitnesses(valid_particles)
        for part in valid_particles:
            if part.fitness > part.best.fitness:
                part.best = creator.Particle(part)
                # BUG FIX: copy the fitness values rather than aliasing the
                # Fitness object itself (`part.best.fitness = part.fitness`);
                # sharing the object would let later in-place fitness updates
                # silently overwrite the recorded personal best.  This now
                # matches the copy performed in _initialize_pop.
                part.best.fitness.values = part.fitness.values
        # Out-of-bounds particles still move, but get no fitness this round.
        for part in invalid_particles:
            self.update_particle(part)
        self.population[:] = valid_particles + invalid_particles
        self.population.sort(key=lambda x: x.ident)  # shouldn't need to sort?
        return
class GA(BaseOptimizer):
"""A classic genetic algorithm optimization algorithm.
Notes
-----
Very good for eliminating unfavourable regions of the search space.
Can be heavily customized in terms of mutation and crossover operators
etc. Bound handling is achieved simply by amending any out of
bounds parameters to the boundary value.
Parameters
----------
specification : ampal.specification.assembly_specification
An `Assembly` level specification to be optimised.
sequences : [str]
A list of sequences, one for each polymer.
parameters : [base_ev_opt.Parameter]
A list of `Parameter` objects in the same order as the
function signature expects.
build_fn : function((spec, seq, params)) -> ampal
A function for building | |
(only to ensure they are consistent) so
the mapping of integers to physical surface types is irrelevant.
colMeasField: The string for the field associated with a column
measurement. This can be any field with exactly one extra
dimension, provided it has NaN's at the same levels as other
fields where appropriate. The canonical field to use here is
the retrieved CO mixing ratio profile.
'''
@staticmethod
def parm_list():
return ['time', 'longitude', 'inFieldNames', 'outFieldNames',
'outUnits', 'logNormal', 'dimLabels', 'dimSizes', 'timeStart',
'timeStop', 'timeComparison', 'fillVal', 'solZenAngCutoff',
'solZenAng', 'dayTime', 'surfTypeField', 'colMeasField']
    @staticmethod
    def required_parms():
        """Return {parameter name: (help text, cast hint)} for this filter.

        The cast hint is interpreted by the IO interface: None means a
        plain string; other values ('list', 'listoflists', 'time',
        'decimal', 'bool') select a conversion.  Lists documented as
        co-indexed must parallel inFieldNames.
        """
        return {'time' : ('The name of the field containing timestamps. ' \
                          'Timestamps are assumed to be in the TAI-93 format.' \
                          '\n{ MOPITT - TIME }', None),
                'longitude' : ('The name of the field containing longitudes ' \
                               'at cell centers. Longitudes should be in ' \
                               'degrees east.\n{ MOPITT - Longitude }', None),
                'inFieldNames' : ('The names of the fields desired to be ' \
                                  'output. Input as comma-delimited list.', \
                                  'list'),
                'outFieldNames': ('The names of the output variables. (even ' \
                                  'if they are to be the same as input ' \
                                  'variables). Should be a comma-delimited ' \
                                  'list co-indexed to inFieldNames', 'list'),
                'outUnits' : ('The units of the variables to be written out.' \
                              ' Should be a comma-delimited list co-indexed '\
                              'to inFieldNames', 'list'),
                'logNormal' : ('List of boolean strings that specify how to ' \
                               'take the averages of the corresponding fields.'\
                               ' If the string is "True" that field is ' \
                               'averaged assuming a lognormal distribution. ' \
                               'If the string is "False" that field is ' \
                               'averaged assuming a normal distribution. ' \
                               'Should be a comma-delimited list co-indexed ' \
                               'to inFieldNames', 'list'),
                'dimLabels' : ('List of names of the extra dimensions in the ' \
                               'output file. Must be a semicolon-delimited ' \
                               'list of comma-delimited strings. Fields with no'\
                               'extra dimensions may be left blank. ' \
                               'For example, if there are four inFields, the ' \
                               'first and third of which have no extra ' \
                               'dimensions, the second of which has one ("foo"),'\
                               ' and the fourth has two ("foo" and "bar"), the '\
                               'dimLabels entry should look like this: '\
                               ';foo;;foo,bar The outer (semicolon-delimited) '\
                               'list must be co-indexed to inFieldNames',
                               'listoflists'),
                'dimSizes' : ('List of the sizes of the extra dimensions in the' \
                              ' output file. Must be a semicolon-delimited list'\
                              ' of comma-delimited lists of integers. Fields'\
                              'with no extra dimensions may be left blank. ' \
                              'For example, if there are four inFields, the ' \
                              'first and third of which have no extra ' \
                              'dimensions, the second of which has one (which ' \
                              'has length four), and the fourth has two (which '\
                              'have lengths four and five, respectively), the '\
                              'dimSizes entry should look like this: ;4;;4,5 ' \
                              'The outer (semicolon-delimited) list must be ' \
                              'co-indexed to inFieldNames and all sub-lists ' \
                              'should be the same size as the corresponding ' \
                              'sublist in dimLabels.', 'listoflists'),
                'timeStart' : ('The earliest time for which data should be ' \
                               'recorded into the output file. All times ' \
                               'before this time in the input file(s) will ' \
                               'be filtered out. Must be in the format: hh:' \
                               'mm:ss_MM-DD-YYYY', 'time'),
                'timeStop' : ('The latest time for which data should be ' \
                              'recorded into the output file. All times after'\
                              ' this time in the input file(s) will be ' \
                              'filtered out. Must be in the format: ' \
                              'hh:mm:ss_MM-DD-YYYY','time'),
                'timeComparison' : ('Must be set to either "local" or "UTC". '\
                                    'Determines how the file timestamps are ' \
                                    'compared to the start/stop time. If set '\
                                    'to "local", then the file timestamps are ' \
                                    'converted to local time on a pixel-by-pixel'\
                                    ' basis (using longitude to estimate time ' \
                                    'zone) before being compared to time ' \
                                    'boundaries. If set to "UTC" the file ' \
                                    'timestamps (which are assumed to be in UTC)'\
                                    ' are compared against the start/stop time '\
                                    'directly.', None),
                'fillVal' : ('The value to use as a fill value in the output '\
                             'netCDF file. This value will replace any '\
                             'missing or invalid output values', 'decimal'),
                'solZenAngCutoff' : ('The solar zenith angle that defines the '\
                                     'day to night transition (we use the SZA '\
                                     'to separate day and night pixels, which '\
                                     'should not be averaged together), in ' \
                                     'degrees. The geometric value here would ' \
                                     'be 90. Recommended value is 85.',
                                     'decimal'),
                'solZenAng' : ('The name of the field containing the solar' \
                               ' zenith angle in degrees. { MOPITT - Solar ' \
                               'Zenith Angle }', None),
                'dayTime' : ('Boolean variable that indicates ' \
                             'whether the output file should contain ' \
                             'values from day or night. If set to ' \
                             '"True" the output file will have ' \
                             'daylight values. If set to "False" ' \
                             'the output file will have night ' \
                             'values.', 'bool'),
                'surfTypeField' : ('The name of the field containing the ' \
                                   'surface type index.\n{ MOPITT - Surface ' \
                                   'Index }', None),
                'colMeasField' : ('The name of the field containing the ' \
                                  'column measurement that will be used to ' \
                                  'determine which levels are valid in a ' \
                                  'cell. Canonically the retrieved CO mixing' \
                                  ' ratio profile field. It is assumed that ' \
                                  'the field will have a layer dimension first' \
                                  ' and a 2-element second dimension (for ' \
                                  'values and std devs) of which we want the ' \
                                  'first slice.\n{ MOPITT - Retrieved CO Mixing '\
                                  'Ratio Profile }', None)}
# variable signifying which list is to act as the master list index
__userKeys__ = "inFieldNames"
def __init__(self, pDict):
'''Convert input to format of parent input'''
# make a shallow copy to the parameter dict, as we'll be making changes
# and we don't want to mutate the argument
parmDict = dict(pDict)
# even though IO interface handles casting already,
# a catchblock has been added here for safety
# in case someone wants to use this class directly
castDict = {'time':str, 'longitude':str,
'inFieldNames':list, 'outFieldNames':list,
'outUnits':list, 'logNormal':list,
'dimLabels':list, 'dimSizes':list,
'timeStart':tai93conv, 'timeStop':tai93conv,
'timeComparison':str, 'fillVal':float,
'solZenAngCutoff':float, 'solZenAng':str,
'dayTime':bool, 'surfTypeField':str,
'colMeasField':str}
for (k,func) in castDict.items():
try:
parmDict[k] = func(parmDict[k])
except TypeError:
pass
# by this point times are already converted to TAI93 standard
# no need to convert here
parmDict['timeConv'] = lambda(x):x
# remove extraneous entries in parmDict. They will be incorporated in
# weighting and filtering functions
SZAcut = parmDict.pop('solZenAngCutoff')
SZAfield = parmDict.pop('solZenAng')
dayTime = parmDict.pop('dayTime')
surfField = parmDict.pop('surfTypeField')
colMeasField = parmDict.pop('colMeasField')
dayBool = dayTime
# note which was chosen
parmDict['notes'] = 'All values %s with cutoff at %6.2f' % \
('daytime' if dayBool else 'nighttime', SZAcut)
# create weighting function
def wghtFunc(parser, index, prevWght):
'''
Values not explicitly weighted. Values not in desired part of
diurnal cycle (as determined by solar zenith angle) are given weight
of 0 and therefore not included in final average
'''
SZA = parser.get_cm(SZAfield, index)
if dayBool and SZA <= SZAcut:
# we want day and it's day
return 1
elif not dayBool and SZA >= SZAcut:
# we want night and it's night
return 1
else:
return 0
parmDict['weightFunction'] = wghtFunc
# create filtering function
def filterFunc(parser, indStack):
'''
Filter is twofold. First filter checks if any surface type makes
up 75% of the pixels in the cell. If it does, all other surface
types are rejected. Second filter checks if column retrievals have
different numbers of valid retrievals. If they do, then the pixels
in the minority are rejected. In the | |
3 - - / - - - - - -
# 7 - - 1 1 1 - / - 4 4 4 4 - / - 6 6 6 6 -
# 6 - - 1 1 1 - / - 4 4 4 4 - / - 6 6 6 6 -
# 5 - - 1 1 1 - / - 4 4 4 4 - / - - - - - -
# 4 - - - - - - / - 4 4 4 4 - / - 7 7 7 7 -
# 3 - 2 2 2 2 - / - 4 4 4 4 - / - 7 7 7 7 -
# 2 - 2 2 2 2 - / - 4 4 4 4 - / - 7 7 7 7 -
# 1 - 2 2 2 2 - / - - - - - - / - 7 7 7 7 -
# 0 - - - - - - / - - - - - - / - - - - - -
# 0 1 2 3 4 5 / 1 3 4 5 6 7 / 4 5 6 7 8 9
# (ts 0) (ts 1) (ts 2)
#
s.n_features_ts_postmerge = [3, 1, 3]
s.new_feature_ind_ts = 0
# Define features
s.data_features_in = [
(((3, 10), (5, 12), 0, 0), "genesis"),
(((2, 5), (4, 8), 1, 0), "genesis"),
(((1, 1), (4, 3), 2, 0), "genesis"),
(((4, 8), (5, 11), 3, 1), "continuation"),
(((3, 2), (6, 7), 4, 1), "merging/splitting"),
(((5, 9), (7, 11), 5, 2), "lysis"),
(((5, 6), (8, 7), 6, 2), "lysis"),
(((5, 1), (8, 4), 7, 2), "lysis"),
]
s.data_neighbors = [(3, 4)]
s.data_feature_types_out = {
0: "genesis",
1: "genesis",
2: "genesis",
3: "merging/splitting",
5: "lysis",
6: "lysis",
7: "lysis",
}
# Feature index pairs for edges (note that the merged feature will
# inherit the lowest id of the original features)
s.data_es_inds_in = [(0, 3), (1, 4), (2, 4), (3, 5), (4, 6), (4, 7)]
s.data_es_inds_out = [(0, 3), (1, 3), (2, 3), (3, 5), (3, 6), (3, 7)]
# Data for successor probabilities
# Format: ((<fids0>), (<fids1>), <ovlp>, <ind>)
fi, fo = s.get_features_n_in, s.get_features_n_out
s.data_size_ovlp_in = [
(fi(0), fi(3), (4,), 0), # 0 <-> 3
(fi(1, 2), fi(4), (6, 4), 0), # [12]<-> 4 / 0
(fi(1, 2), fi(4), (6, 4), 1), # [12]<-> 4 / 1
(fi(3), fi(5), (3,), 0), # 3 <-> 5
(fi(4), fi(6, 7), (4, 6), 0), # 4 <->[67] / 0
(fi(4), fi(6, 7), (4, 6), 1), # 4 <->[67] / 1
]
s.data_size_ovlp_out = [
(fo(0, 1, 2), fo(3), (4, 7, 4), 0), # [012]<-> 3 / 0
(fo(0, 1, 2), fo(3), (4, 7, 4), 1), # [012]<-> 3 / 1
(fo(0, 1, 2), fo(3), (4, 7, 4), 2), # [012]<-> 3 / 2
(fo(3), fo(5, 6, 7), (3, 4, 6), 0), # 3 <->[567] / 0
(fo(3), fo(5, 6, 7), (3, 4, 6), 1), # 3 <->[567] / 1
(fo(3), fo(5, 6, 7), (3, 4, 6), 2), # 3 <->[567] / 2
]
super().setUp()
# Some parameter for the checks
s.ts_merge = 1
s.merged_features = [3, 4]
s.merged_feature_type = "merging/splitting"
# Run test
s.run_test()
s.check_results()
def test_2(s):
# Set some parameters
s.nx, s.ny = 10, 18
s.connectivity = 8
s.f_size, s.f_ovlp = 0.5, 0.5
#
# 18 - - - - - - / - - - - - / - - - - - -
# 17 - - 0 0 0 - / - 4 4 - - / - - - - - -
# 16 - - 0 0 0 - / - 4 4 - - / - 8 8 8 8 -
# 15 - - 0 0 0 - / - - - - - / - 8 8 8 8 -
# 14 - - 0 0 0 - / - 5 5 5 - / - 8 8 8 8 -
# 13 - - - - - - / - 5 5 5 - / - - - - - -
# 12 - - - - - - / - 5 5 5 - / - 9 9 9 - -
# 11 - 1 1 1 1 1 / - 6 6 6 - / - 9 9 9 - -
# 10 - 1 1 1 1 1 / - 6 6 6 - / - 9 9 9 - -
# 9 - 1 1 1 1 1 / - 6 6 6 - / - 9 9 9 - -
# 8 - - - - - - / - 6 6 6 - / - 9 9 9 - -
# 7 - - 2 2 2 - / - 6 6 6 - / - 9 9 9 - -
# 6 - - 2 2 2 - / - 6 6 6 - / - 9 9 9 - -
# 5 - - 2 2 2 - / - 6 6 6 - / - 9 9 9 - -
# 4 - - 2 2 2 - / - 6 6 6 - / - 9 9 9 - -
# 3 - - - - - - / - 6 6 6 - / - 9 9 9 - -
# 2 - - 3 3 3 - / - 7 7 7 - / - 9 9 9 - -
# 1 - - 3 3 3 - / - 7 7 7 - / - - - - - -
# 0 - - - - - - / - - - - - / - - - - - -
# 0 1 2 3 4 5 / 1 3 4 5 6 / 3 4 5 6 7 8
# (ts 0) (ts 1) (ts 2)
#
s.n_features_ts_postmerge = [4, 2, 2]
s.new_feature_ind_ts = 1
# Define features: (((x0, y0), (x1, y1), id, ts), type)
s.data_features_in = [
(((2, 14), (4, 17), 0, 0), "genesis/splitting"),
(((1, 9), (5, 11), 1, 0), "genesis"),
(((2, 4), (4, 7), 2, 0), "genesis"),
(((2, 1), (4, 2), 3, 0), "genesis"),
(((3, 16), (4, 17), 4, 1), "continuation"),
(((3, 12), (5, 14), 5, 1), "continuation"),
(((3, 3), (5, 11), 6, 1), "merging"),
(((3, 1), (5, 2), 7, 1), "continuation"),
(((4, 14), (7, 16), 8, 2), "merging/lysis"),
(((4, 2), (6, 12), 9, 2), "merging/lysis"),
]
s.data_neighbors = [(5, 6), (6, 7)]
s.data_feature_types_out = {
0: "genesis/splitting",
1: "genesis",
2: "genesis",
3: "genesis",
4: "continuation",
5: "merging/splitting",
8: "merging/lysis",
9: "lysis",
}
# Feature index pairs for edges (note that the merged feature will
# inherit the lowest id of the original features)
s.data_es_inds_in = [
(0, 4),
(0, 5),
(1, 6),
(2, 6),
(3, 7),
(4, 8),
(5, 8),
(6, 9),
(7, 9),
]
s.data_es_inds_out = [
(0, 4),
(0, 5),
(1, 5),
(2, 5),
(3, 5),
(4, 8),
(5, 8),
(5, 9),
]
# Data for successor probabilities
# Format: ((<fids0>), (<fids1>), <ovlps> <ind>)
fi, fo = s.get_features_n_in, s.get_features_n_out
s.data_size_ovlp_in = [
(fi(0), fi(4, 5), (4, 2), 0), # 0 <->[45] / 0
(fi(0), fi(4, 5), (4, 2), 1), # 0 <->[45] / 1
(fi(1, 2), fi(6), (9, 8), 0), # [12]<-> 6 / 0
(fi(1, 2), fi(6), (9, 8), 1), # [12]<-> 6 / 1
(fi(3), fi(7), (4,), 0), # 3 <-> 7
(fi(4, 5), fi(8), (1, 2), 0), # [45]<-> 8 / 0
(fi(4, 5), fi(8), (1, 2), 1), # [45]<-> 8 / 1
(fi(6, 7), fi(9), (18, 2), 0), # [67]<-> 9 / 0
(fi(6, 7), fi(9), (18, 2), 1), # | |
set using the type string given"""
from collections import defaultdict
self.typestr = typestr
self.ndims = 0
self.funcs = []
self.types = self.__class__.expandFTypeStr(typestr)
#log("For typestr %s, got types of %s" % (typestr, self.types))
self.funcs = [self.parse(s) for s in self.types]
self.times = defaultdict(float)
self.times.update(typestr=typestr, types=self.types)
def splitStr(self, s):
"""Splits a given string and checks for correctness.
Also converts a support with an arg into (support char, int param)"""
colorspaces = ['l', 'rgb', 'hsv', 'xyz', 'mo']
norms = 'nme' # none, mean, energy
supports = 'hpnmv' # histogram, pixel with scaling, pixel without scaling, mean, variance
color, norm, support = s.lower().split('.')
assert color in ''.join(colorspaces)
assert norm in norms
assert support[0] in supports # support can have a modifier after it to specify a param
if len(support) > 1:
support = (support[0], int(support[1:]))
return (color, norm, support)
def parse(self, s):
"""Parses a string and returns functions [colorspace, normalization, support]"""
color, norm, support = self.splitStr(s)
ret = []
# first handle the color spaces
def colorfunc(color):
"""Returns a function which returns an image (1 channel, all values from 0-255).
Uses the cache to store expensive image conversions"""
def colorfuncret(im, mask, cache, c=color):
def get(idx, val, cache=cache, im=im, mask=mask):
if cache is None: return lazy(val)
if idx not in cache:
cache[idx] = lazy(val)
return cache[idx]
if c in 'l': # convert to grayscale
return get('l', "im.convert('L')")
elif c in 'rgb': # split into bands and return the appropriate one
return get('rgb', "im.split()")['rgb'.index(c)]
elif c in 'hsv': # convert to hsv and return the appropriate band
return get('hsv', "rgb2hsv(im).split()")['hsv'.index(c)]
elif c in 'xyz': # convert to xyz and return appropriate band
return get('xyz', "rgb2xyz(im).split()")['xyz'.index(c)]
elif c == 'm': # use gradient magnitude
return get('m', "im.filter(ImageFilter.FIND_EDGES).convert('L')")
elif c == 'o': # use gradient orientation
return get('o', "getGradientOrientation(im.convert('L'))")
return colorfuncret
ret.append(colorfunc(color))
# now handle the normalization
def normfunc(norm):
"""Returns a function which returns a sequence (all values from 0 to roughly 255.0)."""
def normfuncret(im, mask, n=norm):
if n == 'n': # no normalization
return [v for v,m in izip(im.getdata(), mask.getdata()) if m > 0]
elif n == 'm': # mean normalization
return meanNormalize(im, mask)
elif n == 'e': # energy normalization
return energyNormalize(im, mask)
return normfuncret
ret.append(normfunc(norm))
# now handle the aggregation (either pixel, histogram, or statistics)
def scalefunc(seq):
mean = getMean(seq)
stddev = getStdDev(seq)
seq = [energyNormFunc(x, mean, stddev) for x in seq]
return [x/255.0 for x in seq]
identity = lambda x: x
if support[0] == 'p': # single pixel, with scaling
if len(support) > 1:
# subsampling
f = lambda seq: scalefunc(seq)[::support[1]]
else:
f = scalefunc
self.ndims += 1
elif support[0] == 'n': # single pixel, with no scaling
if len(support) > 1:
# subsampling
f = lambda seq: identity(seq)[::support[1]]
else:
f = scalefunc
self.ndims += 1
elif support[0] == 'h': # histogram
# optional nbins parameter
nbins = DEFAULT_HIST_NBINS if len(support) == 1 else support[1]
binsize = (256//nbins) + 1 # to make sure we have no more than nbins
def binfunc(x, binsize=binsize):
return x//binsize
f = HistType(identity, binfunc, nbins)
self.ndims += nbins
elif support == 'm': # mean
f = lambda seq: [getMean(seq)/255.0]
elif support == 'v': # variance
def varfunc(seq):
mean = getMean(seq)
if mean == 0:
var = 0
else:
var = getVariance([s/(mean+EPSILON) for s in seq])
return [var]
f = varfunc
ret.append(f)
return ret
    def compute(self, im, mask, **kw):
        """Computes features for the given image and mask.

        Runs each parsed (colorfunc, normfunc, aggrfunc) triple from
        self.funcs over the image and concatenates the results into a flat
        array of floats.  Per-stage wall-clock timings are accumulated into
        self.times.
        """
        from array import array
        ret = array('f')  # flat float feature vector, built up per func triple
        t1 = time.time()
        cache = {}  # shared per-call cache of expensive colorspace conversions
        #TODO deal with outparams, fiducials, fmtdict (all passed in kw)
        for colorfunc, normfunc, aggrfunc in self.funcs:
            c1 = time.time()
            # 1) convert to the requested single-band colorspace image
            cim = colorfunc(im, mask, cache)
            c2 = time.time()
            #cim.save('color.png')
            # 2) extract (and optionally normalize) the masked pixel values
            vals = normfunc(cim, mask)
            c3 = time.time()
            if NUMPY:
                #vals = numpy.array(vals)
                pass
            #log("Got %s after normfunc, with mean %s, min %s, max %s, stddev %s" % (vals, getMean(vals), min(vals), max(vals), getStdDev(vals)))
            # 3) aggregate to the final feature values (pixels/histogram/stats)
            vals = aggrfunc(vals)
            c4 = time.time()
            #log("Got %s after aggrfunc, with mean %s, min %s, max %s, stddev %s" % (vals, getMean(vals), min(vals), max(vals), getStdDev(vals)))
            ret.extend(vals)
            c5 = time.time()
            self.times['colorfunc'] += c2-c1
            self.times['normfunc'] += c3-c2
            self.times['aggrfunc'] += c4-c3
            self.times['Extend ret'] += c5-c4
        #log(len(ret))
        t2 = time.time()
        self.times['Total Fset Compute Time'] += t2-t1
        return ret
class MaskFunc(object):
"""A class to generate masking functions (regions).
Given a 'mask string', this generates a masking function.
This function takes in an input image and returns a mask image of the same size.
The mask image is of type '1' (binary), and is 1 where the mask is ON (valid).
The typical usage is like this:
mfunc = MaskFunc('+left eye;+right eye')
for im in images:
mask = mfunc.compute(im)
The mask string is composed of an arbitrary number of elements joined using ';'
where each element is of the form:
<bool><shape>
The <bool> is either '+' or '-' to turn that shape on or off.
The mask is progressively iterated upon, starting with 0 (completely off),
with each mask element modifying the mask so far.
The <shape> is either a primitive shape or a pre-defined region (more common).
Primitives are defined as:
<primitive type>:<coords>
The different primitive types and the coords they take are:
rect: minx, miny, maxx, maxy
poly: a series of x,y points
oval: the bounding rect of the oval (minx, miny, maxx, maxy)
The coords are always specified in flat comma-separated lists.
The coordinate system is as follows:
- the left and right edges of the image are x=-1 and x=1
- this allows for easy symmetry, since x=0 is the middle
- the top and bottom edges of the image are y=0 and y=1
Pre-defined regions are defined in the config file, and are composed
of an arbitrary number of other regions (which must eventually resolve
down to primitives). The mask string can contain either:
<region>
or:
<region>*<x-factor>,<y-factor>
The x- and y-factors are used to scale each primitive in the region by the
given factor in each direction, relative to the primitive's center. In the
first form given above, both factors are assumed to be 1.
The config file is in JSON format, with all the data under the 'regions' field.
If there is no 'regions' field, then it tries to use the whole json structure.
Here is an example:
{"regions": {
"all": {
"comps": [{"shape": "rect", "coords": [-1,0,1,1]}],
},
"left arm": {
"align": "front",
"comps": [{"shape": "rect", "coords": [-0.1,0,-0.5,1]}]
},
"right arm": {
"align": "front",
"comps": [{"shape": "rect", "coords": [0.5,0,1,1]}]
},
"arms": {
"comps": [{"region": "left arm"}, {"region": "right arm"}]
}
}}
Each region is defined using a name, and contains fields:
'align': [optional] The name of the alignment this region requires.
'comps': A list of components this region is made of. Each component
is either a primitive or a reference to another region.
The primitives are defined as:
'shape': One of the primitive shape types
'coords': The list of coordinates for this shape
The references to other regions are defined as:
'region': The name of the other region to substitute in here.
Upon parsing, all references will be recursively expanded out to
primitives. Make sure there is no infinite loop!
A single MaskFunc can only have a single 'align' type. This is to keep the
final interface simple. You can access this using the 'align' parameter.
"""
SHAPES = 'rect oval poly'.split()
def __init__(self, maskstr, shapeconfig=REGIONS_FNAME):
"""Creates a new mask function using the given string to initialize it"""
# read the shape elements
try:
shapedict = self.readConfig(shapeconfig)
except Exception:
# couldn't read shapeconfig for any reason
shapedict = {}
# split our maskstr into elements and simplify them
els = []
self.align = None # this will be overwritten later
for s in maskstr.strip().split(';'):
els.extend(self.simplify(s.strip(), shapedict))
# now compute our list of shape funcs
self.maskstr = maskstr
self.shapes = els[:]
self.shapefuncs = [self.getShapeFunc(e, i+1) for i, e in enumerate(els)]
# | |
"opt-application"
],
"template": "user-application",
"template_variables": {
"OK1": "foo",
"OK2": "com.example.foo"
}
}
}
}
}'''
self._verify_manifest(m, expected=True)
def test_verify_manifest_full_bad(self):
'''Test verify_manifest (full bad)'''
m = '''{
"security": {
"profiles": {
"/com.example.foo": {
"abstractions": [
"audio",
"gnome"
],
"author": "<NAME>",
"binary": "/usr/foo/**",
"comment": "some free-form single-line comment",
"copyright": "Unstructured single-line copyright statement",
"name": "foo",
"policy_groups": [
"user-application",
"opt-application"
],
"read_path": [
"/tmp/foo_r",
"/tmp/bar_r/"
],
"template": "user-application",
"template_variables": {
"VAR1": "f*o",
"VAR2": "*foo",
"VAR3": "fo*",
"VAR4": "b{ar",
"VAR5": "b{a,r}",
"VAR6": "b}ar",
"VAR7": "bar[0-9]",
"VAR8": "b{ar",
"VAR9": "/tmp/../etc/passwd"
},
"write_path": [
"/tmp/foo_w",
"/tmp/bar_w/"
]
}
}
}
}'''
self._verify_manifest(m, expected=False, invalid=True)
def test_verify_manifest_binary(self):
    '''Test verify_manifest (binary in /usr)'''
    # Minimal manifest whose binary lives under /usr; expected to verify.
    manifest = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/usr/foo/**",
"template": "user-application"
}
}
}
}'''
    self._verify_manifest(manifest, expected=True)
def test_verify_manifest_profile_profile_name_bad(self):
    '''Test verify_manifest (bad profile_name)'''
    # Leading '/' in the profile name: rejected with invalid=True.
    m = '''{
"security": {
"profiles": {
"/foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application"
}
}
}
}'''
    self._verify_manifest(m, expected=False, invalid=True)
    # Glob in the profile name: expected to fail (invalid not set here).
    m = '''{
"security": {
"profiles": {
"bin/*": {
"binary": "/opt/com.example/foo/**",
"template": "user-application"
}
}
}
}'''
    self._verify_manifest(m, expected=False)
def test_verify_manifest_profile_profile_name(self):
    '''Test verify_manifest (profile_name)'''
    # A well-formed reverse-domain profile name should verify cleanly.
    manifest = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application"
}
}
}
}'''
    self._verify_manifest(manifest, expected=True)
def test_verify_manifest_profile_abstractions(self):
    '''Test verify_manifest (abstractions)'''
    # The 'base' abstraction is accepted for this template.
    m = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"abstractions": [
"base"
]
}
}
}
}'''
    self._verify_manifest(m, expected=True)
def test_verify_manifest_profile_abstractions_bad(self):
    '''Test verify_manifest (bad abstractions)'''
    # 'user-tmp' is expected to be rejected — presumably it is not in the
    # allowed abstraction set for this template; confirm in verify_manifest.
    m = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"abstractions": [
"user-tmp"
]
}
}
}
}'''
    self._verify_manifest(m, expected=False)
def test_verify_manifest_profile_template_var(self):
    '''Test verify_manifest (good template_var)'''
    # Plain literal values — including one containing spaces — are accepted.
    m = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/something with spaces/**",
"template": "user-application",
"template_variables": {
"OK1": "foo",
"OK2": "com.example.foo",
"OK3": "something with spaces"
}
}
}
}
}'''
    self._verify_manifest(m, expected=True)
def test_verify_manifest_profile_template_var_bad(self):
    '''Test verify_manifest (bad template_var)'''
    # Every declaration below carries a glob/brace/bracket metacharacter
    # (VAR9 is a relative path) and must make verification fail.
    bad_declarations = (
        '"VAR1": "f*o"',
        '"VAR2": "*foo"',
        '"VAR3": "fo*"',
        '"VAR4": "b{ar"',
        '"VAR5": "b{a,r}"',
        '"VAR6": "b}ar"',
        '"VAR7": "bar[0-9]"',
        '"VAR8": "b{ar"',
        '"VAR9": "foo/bar"',  # this is valid, but potentially unsafe
    )
    for declaration in bad_declarations:
        manifest = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"template_variables": {
%s
}
}
}
}
}''' % declaration
        self._verify_manifest(manifest, expected=False)
def test_manifest_invalid(self):
    '''Test invalid manifest (parse error)'''
    # Deliberately truncated JSON (unbalanced braces) -> parse error.
    m = '''{
"security": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"abstractions": [
"base"
]
}'''
    self._verify_manifest(m, expected=False, invalid=True)
def test_manifest_invalid2(self):
    '''Test invalid manifest (profile_name is not key)'''
    # Profile keys appear directly under "security"; the required
    # "profiles" -> profile-name nesting level is missing.
    m = '''{
"security": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"abstractions": [
"base"
]
}
}'''
    self._verify_manifest(m, expected=False, invalid=True)
def test_manifest_invalid3(self):
    '''Test invalid manifest (profile_name in dict)'''
    # The profile name is supplied as a "profile_name" value inside the
    # dict instead of being the key of the profile entry.
    m = '''{
"security": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"abstractions": [
"base"
],
"profile_name": "com.example.foo"
}
}'''
    self._verify_manifest(m, expected=False, invalid=True)
def test_manifest_invalid4(self):
    '''Test invalid manifest (bad path in template var)'''
    # Parse the CLI options once, on a copy of the shared fixture list.
    # The original appended "--manifest=/dev/null" to self.full_args on
    # every loop pass, mutating the fixture and accumulating duplicates.
    args = self.full_args[:]
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    for v in ['"VAR1": "/tmp/../etc/passwd"',
              '"VAR2": "./"',
              '"VAR3": "foo\"bar"',
              '"VAR4": "foo//bar"',
              ]:
        m = '''{
"security": {
"profiles": {
"com.example.foo": {
"binary": "/opt/com.example/foo/**",
"template": "user-application",
"template_variables": {
%s
}
}
}
}
}''' % v
        (binary, options) = easyprof.parse_manifest(m, self.options)[0]
        params = easyprof.gen_policy_params(binary, options)
        try:
            easyprof.verify_manifest(params)
        except easyprof.AppArmorException:
            # This variable was rejected as expected; check the next one.
            # (The original 'return' here skipped all remaining values.)
            continue
        raise Exception("Should have failed with invalid variable declaration")
# policy version tests
def test_policy_vendor_manifest_nonexistent(self):
    '''Test policy vendor via manifest (nonexistent)'''
    # policy_vendor names a vendor directory that was never created, so
    # constructing AppArmorEasyProfile must raise AppArmorException.
    m = '''{
"security": {
"profiles": {
"com.example.foo": {
"policy_vendor": "nonexistent",
"policy_version": 1.0,
"binary": "/opt/com.example/foo/**",
"template": "user-application"
}
}
}
}'''
    # Build up our args
    # NOTE(review): args aliases self.full_args, so the append mutates the
    # shared fixture list; presumably setUp rebuilds it per test — confirm.
    args = self.full_args
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    (binary, self.options) = easyprof.parse_manifest(m, self.options)[0]
    try:
        easyprof.AppArmorEasyProfile(binary, self.options)
    except easyprof.AppArmorException:
        return
    raise Exception ("Should have failed with non-existent directory")
def test_policy_version_manifest(self):
    '''Test policy version via manifest (good)'''
    policy_vendor = "somevendor"
    policy_version = "1.0"
    # templates/policygroups are copied into <dir>/<vendor>/<version>
    policy_subdir = "%s/%s" % (policy_vendor, policy_version)
    m = '''{
"security": {
"profiles": {
"com.example.foo": {
"policy_vendor": "%s",
"policy_version": %s,
"binary": "/opt/com.example/foo/**",
"template": "user-application"
}
}
}
}''' % (policy_vendor, policy_version)
    for d in ['policygroups', 'templates']:
        shutil.copytree(os.path.join(self.tmpdir, d),
                        os.path.join(self.tmpdir, d, policy_subdir))
    # Build up our args
    args = self.full_args
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    (binary, self.options) = easyprof.parse_manifest(m, self.options)[0]
    easyp = easyprof.AppArmorEasyProfile(binary, self.options)
    # Every discovered template/policy group must come from the
    # vendor/version subdirectory selected by the manifest.
    tdir = os.path.join(self.tmpdir, 'templates', policy_subdir)
    for t in easyp.get_templates():
        self.assertTrue(t.startswith(tdir))
    pdir = os.path.join(self.tmpdir, 'policygroups', policy_subdir)
    for p in easyp.get_policy_groups():
        self.assertTrue(p.startswith(pdir))
    params = easyprof.gen_policy_params(binary, self.options)
    easyp.gen_policy(**params)
def test_policy_vendor_version_args(self):
    '''Test policy vendor and version via command line args (good)'''
    policy_version = "1.0"
    policy_vendor = "somevendor"
    policy_subdir = "%s/%s" % (policy_vendor, policy_version)
    # Create the directories the vendor/version lookup should find
    for d in ['policygroups', 'templates']:
        shutil.copytree(os.path.join(self.tmpdir, d),
                        os.path.join(self.tmpdir, d, policy_subdir))
    # Build up our args on a copy so the shared fixture list is untouched.
    # The original re-parsed self.full_args below and only saw the two
    # appended flags because args aliased it; parse the explicit list.
    args = self.full_args[:]
    args.append("--policy-version=%s" % policy_version)
    args.append("--policy-vendor=%s" % policy_vendor)
    (self.options, self.args) = easyprof.parse_args(args)
    (self.options, self.args) = easyprof.parse_args(args + [self.binary])
    easyp = easyprof.AppArmorEasyProfile(self.binary, self.options)
    tdir = os.path.join(self.tmpdir, 'templates', policy_subdir)
    for t in easyp.get_templates():
        self.assertTrue(t.startswith(tdir),
                        "'%s' does not start with '%s'" % (t, tdir))
    pdir = os.path.join(self.tmpdir, 'policygroups', policy_subdir)
    for p in easyp.get_policy_groups():
        self.assertTrue(p.startswith(pdir),
                        "'%s' does not start with '%s'" % (p, pdir))
    params = easyprof.gen_policy_params(self.binary, self.options)
    easyp.gen_policy(**params)
def test_policy_vendor_args_nonexistent(self):
    '''Test policy vendor via command line args (nonexistent)'''
    policy_vendor = "nonexistent"
    policy_version = "1.0"
    # Copy the fixture list: the original relied on args aliasing
    # self.full_args for the second parse_args call to see the flags.
    args = self.full_args[:]
    args.append("--policy-version=%s" % policy_version)
    args.append("--policy-vendor=%s" % policy_vendor)
    (self.options, self.args) = easyprof.parse_args(args)
    (self.options, self.args) = easyprof.parse_args(args + [self.binary])
    try:
        easyprof.AppArmorEasyProfile(self.binary, self.options)
    except easyprof.AppArmorException:
        return
    raise Exception("Should have failed with non-existent directory")
def test_policy_version_args_bad(self):
    '''Test policy version via command line args (bad)'''
    bad = [
        "../../../../../../etc",
        "notanumber",
        "v1.0a",
        "-1",
    ]
    for policy_version in bad:
        # Copy per iteration: the original appended to the shared
        # self.full_args list, accumulating stale flags across passes.
        args = self.full_args[:]
        args.append("--policy-version=%s" % policy_version)
        args.append("--policy-vendor=somevendor")
        (self.options, self.args) = easyprof.parse_args(args)
        (self.options, self.args) = easyprof.parse_args(args + [self.binary])
        try:
            easyprof.AppArmorEasyProfile(self.binary, self.options)
        except easyprof.AppArmorException:
            continue
        raise Exception("Should have failed with bad version")
def test_policy_vendor_args_bad(self):
    '''Test policy vendor via command line args (bad)'''
    bad = [
        "../../../../../../etc",
        "vendor with space",
        "semicolon;isbad",
    ]
    for policy_vendor in bad:
        # Copy per iteration: the original appended to the shared
        # self.full_args list, accumulating stale flags across passes.
        args = self.full_args[:]
        args.append("--policy-vendor=%s" % policy_vendor)
        args.append("--policy-version=1.0")
        (self.options, self.args) = easyprof.parse_args(args)
        (self.options, self.args) = easyprof.parse_args(args + [self.binary])
        try:
            easyprof.AppArmorEasyProfile(self.binary, self.options)
        except easyprof.AppArmorException:
            continue
        raise Exception("Should have failed with bad vendor")
# output_directory tests
def test_output_directory_multiple(self):
    '''Test output_directory (multiple)'''
    # Map of expected output filename -> profile name used in the manifest.
    files = dict()
    files["com.example.foo"] = "com.example.foo"
    files["com.ubuntu.developer.myusername.MyCoolApp"] = "com.ubuntu.developer.myusername.MyCoolApp"
    files["usr.bin.baz"] = "/usr/bin/baz"
    m = '''{
"security": {
"profiles": {
"%s": {
"abstractions": [
"audio",
"gnome"
],
"author": "<NAME>",
"binary": "/opt/foo/**",
"comment": "Unstructured single-line comment",
"copyright": "Unstructured single-line copyright statement",
"name": "My Foo App",
"policy_groups": [
"opt-application",
"user-application"
],
"read_path": [
"/tmp/foo_r",
"/tmp/bar_r/"
],
"template": "user-application",
"template_variables": {
"APPNAME": "foo",
"VAR1": "bar",
"VAR2": "baz"
},
"write_path": [
"/tmp/foo_w",
"/tmp/bar_w/"
]
},
"%s": {
"policy_groups": [
"opt-application",
"user-application"
],
"template": "user-application",
"template_variables": {
"APPNAME": "MyCoolApp",
"APPVERSION": "0.1.2"
}
},
"%s": {
"abstractions": [
"gnome"
],
"policy_groups": [
"user-application"
],
"template_variables": {
"APPNAME": "baz"
}
}
}
}
}''' % (files["com.example.foo"],
        files["com.ubuntu.developer.myusername.MyCoolApp"],
        files["usr.bin.baz"])
    out_dir = os.path.join(self.tmpdir, "output")
    args = self.full_args
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    profiles = easyprof.parse_manifest(m, self.options)
    # Generate one policy per profile found in the manifest
    for (binary, options) in profiles:
        easyp = easyprof.AppArmorEasyProfile(binary, options)
        params = easyprof.gen_policy_params(binary, options)
        easyp.output_policy(params, dir=out_dir)
    # Each profile should have produced a file named after it in out_dir
    for fn in files:
        f = os.path.join(out_dir, fn)
        self.assertTrue(os.path.exists(f), "Could not find '%s'" % f)
def test_output_directory_single(self):
    '''Test output_directory (single)'''
    # Expected output filename -> profile name used in the manifest.
    files = dict()
    files["com.example.foo"] = "com.example.foo"
    m = '''{
"security": {
"profiles": {
"%s": {
"abstractions": [
"audio",
"gnome"
],
"author": "<NAME>",
"binary": "/opt/foo/**",
"comment": "Unstructured single-line comment",
"copyright": "Unstructured single-line copyright statement",
"name": "<NAME>",
"policy_groups": [
"opt-application",
"user-application"
],
"read_path": [
"/tmp/foo_r",
"/tmp/bar_r/"
],
"template": "user-application",
"template_variables": {
"APPNAME": "foo",
"VAR1": "bar",
"VAR2": "baz"
},
"write_path": [
"/tmp/foo_w",
"/tmp/bar_w/"
]
}
}
}
}''' % (files["com.example.foo"])
    out_dir = os.path.join(self.tmpdir, "output")
    args = self.full_args
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    profiles = easyprof.parse_manifest(m, self.options)
    for (binary, options) in profiles:
        easyp = easyprof.AppArmorEasyProfile(binary, options)
        params = easyprof.gen_policy_params(binary, options)
        easyp.output_policy(params, dir=out_dir)
    # The profile should have produced a file named after it in out_dir
    for fn in files:
        f = os.path.join(out_dir, fn)
        self.assertTrue(os.path.exists(f), "Could not find '%s'" % f)
def test_output_directory_invalid(self):
'''Test output_directory (output directory exists as file)'''
files = dict()
files["usr.bin.baz"] = "/usr/bin/baz"
m = '''{
"security": {
"profiles": {
"%s": {
"abstractions": [
"gnome"
],
"policy_groups": [
"user-application"
],
"template_variables": {
"APPNAME": "baz"
}
}
}
}
}''' % files["usr.bin.baz"]
out_dir = os.path.join(self.tmpdir, "output")
open(out_dir, 'w').close()
args = self.full_args
args.append("--manifest=/dev/null")
(self.options, self.args) = easyprof.parse_args(args)
(binary, options) = easyprof.parse_manifest(m, self.options)[0]
easyp = easyprof.AppArmorEasyProfile(binary, options)
params = easyprof.gen_policy_params(binary, options)
try:
easyp.output_policy(params, dir=out_dir)
except easyprof.AppArmorException:
return
raise Exception ("Should have failed | |
# Source repository: ashishdhngr/baserow
from unittest.mock import patch, call, ANY
import pytest
from django.db import transaction
from baserow.contrib.database.api.constants import PUBLIC_PLACEHOLDER_ENTITY_ID
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.views.handler import ViewHandler
from baserow.core.trash.handler import TrashHandler
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_restricted_row_created_ws_event(
    mock_broadcast_to_channel_group, data_fixture
):
    """A created row is broadcast to the table channel and to every public
    grid view channel, with each view payload restricted to that view's
    visible fields; the public form view receives nothing."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_only_showing_one_field = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_showing_all_fields = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # No public events should be sent to this form view
    data_fixture.create_form_view(user, table=table, public=True)
    # Hide hidden_field in the first view only
    data_fixture.create_grid_view_field_option(
        public_view_only_showing_one_field, hidden_field, hidden=True
    )
    row = RowHandler().create_row(
        user=user,
        table=table,
        values={
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_only_showing_one_field.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
            call(
                f"view-{public_view_showing_all_fields.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        f"field_{visible_field.id}": "Visible",
                        # This field is not hidden for this public view and so should be
                        # included
                        f"field_{hidden_field.id}": "Hidden",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_row_created_only_when_filters_match(
    mock_broadcast_to_channel_group, data_fixture
):
    """Only public views whose filters all match the new row get the
    ``row_created`` broadcast; a view filtered out on a hidden field stays
    silent even though its visible-field filter matches."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_showing_row = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_hiding_row = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_showing_row, hidden_field, hidden=True
    )
    data_fixture.create_grid_view_field_option(
        public_view_hiding_row, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
    )
    row = RowHandler().create_row(
        user=user,
        table=table,
        values={
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_showing_row.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_restricted_row_deleted_ws_event(
    mock_broadcast_to_channel_group, data_fixture
):
    """Deleting a row broadcasts ``row_deleted`` to each public grid view
    with the payload restricted to that view's visible fields; the public
    form view receives nothing."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_only_showing_one_field = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_showing_all_fields = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_only_showing_one_field, hidden_field, hidden=True
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    RowHandler().delete_row(user, table, row.id, model)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_only_showing_one_field.slug}",
                {
                    "type": "row_deleted",
                    "row_id": row.id,
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
            call(
                f"view-{public_view_showing_all_fields.slug}",
                {
                    "type": "row_deleted",
                    "row_id": row.id,
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        f"field_{visible_field.id}": "Visible",
                        # This field is not hidden for this public view and so should be
                        # included
                        f"field_{hidden_field.id}": "Hidden",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_row_deleted_only_when_filters_match(
    mock_broadcast_to_channel_group, data_fixture
):
    """``row_deleted`` is only broadcast to public views whose filters
    matched the deleted row; the view whose hidden-field filter did not
    match stays silent."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_showing_row = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_hiding_row = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_showing_row, hidden_field, hidden=True
    )
    data_fixture.create_grid_view_field_option(
        public_view_hiding_row, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    RowHandler().delete_row(user, table, row.id, model)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_showing_row.slug}",
                {
                    "type": "row_deleted",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_id": row.id,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_not_visible_in_public_view_when_updated_to_be_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """Updating a filtered-out row so that it now matches a public view's
    filters broadcasts ``row_created`` to that view — from the view's
    perspective the row newly exists."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_filters_initially_hiding_all_rows = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_filters_initially_hiding_all_rows, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_filters_initially_hiding_all_rows,
        field=visible_field,
        type="equal",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_filters_initially_hiding_all_rows,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    initially_hidden_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichDoesntMatchFilter",
        },
    )
    # Double check the row isn't visible in any views to begin with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_hidden_row) == []
    RowHandler().update_row(
        user,
        table,
        initially_hidden_row.id,
        values={f"field_{hidden_field.id}": "ValueWhichMatchesFilter"},
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_filters_initially_hiding_all_rows.slug}",
                {
                    # The row should appear as a created event as for the public view
                    # it effectively has been created as it didn't exist before.
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": initially_hidden_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_be_not_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """Updating a visible row so that it no longer matches a public view's
    filters broadcasts ``row_deleted`` to that view, carrying the row's
    pre-update visible values."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_row_showing = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_row_showing, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    initially_visible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
        public_view_with_row_showing.view_ptr
    ]
    # Update the row so it is no longer visible
    RowHandler().update_row(
        user,
        table,
        initially_visible_row.id,
        values={
            f"field_{hidden_field.id}": "ValueWhichDoesNotMatchFilter",
            f"field_{visible_field.id}": "StillVisibleButNew",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_row_showing.slug}",
                {
                    # The row should appear as a deleted event as for the public view
                    # it effectively has been.
                    "type": "row_deleted",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_id": initially_visible_row.id,
                    "row": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent in its state before it
                        # was updated
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_still_be_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """Updating a visible row that still matches the public view's filters
    broadcasts ``row_updated`` with both the before and after payloads,
    each restricted to the view's visible fields."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_row_showing = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_row_showing, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=hidden_field,
        type="contains",
        value="e",
    )
    model = table.get_model()
    initially_visible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "e",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
        public_view_with_row_showing.view_ptr
    ]
    # Update the row so it is still visible but changed
    RowHandler().update_row(
        user,
        table,
        initially_visible_row.id,
        values={
            f"field_{hidden_field.id}": "eee",
            f"field_{visible_field.id}": "StillVisibleButUpdated",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_row_showing.slug}",
                {
                    "type": "row_updated",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_before_update": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "row": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "StillVisibleButUpdated",
                    },
                    "metadata": {},
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_restored_public_views_receive_restricted_row_created_ws_event(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_only_showing_one_field = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_showing_all_fields = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_only_showing_one_field, hidden_field, hidden=True
)
model = table.get_model()
row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
TrashHandler.trash(
user, table.database.group, table.database, row, parent_id=table.id
)
TrashHandler.restore_item(user, "row", row.id, parent_trash_item_id=table.id)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_only_showing_one_field.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
call(
f"view-{public_view_showing_all_fields.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
f"field_{visible_field.id}": | |
%s=%s__tmp;' % (t, key, key),
'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
else:
query = None
if query:
logfile.write('timestamp: %s\n'
% datetime.datetime.today().isoformat())
self._db['_lastsql'] = '\n'.join(query)
for sub_query in query:
logfile.write(sub_query + '\n')
self._db._execute(sub_query)
if self._db._dbname in ['mysql', 'oracle']:
self._db.commit()
logfile.write('success!\n')
if key in sql_fields:
sql_fields_old[key] = sql_fields[key]
else:
del sql_fields_old[key]
tfile = open(self._dbt, 'w')
portalocker.lock(tfile, portalocker.LOCK_EX)
cPickle.dump(sql_fields_old, tfile)
portalocker.unlock(tfile)
tfile.close()
def create(self):
    """No-op kept so older callers that still invoke create() keep working."""
    pass
def _drop(self, mode = None):
t = self._tablename
c = mode or ''
if self._db._dbname in ['oracle']:
return ['DROP TABLE %s %s;' % (t, c), 'DROP SEQUENCE %s_sequence;'
% t]
elif self._db._dbname == 'firebird':
return ['DROP TABLE %s %s;' % (t, c), 'DROP GENERATOR GENID_%s;'
% t]
return ['DROP TABLE %s;' % t]
def drop(self, mode = None):
    """Drop the table from the database, unregister it from the DAL and
    remove its migration metadata file.

    *mode* is passed through to _drop and appended to the DROP statement.
    When self._dbt is set (migrations enabled) every statement is also
    appended to the migration logfile.
    """
    if self._dbt:
        logfile = open(self._logfilename, 'a')
    queries = self._drop(mode = mode)
    self._db['_lastsql'] = '\n'.join(queries)
    for query in queries:
        if self._dbt:
            logfile.write(query + '\n')
        self._db._execute(query)
    self._db.commit()
    # unregister the table from the DAL instance and its table list
    del self._db[self._tablename]
    del self._db.tables[self._db.tables.index(self._tablename)]
    self._db._update_referenced_by(self._tablename)
    if self._dbt:
        # remove the migration file so a re-define starts clean
        os.unlink(self._dbt)
        logfile.write('success!\n')
def _insert(self, **fields):
    """Build and return (without executing) the INSERT statement for
    *fields*.

    Raises SyntaxError for unknown field names and for missing required
    fields.  Record-like values are replaced by their id; string values
    are truncated to the declared field length.
    """
    (fs, vs) = ([], [])
    invalid_fieldnames = [key for key in fields if not key in self.fields]
    if invalid_fieldnames:
        # parenthesised raise: same behaviour in Python 2, valid Python 3
        raise SyntaxError('invalid field names: %s'
                          % repr(invalid_fieldnames))
    for fieldname in self.fields:
        if fieldname == 'id':
            continue  # the id column is backend-generated
        field = self[fieldname]
        (ft, fd) = (field.type, field._db._dbname)
        if fieldname in fields:
            fs.append(fieldname)
            value = fields[fieldname]
            if hasattr(value, 'id'):
                # a record/Reference was passed; store its id
                value = value.id
            elif ft == 'string' and isinstance(value, (str, unicode)):
                value = value[:field.length]
            vs.append(sql_represent(value, ft, fd))
        elif field.default is not None:
            # 'is not None' avoids invoking a custom __ne__ on defaults
            fs.append(fieldname)
            vs.append(sql_represent(field.default, ft, fd))
        elif field.required is True:
            raise SyntaxError('Table: missing required field: %s' % field)
    sql_f = ', '.join(fs)
    sql_v = ', '.join(vs)
    sql_t = self._tablename
    return 'INSERT INTO %s(%s) VALUES (%s);' % (sql_t, sql_f, sql_v)
    def insert(self, **fields):
        """Insert a new record and return its id wrapped in a Reference.

        Executes the INSERT built by :meth:`_insert`, then retrieves
        the freshly generated primary key using the backend-specific
        mechanism (``lastrowid``, sequence ``currval``,
        ``last_insert_id()``, ``@@IDENTITY``, ``gen_id``, ...).
        If the retrieved id is not an int (e.g. ``None`` on an
        unrecognised backend) it is returned unchanged.
        """
        query = self._insert(**fields)
        self._db['_lastsql'] = query
        self._db._execute(query)
        if self._db._dbname == 'sqlite':
            id = self._db._cursor.lastrowid
        elif self._db._dbname == 'postgres':
            self._db._execute("select currval('%s_id_Seq')"
                               % self._tablename)
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname == 'mysql':
            self._db._execute('select last_insert_id();')
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname in ['oracle']:
            t = self._tablename
            self._db._execute('SELECT %s_sequence.currval FROM dual;'
                              % t)
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname == 'mssql' or self._db._dbname\
             == 'mssql2':
            self._db._execute('SELECT @@IDENTITY;')
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname == 'firebird':
            self._db._execute('SELECT gen_id(GENID_%s, 0) FROM rdb$database'
                              % self._tablename)
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname == 'informix':
            # informix exposes the serial value via the sqlerrd array
            id = self._db._cursor.sqlerrd[1]
        elif self._db._dbname == 'db2':
            self._db._execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;'%self._tablename)
            id = int(self._db._cursor.fetchone()[0])
        elif self._db._dbname == 'ingres':
            tmp_seqname=gen_ingres_sequencename(self._tablename)
            self._db._execute('select current value for %s' % tmp_seqname)
            id = int(self._db._cursor.fetchone()[0]) # don't really need int type cast here...
        else:
            id = None
        if not isinstance(id,int):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (self, None)
        return rid
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        ):
        """
        Import records from a csv file. Column headers must have the same
        names as table fields. Field 'id' is ignored. If column names read
        'table.field' the 'table.' prefix is ignored.

        :param csvfile: an open file-like object yielding csv rows.
        :param id_map: optional dict mapping tablename -> {old_id: new_id};
            used to remap 'reference' field values and updated in place
            with the ids created here.
        :param null: string that denotes a NULL value in the file.
        :param unique: name of a field that must be unique (typically a
            uuid field); when a row with the same value exists it is
            updated instead of inserted.
        """
        reader = csv.reader(csvfile)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map):
            # translate the csv cell into the value to store:
            # NULL marker -> None; reference ids -> remapped via id_map
            if value == null:
                value = None
            elif id_map and field.type[:10] == 'reference ':
                try:
                    value = id_map[field.type[9:].strip()][value]
                except KeyError:
                    pass
            return (field.name, value)

        for line in reader:
            if not line:
                break
            if not colnames:
                # first row is the header; strip any 'table.' prefix
                colnames = [x[x.find('.') + 1:] for x in line]
                c = [i for i in xrange(len(line)) if colnames[i] != 'id']
                cid = [i for i in xrange(len(line)) if colnames[i] == 'id']
                if cid:
                    cid = cid[0]
            else:
                items = [fix(self[colnames[i]], line[i], id_map) for i in c]
                if not unique or unique not in colnames:
                    new_id = self.insert(**dict(items))
                else:
                    # Validation. Check for duplicate of 'unique' &,
                    # if present, update instead of insert.
                    for i in c:
                        if colnames[i] == unique:
                            _unique = line[i]
                    query = self._db[self][unique]==_unique
                    if self._db(query).count():
                        self._db(query).update(**dict(items))
                        new_id = self._db(query).select()[0].id
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid != []:
                    # remember old id -> new id for later reference fixes
                    id_map_self[line[cid]] = new_id
    def on(self, query):
        """Return a SQLJoin of this table on *query* (used in joins)."""
        return SQLJoin(self, query)
def _truncate(self, mode = None):
t = self._tablename
c = mode or ''
if self._db._dbname == 'sqlite':
return ['DELETE FROM %s;' % t,
"DELETE FROM sqlite_sequence WHERE name='%s';" % t]
return ['TRUNCATE TABLE %s %s;' % (t, c)]
def truncate(self, mode = None):
if self._dbt:
logfile = open(self._logfilename, 'a')
queries = self._truncate(mode = mode)
self._db['_lastsql'] = '\n'.join(queries)
for query in queries:
if self._dbt:
logfile.write(query + '\n')
self._db._execute(query)
self._db.commit()
if self._dbt:
logfile.write('success!\n')
# added by <NAME> (2009)
class KeyedTable(Table):
"""
an instance of this class represents a database keyed table
Example::
db = DAL(...)
db.define_table('account',
Field('accnum','integer'),
Field('acctype'),
Field('accdesc'),
primarykey=['accnum','acctype'])
db.users.insert(accnum=1000,acctype='A',accdesc='Assets')
db.users.drop()
db.define_table('subacct',
Field('sanum','integer'),
Field('refnum','reference account.accnum'),
Field('reftype','reference account.acctype'),
Field('sadesc','string'),
primarykey=['sanum']))
Notes:
1) primarykey is a list of the field names that make up the primary key
2) all primarykey fields will have NOT NULL set even if not specified
3) references are to other keyed tables only
4) references must use tablename.fieldname format, as shown above
5) update_record function is not available
"""
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the keyed table and validates the provided fields.

        If a field is of type Table, the fields (excluding 'id') from that
        table are used instead.  The only accepted keyword argument is
        ``primarykey``, a list of field names making up the primary key;
        every primary-key field has ``notnull`` forced to True.

        :raises SyntaxError: when a supplied field is of incorrect type,
            when an unknown keyword argument is passed, or when
            ``primarykey`` is not a list of this table's field names.
        """
        # only 'primarykey' is accepted; it must be a list of field names
        for k,v in args.iteritems():
            if k != 'primarykey':
                raise SyntaxError, 'invalid table \'%s\' attribute: %s' % (tablename, k)
            elif isinstance(v,list):
                self._primarykey=v
            else:
                raise SyntaxError, 'primarykey must be a list of fields from table \'%s\' ' %tablename
        # new_fields = [ Field('id', 'id') ]
        new_fields = []
        for field in fields:
            if isinstance(field, Field):
                new_fields.append(field)
            elif isinstance(field, Table):
                # borrow the other table's fields, minus its 'id'
                new_fields += [copy.copy(field[f]) for f in
                               field.fields if f != 'id']
            else:
                raise SyntaxError, \
                    'define_table argument is not a Field: %s' % field
        fields = new_fields
        self._db = db
        self._tablename = tablename
        self.fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)
        # register each field and back-link it to this table/db
        for field in fields:
            self.fields.append(field.name)
            self[field.name] = field
            field._tablename = self._tablename
            field._table = self
            field._db = self._db
        self.ALL = SQLALL(self)
        # every primary-key member must exist and becomes NOT NULL
        for k in self._primarykey:
            if k not in self.fields:
                raise SyntaxError,\
                    'primarykey must be a list of fields from table \'%s\' ' %\
                    tablename
            else:
                self[k].notnull = True
# KeyedTable
    def _create_references(self):
        """Validate 'reference table.field' field types and register
        the reverse links on the referenced (keyed) tables.

        Keyed-table references must use the ``tablename.fieldname``
        form, may only target other KeyedTable instances, and both the
        table and field must exist; each valid reference appends
        ``(tablename, fieldname)`` to the target's ``_referenced_by``.

        :raises SyntaxError: for malformed or unresolvable references.
        """
        self._referenced_by = []
        for fieldname in self.fields:
            field=self[fieldname]
            if isinstance(field.type,str) and field.type[:10] == 'reference ':
                ref = field.type[10:].strip()
                refs = ref.split('.')
                if not ref:
                    raise SyntaxError, 'Table: reference to nothing: %s' %ref
                if len(refs)!=2:
                    raise SyntaxError, 'invalid reference: %s' %ref
                rtablename,rfieldname = refs
                if not rtablename in self._db.tables:
                    raise SyntaxError,\
                        'Table: table \'%s\'does not exist' %rtablename
                rtable = self._db[rtablename]
                if not isinstance(rtable, KeyedTable):
                    raise SyntaxError,\
                        'keyed tables can only reference other keyed tables (for now)'
                if self._tablename in rtable.fields:
                    raise SyntaxError,\
                        'Field: table %s has same name as a field in referenced table \'%s\'' %\
                        (self._tablename, rtablename)
                if rfieldname not in rtable.fields:
                    raise SyntaxError,\
                        "invalid field '%s' for referenced table '%s' in table '%s'" %(rfieldname, rtablename, self._tablename)
                rtable._referenced_by.append((self._tablename, field.name))
# KeyedTable
    def _build_query(self,key):
        """Build an AND-ed equality query from a primary-key dict.

        :param key: dict mapping primary-key field names to values.
        :raises SyntaxError: if any key is not part of the primary key.
        :return: the combined query (None for an empty dict).
        """
        query = None
        for k,v in key.iteritems():
            if k in self._primarykey:
                if query:
                    query = query & (self[k] == v)
                else:
                    query = (self[k] == v)
            else:
                raise SyntaxError,\
                'Field %s is not part of the primary key of %s'%\
                (k,self._tablename)
        return query
# KeyedTable ok
def __getitem__(self, key):
if not key:
return None
if isinstance(key, dict):
query = self._build_query(key)
rows = self._db(query).select()
if rows:
return rows[0]
return None
else:
return dict.__getitem__(self, str(key))
# KeyedTable ok
def __setitem__(self, key, value):
# ??? handle special case where primarykey has all fields ???
if isinstance(key, dict) and isinstance(value, dict):
if setsSet(key.keys())==setsSet(self._primarykey):
value = self._filter_fields(value)
kv = {}
kv.update(value)
kv.update(key)
if not self.insert(**kv):
query = self._build_query(key)
self._db(query).update(**self._filter_fields(value))
else:
raise SyntaxError,\
'key must have all fields from primary key: %s'%\
(self._primarykey)
else:
if isinstance(key, dict):
raise SyntaxError,\
'value must be a dictionary: %s'%value
# 'key must be a dictionary with primary key fields: %s'%\
# self._primarykey
dict.__setitem__(self, | |
import os
import platform
import sys
from cffi import FFI
__all__ = ['ffi', 'libca']
# Single shared FFI instance; the cdef that follows declares the subset
# of the EPICS Channel Access (cadef.h) API used by this binding.
ffi = FFI()
# cadef.h
ffi.cdef("""
typedef void *chid;
typedef chid chanId;
typedef long chtype;
typedef double ca_real;
typedef void *evid;
/* arguments passed to user connection handlers */
struct connection_handler_args {
chanId chid; /* channel id */
long op; /* one of CA_OP_CONN_UP or CA_OP_CONN_DOWN */
};
typedef void caCh (struct connection_handler_args args);
/* CFFI does not support bit field */
/*
typedef struct ca_access_rights {
unsigned read_access:1;
unsigned write_access:1;
} caar;
*/
typedef struct ca_access_rights {
unsigned access;
} caar;
/* arguments passed to user access rights handlers */
struct access_rights_handler_args {
chanId chid; /* channel id */
caar ar; /* new access rights state */
};
typedef void caArh (struct access_rights_handler_args args);
/*
* Arguments passed to event handlers and get/put call back handlers.
*
* The status field below is the CA ECA_XXX status of the requested
* operation which is saved from when the operation was attempted in the
* server and copied back to the clients call back routine.
* If the status is not ECA_NORMAL then the dbr pointer will be NULL
* and the requested operation can not be assumed to be successful.
*/
typedef struct event_handler_args {
void *usr; /* user argument supplied with request */
chanId chid; /* channel id */
long type; /* the type of the item returned */
long count; /* the element count of the item returned */
const void *dbr; /* a pointer to the item returned */
int status; /* ECA_XXX status of the requested op from the server */
} evargs;
typedef void caEventCallBackFunc (struct event_handler_args);
void ca_test_event
(
struct event_handler_args
);
/* arguments passed to user exception handlers */
struct exception_handler_args {
void *usr; /* user argument supplied when installed */
chanId chid; /* channel id (may be nill) */
long type; /* type requested */
long count; /* count requested */
void *addr; /* user's address to write results of CA_OP_GET */
long stat; /* channel access ECA_XXXX status code */
long op; /* CA_OP_GET, CA_OP_PUT, ..., CA_OP_OTHER */
const char *ctx; /* a character string containing context info */
const char *pFile; /* source file name (may be NULL) */
unsigned lineNo; /* source file line number (may be zero) */
};
typedef unsigned CA_SYNC_GID;
/*
* External OP codes for CA operations
*/
#define CA_OP_GET 0
#define CA_OP_PUT 1
#define CA_OP_CREATE_CHANNEL 2
#define CA_OP_ADD_EVENT 3
#define CA_OP_CLEAR_EVENT 4
#define CA_OP_OTHER 5
/*
* used with connection_handler_args
*/
#define CA_OP_CONN_UP 6
#define CA_OP_CONN_DOWN 7
/* depricated */
#define CA_OP_SEARCH 2
short ca_field_type(chid chan);
unsigned long ca_element_count(chid chan);
const char * ca_name (chid chan);
void ca_set_puser (chid chan, void *puser);
void * ca_puser (chid chan);
unsigned ca_read_access (chid chan);
unsigned ca_write_access (chid chan);
/*
* cs_ - `channel state'
*
* cs_never_conn valid chid, IOC not found
* cs_prev_conn valid chid, IOC was found, but unavailable
* cs_conn valid chid, IOC was found, still available
* cs_closed channel deleted by user
*/
enum channel_state {cs_never_conn, cs_prev_conn, cs_conn, cs_closed};
enum channel_state ca_state (chid chan);
/************************************************************************/
/* Perform Library Initialization */
/* */
/* Must be called once before calling any of the other routines */
/************************************************************************/
enum ca_preemptive_callback_select
{ ca_disable_preemptive_callback, ca_enable_preemptive_callback };
int ca_context_create(enum ca_preemptive_callback_select enable_premptive);
void ca_detach_context ();
/************************************************************************/
/* Remove CA facility from your task */
/* */
/* Normally called automatically at task exit */
/************************************************************************/
void ca_context_destroy (void);
typedef unsigned capri;
#define CA_PRIORITY_MAX 99
#define CA_PRIORITY_MIN 0
#define CA_PRIORITY_DEFAULT 0
#define CA_PRIORITY_DB_LINKS 80
#define CA_PRIORITY_ARCHIVE 20
#define CA_PRIORITY_OPI 0
/*
* ca_create_channel ()
*
* pChanName R channel name string
* pConnStateCallback R address of connection state change
* callback function
* pUserPrivate R placed in the channel's user private field
* o can be fetched later by ca_puser(CHID)
* o passed as void * arg to *pConnectCallback above
* priority R priority level in the server 0 - 100
* pChanID RW channel id written here
*/
int ca_create_channel
(
const char *pChanName,
caCh *pConnStateCallback,
void *pUserPrivate,
capri priority,
void *pChanID
);
/*
* ca_change_connection_event()
*
* chan R channel identifier
* pfunc R address of connection call-back function
*/
int ca_change_connection_event
(
chid chan,
caCh * pfunc
);
/*
* ca_replace_access_rights_event ()
*
* chan R channel identifier
* pfunc R address of access rights call-back function
*/
int ca_replace_access_rights_event (
chid chan,
caArh *pfunc
);
/*
* ca_add_exception_event ()
*
* replace the default exception handler
*
* pfunc R address of exception call-back function
* pArg R copy of this pointer passed to exception
* call-back function
*/
typedef void caExceptionHandler (struct exception_handler_args);
int ca_add_exception_event
(
caExceptionHandler *pfunc,
void *pArg
);
/*
* ca_clear_channel()
* - deallocate resources reserved for a channel
*
* chanId R channel ID
*/
int ca_clear_channel
(
chid chanId
);
/************************************************************************/
/* Write a value to a channel */
/************************************************************************/
/*
* ca_array_put()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
*/
int ca_array_put
(
chtype type,
unsigned long count,
chid chanId,
const void * pValue
);
/*
* ca_array_put_callback()
*
* This routine functions identically to the original ca put request
* with the addition of a callback to the user supplied function
* after recod processing completes in the IOC. The arguments
* to the user supplied callback function are declared in
* the structure event_handler_args and include the pointer
* sized user argument supplied when ca_array_put_callback() is called.
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
*/
int ca_array_put_callback
(
chtype type,
unsigned long count,
chid chanId,
const void * pValue,
caEventCallBackFunc * pFunc,
void * pArg
);
/************************************************************************/
/* Read a value from a channel */
/************************************************************************/
/*
* ca_array_get()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue W channel value copied to this location
*/
int ca_array_get
(
long type,
unsigned long count,
chid chanId,
void * pValue
);
/************************************************************************/
/* Read a value from a channel and run a callback when the value */
/* returns */
/* */
/* */
/************************************************************************/
/*
* ca_array_get_callback()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
*/
int ca_array_get_callback
(
chtype type,
unsigned long count,
chid chanId,
caEventCallBackFunc * pFunc,
void * pArg
);
/************************************************************************/
/* Specify a function to be executed whenever significant changes */
/* occur to a channel. */
/* NOTES: */
/* 1) Evid may be omited by passing a NULL pointer */
/* */
/* 2) An array count of zero specifies the native db count */
/* */
/************************************************************************/
/*
* ca_create_subscription ()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* mask R event mask - one of {DBE_VALUE, DBE_ALARM, DBE_LOG}
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
* pEventID W event id written at specified address
*/
int ca_create_subscription
(
chtype type,
unsigned long count,
chid chanId,
long mask,
caEventCallBackFunc * pFunc,
void * pArg,
evid * pEventID
);
/************************************************************************/
/* Remove a function from a list of those specified to run */
/* whenever significant changes occur to a channel */
/* */
/************************************************************************/
/*
* ca_clear_subscription()
*
* eventID R event id
*/
int ca_clear_subscription
(
evid eventId
);
chid ca_evid_to_chid ( evid id );
/************************************************************************/
/* */
/* Requested data is not necessarily stable prior to */
/* return from called subroutine. Call ca_pend_io() */
/* to guarantee that requested data is stable. Call the routine */
/* ca_flush_io() to force all outstanding requests to be */
/* sent out over the network. Significant increases in */
/* performance have been measured when batching several remote */
/* requests together into one message. Additional */
/* improvements can be obtained by performing local processing */
/* in parallel with outstanding remote processing. */
/* */
/* FLOW OF TYPICAL APPLICATION */
/* */
/* search() ! Obtain Channel ids */
/* . ! " */
/* . ! " */
/* pend_io ! wait for channels to connect */
/* */
/* get() ! several requests for remote info */
/* get() ! " | |
line to the point.
:rtype: float
.. rubric:: Code Example
.. code-block:: python
# 2D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0), Vector(1,0))
>>> result = l.distance_to_point(Point(0,10))
>>> print(result)
10
# 3D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0,0), Vector(1,0,0))
>>> result = l.distance_to_point(Point(10,0,0))
>>> print(result)
0
.. seealso:: `<https://geomalgorithms.com/a02-_lines.html>`_
"""
w=point-self.P0
b=w.dot(self.vL) / self.vL.dot(self.vL)
ptB=self.P0+self.vL*b
return (ptB-point).length
def _intersect_line_skew(self,skew_line):
"""Returns the point of intersection of this line and the supplied skew line
"""
#2D
if self.nD==2:
return self._intersect_line_skew_2D(skew_line)
#3D
elif self.nD==3:
return self._intersect_line_skew_3D(skew_line)
else:
raise Exception # must be either 2D or 3D
def _intersect_line_skew_2D(self,skew_line):
"""Returns the point of intersection of this line and the supplied skew line
"""
u=self.vL
v=skew_line.vL
w=self.P0-skew_line.P0
t=-v.perp_product(w) / v.perp_product(u)
return self.calculate_point(t)
    def _intersect_line_skew_3D(self,skew_line):
        """Returns the point of intersection of this line and the supplied (skew) line

        Works by projecting both lines onto the 2D plane in which the
        cross product of their directions is largest, intersecting
        there, then lifting the result back to 3D on each line and
        checking the two lifted points agree.

        - return value can be:
            - None -> no intersection (for skew lines which do not intersect in 3D space)
            - Point -> a point (for skew lines which intersect)

        :raises ValueError: if the supplied line is parallel to this one
            (i.e. the lines are not skew).
        """
        if not self.is_parallel(skew_line):
            # find the coordinate to ignore for the projection
            # (the component where the cross product is largest, so the
            # projected directions cannot both collapse to zero)
            cp=self.vL.cross_product(skew_line.vL)
            absolute_coords=[abs(x) for x in cp]
            i=absolute_coords.index(max(absolute_coords)) % 3 # the coordinate to ignore for projection
            # project 3D lines to 2D
            self2D=self.project_2D(i)
            skew_line2D=skew_line.project_2D(i)
            # find intersection point for 2D lines
            ipt=self2D._intersect_line_skew_2D(skew_line2D)
            # find t values for the intersection point on each 2D line
            t1=self2D.calculate_t_from_coordinates(*ipt)
            t2=skew_line2D.calculate_t_from_coordinates(*ipt)
            # calculate the 3D intersection points from the t values
            ipt1=self.calculate_point(t1)
            ipt2=skew_line.calculate_point(t2)
            if ipt1==ipt2: # test the two 3D intersection points are the same
                return ipt1
            else:
                return None
        else:
            raise ValueError('%s and %s are not skew lines' % (self,skew_line))
    def calculate_point(self,t):
        """Returns a point on the line for a given t value.

        :param t: The t value of the equation of the line.
        :type t: float

        :return: A point on the line calculated as ``P0 + vL * t``.
        :rtype: Point

        .. rubric:: Code Example

        .. code-block:: python

            # 2D example
            >>> from crossproduct import Point, Vector, Line
            >>> l = Line(Point(0,0), Vector(1,0))
            >>> result = l.calculate_point(3)
            >>> print(result)
            Point(3,0)

            # 3D example
            >>> from crossproduct import Point, Vector, Line
            >>> l = Line(Point(0,0,0), Vector(1,0,0))
            >>> result = l.calculate_point(-3)
            >>> print(result)
            Point(-3,0,0)

        """
        return self.P0 + (self.vL * t)
def calculate_t_from_coordinates(self,*coordinates):
"""Returns t for a given set of coordinates.
First attempts to calculate t from the x coordinate.
If this fails then next attempts to calculate t from the y coordinate.
If this fails then attempts to calculate t from the z coordinate.
:param coordinates: Argument list of xy or xyz coordinate values (floats).
:return: The calculated t value.
:rtype: float
:Example:
.. code-block:: python
# 2D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0), Vector(1,0))
>>> result = l.calculate_t_from_point(Point(3,0))
>>> print(result)
3
# 3D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0,0), Vector(1,0,0))
>>> result = l.calculate_t_from_point(Point(3,0,0))
>>> print(result)
3
"""
for P0,vL,point in zip(self.P0,self.vL,coordinates): # loop through x, y, z components
if not math.isclose(vL, 0, abs_tol=ABS_TOL):
return (point-P0) / vL
raise Exception()
def contains(self,obj):
"""Tests if the line contains the object.
:param obj: A point, halfline or segment.
:type obj: Point, Halfline, Segment
:raises TypeError: If supplied object is not supported by this method.
:return: For point, True if the point lies on the line; otherwise False.
For halfline, True if the halfline startpoint is on the line and
the halfline vector is collinear to the line vector; otherwise False.
For segment, True if the segment start and endpoints are on the line; otherwise False.
:rtype: bool
.. rubric:: Code Example
.. code-block:: python
# 2D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0), Vector(1,0))
>>> result = Point(2,0) in l
>>> print(result)
True
# 3D example
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0,0), Vector(1,0,0))
>>> hl = Halfline(Point(0,0,0), Vector(-1,0,0))
>>> result = hl in l
>>> print(result)
True
"""
if isinstance(obj,Point):
t=self.calculate_t_from_coordinates(*obj)
pt=self.calculate_point(t)
return obj==pt
elif isinstance(obj,Halfline):
return self.contains(obj.P0) and obj.vL.is_collinear(self.vL)
elif isinstance(obj,Segment):
return self.contains(obj.P0) and self.contains(obj.P1)
else:
raise TypeError
def distance(self,obj):
"""Returns the distance to the supplied object.
:param obj: The object to calculate the distance to.
:type obj: Point, Line
:returns: The distance between the line and the object.
:rtype: float
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Point, Vector, Line
>>> l = Line(Point(0,0), Vector(1,0))
>>> result = l.distance(Point(0,10))
>>> print(result)
10
"""
if isinstance(obj, Point):
return self._distance_to_point(obj)
if isinstance(obj, Line):
return self._distance_to_line(obj)
else:
raise TypeError('Line.distance does not accept a %s type' % obj.__class__)
def intersect_line(self,line):
"""Returns the intersection of this line with the supplied line.
:param line: A line.
:type line: Line
:return: Returns a line (this line) if lines are collinear.
Returns None (i.e. no intersection) if lines are parallel.
For 2D, returns a point if lines are skew.
For 3D, returns either None or a point if lines are skew.
:rtype: None, Point, Line
.. rubric:: Code Example
.. code-block:: python
# 2D example
>>> from crossproduct import Point, Vector, Line
>>> l1 = Line(Point(0,0), Vector(1,0))
>>> l2 = Line(Point(0,0), Vector(0,1))
>>> result = l.intersect_line(l2)
>>> print(result)
Point(0,0)
# 3D example
>>> from crossproduct import Point, Vector, Line
>>> l1 = Line(Point(0,0,0), Vector(1,0,0))
>>> l2 = Line(Point(0,0,1), Vector(1,0,0))
>>> result = l1.intersect_line(l2)
>>> print(result)
None
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if self==line: # test for collinear lines
return self
elif self.is_parallel(line): # test for parallel lines
return None
else: # a skew line
return self._intersect_line_skew(line)
    def is_parallel(self,line):
        """Tests if this line and the supplied line are parallel.

        :param line: A line.
        :type line: Line

        :return: Returns True if the lines are parallel (this includes the
            case of collinear lines);
            otherwise False.
        :rtype: bool

        .. rubric:: Code Example

        .. code-block:: python

            # 2D example
            >>> from crossproduct import Point, Vector, Line
            >>> l1 = Line(Point(0,0), Vector(1,0))
            >>> l2 = Line(Point(0,0), Vector(0,1))
            >>> result = l1.is_parallel(l2)
            >>> print(result)
            False

            # 3D example
            >>> from crossproduct import Point, Vector, Line
            >>> l1 = Line(Point(0,0,0), Vector(1,0,0))
            >>> l2 = Line(Point(0,0,1), Vector(2,0,0))
            >>> result = l1.is_parallel(l2)
            >>> print(result)
            True

        """
        return self.vL.is_collinear(line.vL)
    @property
    def nD(self):
        """The number of dimensions of the line.

        Delegates to the dimensionality of the start point.

        :returns: 2 or 3
        :rtype: int

        .. code-block:: python

            >>> from crossproduct import Point, Vector, Line
            >>> l = Line(Point(0,0,0), Vector(1,0,0))
            >>> print(l.nD)
            3

        """
        return self.P0.nD
    @property
    def P0(self):
        """The starting point of the line.

        :rtype: Point
        """
        return self._P0
def plot(self, ax, *args, **kwargs):
"""Plots the line on the supplied axes.
:param ax: An 2D or 3D Axes instance.
:type ax: matplotlib.axes.Axes, mpl_toolkits.mplot3d.axes3d.Axes3D
:param args: positional arguments to be passed to the Axes.plot call.
:param kwargs: keyword arguments to be passed to the Axes.plot call.
.. rubric:: Code Example
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from crossproduct import Point, Vector, Line
>>> fig, ax = plt.subplots()
>>> l=Line(Point(0,0),Vector(1,1))
>>> l.plot(ax)
>>> plt.show()
.. image:: /_static/line_plot_2D.png
|
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
>>> from crossproduct import Point, | |
# <gh_stars>1000+
import os
import platform
import logging
import asyncio
from functools import partial
from collections import namedtuple
from io import StringIO, BytesIO
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse
from http.client import responses
try:
import ssl
BaseSSLError = ssl.SSLError
except ImportError: # pragma nocover
ssl = None
class BaseSSLError(Exception):
pass
try:
from certifi import where
DEFAULT_CA_BUNDLE_PATH = where()
except ImportError: # pragma nocover
DEFAULT_CA_BUNDLE_PATH = None
from multidict import CIMultiDict
import pulsar
from pulsar.api import (
AbortEvent, AbstractClient, Pool, Connection,
ProtocolConsumer, HttpRequestException, HttpConnectionError,
SSLError, cfg_value
)
from pulsar.utils import websocket
from pulsar.utils.system import json as _json
from pulsar.utils.string import to_bytes
from pulsar.utils import http
from pulsar.utils.structures import mapping_iterator
from pulsar.async.timeout import timeout as async_timeout
from pulsar.utils.httpurl import (
encode_multipart_formdata, CHARSET, get_environ_proxies, is_succesful,
get_hostport, cookiejar_from_dict, http_chunks, JSON_CONTENT_TYPES,
parse_options_header, tls_schemes, parse_header_links, requote_uri,
)
from .plugins import (
handle_cookies, WebSocket, Redirect, start_request, RequestKey,
keep_alive, InfoHeaders, Expect
)
from .auth import Auth, HTTPBasicAuth
from .stream import HttpStream
from .decompress import GzipDecompress, DeflateDecompress
# lightweight (scheme, netloc) pair used as a connection-pool key
scheme_host = namedtuple('scheme_host', 'scheme netloc')
LOGGER = logging.getLogger('pulsar.http')
# standard content types for HTTP form submission bodies
FORM_URL_ENCODED = 'application/x-www-form-urlencoded'
MULTIPART_FORM_DATA = 'multipart/form-data'
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Returns the basename of the object's ``name`` attribute, or None
    when there is no usable name (attribute missing, empty, or a
    pseudo-name wrapped in angle brackets such as ``<stdin>``).
    """
    candidate = getattr(obj, 'name', None)
    if not candidate:
        return None
    if candidate[0] == '<' or candidate[-1] == '>':
        return None
    return os.path.basename(candidate)
def scheme_host_port(url):
    """Split *url* into its ``(scheme, host, port)`` triple.

    The host/port pair is resolved by ``get_hostport`` from the
    scheme and netloc of the parsed URL.
    """
    url = urlparse(url)
    host, port = get_hostport(url.scheme, url.netloc)
    return url.scheme, host, port
def is_streamed(data):
    """Return True when *data* has no length and must be streamed.

    Anything that does not support ``len()`` (generators, iterators)
    is considered streamed.
    """
    try:
        len(data)
    except TypeError:
        return True
    else:
        return False
def split_url_params(params):
    """Yield ``(key, value)`` pairs from *params*, flattening multi-values.

    A list/tuple value produces one pair per element; any other value
    is yielded as a single pair.
    """
    for key, values in mapping_iterator(params):
        if not isinstance(values, (list, tuple)):
            values = (values,)
        for value in values:
            yield key, value
def full_url(url, params, method=None):
    """Return *url* with *params* merged into its query string, re-quoted.

    Existing query parameters are preserved and the extra *params*
    (flattened via ``split_url_params``) are appended.  For CONNECT
    requests a bare ``host:port`` target gains an ``http://`` scheme so
    it parses as a netloc.
    """
    p = urlparse(url)
    if not p.netloc and method == 'CONNECT':
        # a CONNECT target like "host:port" has no netloc when parsed;
        # prefixing a scheme makes urlparse treat it as one
        p = urlparse('http://%s' % url)
    params = mapping_iterator(params)
    query = parse_qsl(p.query, True)
    query.extend(split_url_params(params))
    query = urlencode(query)
    return requote_uri(
        urlunparse((p.scheme, p.netloc, p.path, p.params, query, p.fragment))
    )
class RequestBase:
    """Common interface shared by HTTP requests and tunnel requests.

    Provides the attributes and methods that cookie jars and redirect
    handling expect (``unverifiable``, ``origin_req_host``,
    ``get_full_url``), plus hooks overridden by subclasses.
    """
    inp_params = None          # original keyword parameters of the request
    release_connection = True  # whether the connection returns to the pool
    history = None             # list of past responses (set on redirects)
    url = None                 # full target URL, set by subclasses

    @property
    def unverifiable(self):
        """Unverifiable when a redirect.

        It is a redirect when :attr:`history` has past requests.
        """
        return bool(self.history)

    @property
    def origin_req_host(self):
        """Required by Cookies handlers
        """
        if self.history:
            return self.history[0].request.origin_req_host
        else:
            return scheme_host_port(self.url)[1]

    @property
    def type(self):
        """Scheme of the request key (e.g. ``http``/``https``)."""
        return self.key.scheme

    @property
    def full_url(self):
        """Alias of :attr:`url`."""
        return self.url

    def new_parser(self, protocol):
        """Attach fresh headers to *protocol* and return a new HTTP parser."""
        protocol.headers = CIMultiDict()
        return self.client.http_parser(protocol)

    def get_full_url(self):
        """Required by Cookies handlers
        """
        return self.url

    def write_body(self, transport):
        """Hook for writing a request body; no-op in the base class."""
        pass
class HttpTunnel(RequestBase):
    """CONNECT request used to establish a tunnel through a proxy.

    Wraps the real request's key and is sent to the proxy before the
    tunnelled request itself.
    """
    first_line = None   # the encoded CONNECT request line, set by encode()
    data = None
    decompress = False
    method = 'CONNECT'

    def __init__(self, client, req):
        self.client = client
        self.key = req
        self.headers = CIMultiDict(client.DEFAULT_TUNNEL_HEADERS)

    def __repr__(self):
        return 'Tunnel %s' % self.url
    __str__ = __repr__

    def encode(self):
        """Serialise the CONNECT request line plus headers to bytes.

        NOTE(review): RFC 7231 specifies the authority-form
        ``host:port`` as the CONNECT target, while this builds an
        absolute ``http://host:port`` URL — confirm the proxies in use
        accept this form before changing it.
        """
        self.headers['host'] = self.key.netloc
        self.first_line = 'CONNECT http://%s:%s HTTP/1.1' % self.key.address
        buffer = [self.first_line.encode('ascii'), b'\r\n']
        buffer.extend((('%s: %s\r\n' % (name, value)).encode(CHARSET)
                       for name, value in self.headers.items()))
        buffer.append(b'\r\n')
        return b''.join(buffer)

    def has_header(self, header_name):
        return header_name in self.headers

    def get_header(self, header_name, default=None):
        return self.headers.get(header_name, default)

    def remove_header(self, header_name):
        self.headers.pop(header_name, None)
class HttpRequest(RequestBase):
    """An :class:`HttpClient` request for an HTTP resource.
    This class has a similar interface to :class:`urllib.request.Request`.
    :param files: optional dictionary of name, file-like-objects.
    :param allow_redirects: allow the response to follow redirects.
    .. attribute:: method
        The request method
    .. attribute:: version
        HTTP version for this request, usually ``HTTP/1.1``
    .. attribute:: history
        List of past :class:`.HttpResponse` (collected during redirects).
    .. attribute:: wait_continue
        if ``True``, the :class:`HttpRequest` includes the
        ``Expect: 100-Continue`` header.
    .. attribute:: stream
        Allow for streaming body
    """
    # Proxy/TLS state; populated by _set_proxy and the connection machinery.
    _proxy = None
    _ssl = None
    _tunnel = None
    # Guards against sending the body twice (see write_body).
    _write_done = False

    def __init__(self, client, url, method, inp_params=None, headers=None,
                 data=None, files=None, json=None, history=None, auth=None,
                 charset=None, max_redirects=10, source_address=None,
                 allow_redirects=False, decompress=True, version=None,
                 wait_continue=False, websocket_handler=None, cookies=None,
                 params=None, stream=False, proxies=None, verify=True,
                 cert=None, **extra):
        self.client = client
        self.method = method.upper()
        self.inp_params = inp_params or {}
        self.unredirected_headers = CIMultiDict()
        self.history = history
        self.wait_continue = wait_continue
        self.max_redirects = max_redirects
        self.allow_redirects = allow_redirects
        self.charset = charset or 'utf-8'
        self.version = version
        self.decompress = decompress
        self.websocket_handler = websocket_handler
        self.source_address = source_address
        self.stream = stream
        self.verify = verify
        self.cert = cert
        # A plain (user, password) pair is promoted to HTTP basic auth.
        if auth and not isinstance(auth, Auth):
            auth = HTTPBasicAuth(*auth)
        self.auth = auth
        # Merge query ``params`` into the URL before deriving the key.
        self.url = full_url(url, params, method=self.method)
        self._set_proxy(proxies)
        self.key = RequestKey.create(self)
        self.headers = client.get_headers(self, headers)
        self.body = self._encode_body(data, files, json)
        self.unredirected_headers['host'] = self.key.netloc
        # Merge client-level and per-request cookies; the jar adds the
        # ``Cookie`` header via the urllib-style methods defined below.
        cookies = cookiejar_from_dict(client.cookies, cookies)
        if cookies:
            cookies.add_cookie_header(self)

    @property
    def _loop(self):
        # The event loop is owned by the client.
        return self.client._loop

    @property
    def ssl(self):
        """Context for TLS connections.
        If this is a tunneled request and the tunnel connection is not yet
        established, it returns ``None``.
        """
        return self._ssl

    @property
    def proxy(self):
        """Proxy server for this request.
        """
        return self._proxy

    @property
    def tunnel(self):
        """Tunnel for this request.
        """
        return self._tunnel

    def __repr__(self):
        return self.first_line()
    __str__ = __repr__

    def first_line(self):
        """Return the HTTP request line (``METHOD target VERSION``)."""
        # CONNECT targets the host:port pair; requests through a plain
        # proxy must use the absolute URL; direct requests use the
        # origin-form (path, params, query and fragment only).
        if self.method == 'CONNECT':
            url = self.key.netloc
        elif self._proxy:
            url = self.url
        else:
            p = urlparse(self.url)
            url = urlunparse(('', '', p.path or '/', p.params,
                              p.query, p.fragment))
        return '%s %s %s' % (self.method, url, self.version)

    def is_chunked(self):
        # A body without an explicit content-length is sent chunked.
        return self.body and 'content-length' not in self.headers

    def encode(self):
        """The bytes representation of this :class:`HttpRequest`.
        Called by :class:`HttpResponse` when it needs to encode this
        :class:`HttpRequest` before sending it to the HTTP resource.
        """
        # Call body before first_line in case the query is changed.
        first_line = self.first_line()
        if self.body and self.wait_continue:
            self.headers['expect'] = '100-continue'
        headers = self.headers
        if self.unredirected_headers:
            # Send unredirected headers as well; explicit headers with the
            # same name take precedence over them.
            headers = self.unredirected_headers.copy()
            headers.update(self.headers)
        buffer = [first_line.encode('ascii'), b'\r\n']
        buffer.extend((('%s: %s\r\n' % (name, value)).encode(CHARSET)
                       for name, value in headers.items()))
        buffer.append(b'\r\n')
        return b''.join(buffer)

    def add_header(self, key, value):
        self.headers[key] = value

    def has_header(self, header_name):
        """Check ``header_name`` is in this request headers.
        """
        return (header_name in self.headers or
                header_name in self.unredirected_headers)

    def get_header(self, header_name, default=None):
        """Retrieve ``header_name`` from this request headers.
        """
        return self.headers.get(
            header_name, self.unredirected_headers.get(header_name, default))

    def remove_header(self, header_name):
        """Remove ``header_name`` from this request.
        """
        # Remove from both maps; return whichever value was present.
        val1 = self.headers.pop(header_name, None)
        val2 = self.unredirected_headers.pop(header_name, None)
        return val1 or val2

    def add_unredirected_header(self, header_name, header_value):
        # NOTE(review): mirrors urllib.request.Request.add_unredirected_header;
        # presumably these headers are not carried over to redirected
        # requests -- confirm against the redirect handling.
        self.unredirected_headers[header_name] = header_value

    def write_body(self, transport):
        """Write the request body to *transport* (at most once)."""
        assert not self._write_done, 'Body already sent'
        self._write_done = True
        if not self.body:
            return
        if is_streamed(self.body):
            # Streamed bodies are written asynchronously, chunk by chunk.
            self._loop.create_task(self._write_streamed_data(transport))
        else:
            self._write_body_data(transport, self.body, True)
    # INTERNAL ENCODING METHODS

    def _encode_body(self, data, files, json):
        """Encode the request body and set content-type/content-length.
        Returns encoded bytes, the original iterator for streamed bodies,
        or ``None`` when there is nothing to send.
        """
        body = None
        ct = None
        if isinstance(data, (str, bytes)):
            if files:
                raise ValueError('data cannot be a string or bytes when '
                                 'files are present')
            body = to_bytes(data, self.charset)
        elif data and is_streamed(data):
            if files:
                raise ValueError('data cannot be an iterator when '
                                 'files are present')
            # Length is unknown up-front: fall back to chunked encoding.
            if 'content-length' not in self.headers:
                self.headers['transfer-encoding'] = 'chunked'
            return data
        elif data or files:
            if files:
                body, ct = self._encode_files(data, files)
            else:
                body, ct = self._encode_params(data)
        elif json:
            body = _json.dumps(json).encode(self.charset)
            ct = 'application/json'
        # Only set a content type when the caller did not provide one.
        if not self.headers.get('content-type') and ct:
            self.headers['Content-Type'] = ct
        if body:
            self.headers['content-length'] = str(len(body))
        return body

    def _encode_files(self, data, files):
        """Encode *data* and *files* as a multipart/form-data body."""
        fields = []
        # Regular form fields first; scalar values are wrapped in a list.
        for field, val in mapping_iterator(data or ()):
            if (isinstance(val, str) or isinstance(val, bytes) or
                    not hasattr(val, '__iter__')):
                val = [val]
            for v in val:
                if v is not None:
                    if not isinstance(v, bytes):
                        v = str(v)
                    fields.append((field.decode('utf-8') if
                                   isinstance(field, bytes) else field,
                                   v.encode('utf-8') if isinstance(v, str)
                                   else v))
        # File entries: value may be file-like, bytes, str, or a
        # (filename, fileobj[, content-type]) tuple.
        for (k, v) in mapping_iterator(files):
            # support for explicit filename
            ft = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                else:
                    fn, fp, ft = v
            else:
                fn = guess_filename(v) or k
                fp = v
            if isinstance(fp, bytes):
                fp = BytesIO(fp)
            elif isinstance(fp, str):
                fp = StringIO(fp)
            if ft:
                new_v = (fn, fp.read(), ft)
            else:
                new_v = (fn, fp.read())
            fields.append((k, new_v))
        #
        return encode_multipart_formdata(fields, charset=self.charset)

    def _encode_params(self, params):
        """Encode *params* according to the request content type.
        Returns a ``(bytes, content_type)`` pair.
        """
        content_type = self.headers.get('content-type')
        # No content type given, choose one
        if not content_type:
            content_type = FORM_URL_ENCODED
        if hasattr(params, 'read'):
            params = params.read()
        if content_type in JSON_CONTENT_TYPES:
            body = _json.dumps(params)
        elif content_type == FORM_URL_ENCODED:
            body = urlencode(tuple(split_url_params(params)))
        elif content_type == MULTIPART_FORM_DATA:
            body, content_type = encode_multipart_formdata(
                params, charset=self.charset)
        else:
            # Unknown content type: pass the data through unchanged.
            body = params
        return to_bytes(body, self.charset), content_type

    def _write_body_data(self, transport, data, finish=False):
        # Wrap the data in HTTP chunks when using chunked encoding.
        if self.is_chunked():
            data = http_chunks(data, finish)
        elif data:
            data = (data,)
        else:
            return
        for chunk in data:
            transport.write(chunk)

    async def _write_streamed_data(self, transport):
        for data in self.body:
            try:
                # Chunks may be awaitables; a plain value raises TypeError
                # on await and is used as-is.
                data = await data
            except TypeError:
                pass
            self._write_body_data(transport, data)
        # Empty final write signals the end of a chunked body.
        self._write_body_data(transport, b'', True)
    # PROXY INTERNALS

    def _set_proxy(self, proxies):
        """Work out proxy/tunnel settings for this request.
        Client-level proxies are merged with per-request *proxies*; hosts
        matching entries under the ``no`` key bypass the proxy entirely.
        """
        url = urlparse(self.url)
        request_proxies = self.client.proxies.copy()
        if proxies:
            request_proxies.update(proxies)
        self.proxies = request_proxies
        #
        if url.scheme in request_proxies:
            host, port = get_hostport(url.scheme, url.netloc)
            no_proxy = [n for n in request_proxies.get('no', '').split(',')
                        if n]
            if not any(map(host.endswith, no_proxy)):
                proxy_url = request_proxies[url.scheme]
                # TLS schemes need a CONNECT tunnel; plain HTTP can be
                # proxied directly.
                if url.scheme in tls_schemes:
                    self._tunnel = proxy_url
                else:
                    self._proxy = proxy_url
class HttpResponse(ProtocolConsumer):
"""A :class:`.ProtocolConsumer` for the HTTP client | |
a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term, use
# `{{` and `}}` for literal `{`/`}` braces. The following further
# placeholders are defined to configure how special characters in the
# search terms are replaced by safe characters (called 'quoting'): *
# `{}` and `{semiquoted}` quote everything except slashes; this is the
# most sensible choice for almost all search engines (for the search
# term `slash/and&` this placeholder expands to `slash/and%26amp`).
# * `{quoted}` quotes all characters (for `slash/and&` this
# placeholder expands to `slash%2Fand%26amp`). * `{unquoted}` quotes
# nothing (for `slash/and&` this placeholder expands to
# `slash/and&`). * `{0}` means the same as `{}`, but can be used
# multiple times. The search engine named `DEFAULT` is used when
# `url.auto_search` is turned on and something else than a URL was
# entered to be opened. Other search engines can be used by prepending
# the search engine name to the search term, e.g. `:open google
# qutebrowser`.
# Type: Dict
c.url.searchengines = {'DEFAULT': 'https://duckduckgo.com/?q={}'}
# ===================== Completion widget =====================
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
# Type: List of QtColor, or QtColor
c.colors.completion.fg = '#333333'
# Background color of the completion widget for odd rows.
# Type: QssColor
c.colors.completion.odd.bg = '#ffffff'
# Background color of the completion widget for even rows.
# Type: QssColor
c.colors.completion.even.bg = '#ffffff'
# Foreground color of completion widget category headers.
# Type: QtColor
c.colors.completion.category.fg = '#333333'
# Background color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.bg = '#f3df30'
# Top border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.top = '#f3df30'
# Bottom border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.bottom = '#f3df30'
# Foreground color of the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.fg = '#333333'
# Background color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.bg = '#30b7f3'
# Top border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.top = '#30b7f3'
# Bottom border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.bottom = '#30b7f3'
# Foreground color of the matched text in the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.match.fg = '#ff4444'
# Foreground color of the matched text in the completion.
# Type: QtColor
c.colors.completion.match.fg = '#63f330'
# Color of the scrollbar handle in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.fg = '#cccccc'
# Color of the scrollbar in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.bg = '#333333'
# ===================== Context menu =====================
# Background color of the context menu. If set to null, the Qt default
# is used.
# Type: QssColor
c.colors.contextmenu.menu.bg = None
# Background color of disabled items in the context menu. If set to
# null, the Qt default is used.
# Type: QssColor
c.colors.contextmenu.disabled.bg = None
# Foreground color of disabled items in the context menu. If set to
# null, the Qt default is used.
# Type: QssColor
c.colors.contextmenu.disabled.fg = None
# ===================== Downloads bar =====================
# Background color for the download bar.
# Type: QssColor
c.colors.downloads.bar.bg = '#ffffff'
# Color gradient start for download text.
# Type: QtColor
c.colors.downloads.start.fg = '#333333'
# Color gradient start for download backgrounds.
# Type: QtColor
c.colors.downloads.start.bg = '#ffffff'
# Color gradient end for download text.
# Type: QtColor
c.colors.downloads.stop.fg = '#30b7f3'
# Color gradient stop for download backgrounds.
# Type: QtColor
c.colors.downloads.stop.bg = '#ffffff'
# Color gradient interpolation system for download text.
# Type: ColorSystem
# Valid values:
#   - rgb: Interpolate in the RGB color system.
#   - hsv: Interpolate in the HSV color system.
#   - hsl: Interpolate in the HSL color system.
#   - none: Don't show a gradient.
c.colors.downloads.system.fg = 'rgb'
# Color gradient interpolation system for download backgrounds.
# Type: ColorSystem
# Valid values:
#   - rgb: Interpolate in the RGB color system.
#   - hsv: Interpolate in the HSV color system.
#   - hsl: Interpolate in the HSL color system.
#   - none: Don't show a gradient.
c.colors.downloads.system.bg = 'rgb'
# Foreground color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.fg = '#F33051'
# Background color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.bg = '#ffffff'
# ===================== Hints =====================
# Font color for hints.
# Type: QssColor
c.colors.hints.fg = 'white'
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
# Type: QssColor
c.colors.hints.bg = '#30b7f3'
# Font color for the matched part of hints.
# Type: QtColor
c.colors.hints.match.fg = '#333333'
# ===================== Keyhint widget =====================
# Text color for the keyhint widget.
# Type: QssColor
c.colors.keyhint.fg = '#cccccc'
# Highlight color for keys to complete the current keychain.
# Type: QssColor
c.colors.keyhint.suffix.fg = '#30b7f3'
# Background color of the keyhint widget.
# Type: QssColor
c.colors.keyhint.bg = '#333333'
# ===================== Messages =====================
# Foreground color of an error message.
# Type: QssColor
c.colors.messages.error.fg = '#f33051'
# Background color of an error message.
# Type: QssColor
c.colors.messages.error.bg = '#ffffff'
# Border color of an error message.
# Type: QssColor
c.colors.messages.error.border = '#f33051'
# Foreground color of a warning message.
# Type: QssColor
c.colors.messages.warning.fg = '#F38630'
# Background color of a warning message.
# Type: QssColor
c.colors.messages.warning.bg = '#ffffff'
# Border color of a warning message.
# Type: QssColor
c.colors.messages.warning.border = '#F38630'
# Foreground color of an info message.
# Type: QssColor
c.colors.messages.info.fg = '#333333'
# Background color of an info message.
# Type: QssColor
c.colors.messages.info.bg = '#ffffff'
# Border color of an info message.
# Type: QssColor
c.colors.messages.info.border = '#333333'
# ===================== Prompts =====================
# Foreground color for prompts.
# Type: QssColor
c.colors.prompts.fg = '#333333'
# Border used around UI elements in prompts.
# Type: String
c.colors.prompts.border = '0px solid #30b7f3'
# Background color for prompts.
# Type: QssColor
c.colors.prompts.bg = '#ffffff'
# Background color for the selected item in filename prompts.
# Type: QssColor
c.colors.prompts.selected.bg = '#30b7f3'
# ===================== Statusbar =====================
# Foreground color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.fg = '#333333'
# Background color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.bg = '#ffffff'
# Foreground color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.fg = '#30b7f3'
# Background color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.bg = '#ffffff'
# Foreground color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.fg = '#9e30f3'
# Background color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.bg = '#ffffff'
# Foreground color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.fg = '#333333'
# Background color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.bg = '#cccccc'
# Foreground color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.fg = '#333333'
# Background color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.bg = '#ffffff'
# Foreground color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.fg = '#333333'
# Background color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.bg = '#cccccc'
# Foreground color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.fg = 'white'
# Background color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.bg = '#9e30f3'
# Foreground color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.fg = 'black'
# Background color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.bg = '#9e30f3'
# Background color of the progress bar.
# Type: QssColor
c.colors.statusbar.progress.bg = '#30b7f3'
# Default foreground color of the URL in the statusbar.
# Type: QssColor
c.colors.statusbar.url.fg = '#333333'
# Foreground color of the URL in the statusbar on error.
# Type: QssColor
c.colors.statusbar.url.error.fg = '#f33051'
# Foreground color of the URL in the statusbar for hovered links.
# Type: QssColor
c.colors.statusbar.url.hover.fg = '#30b7f3'
# Foreground color of the URL in the statusbar on successful load
# (http).
# Type: QssColor
c.colors.statusbar.url.success.http.fg = '#f38630'
# Foreground color of the URL in the statusbar on successful load
# (https).
# Type: QssColor
c.colors.statusbar.url.success.https.fg = '#333333'
# Foreground color of the URL in the statusbar when there's a warning.
# Type: QssColor
c.colors.statusbar.url.warn.fg = '#F38630'
# ===================== Tabs =====================
# Background color of the tab bar.
# Type: QssColor
c.colors.tabs.bar.bg = '#ffffff'
# Color gradient start for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.start = '#ffffff'
# Color gradient end for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.stop = '#ffffff'
# Color for the tab indicator on errors.
# Type: QtColor
c.colors.tabs.indicator.error = '#f33051'
# Color gradient interpolation system for the tab indicator.
# Type: ColorSystem
# Valid values:
#   - rgb: Interpolate in the RGB color system.
#   - hsv: Interpolate in the HSV color system.
#   - hsl: Interpolate in the HSL color system.
#   - none: Don't show a gradient.
c.colors.tabs.indicator.system = 'rgb'
# Foreground color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.fg = '#333333'
# Background color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.bg = '#ffffff'
# Foreground color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.fg = '#333333'
# Background color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.bg = '#ffffff'
# Foreground color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.fg = '#333333'
# Background color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.bg = '#30b7f3'
# Foreground color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.fg = '#333333'
# Background color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.bg = '#30b7f3'
# Foreground color of pinned unselected odd tabs.
# Type: QtColor
c.colors.tabs.pinned.odd.fg = '#333333'
# Background color of pinned | |
["loads"],
["sectionid", "devicenumber", "loadtype", "connection"],
mapp_loads,
)
)
#########################################
# #
# CUSTOMER LOADS #
# #
#########################################
#
self.customer_loads.update(
self.parser_helper(
line,
["customer_loads"],
[
"sectionid",
"devicenumber",
"loadtype",
"customernumber",
"customertype",
"loadmodelid",
"valuetype",
"loadphase",
"value1",
"value2",
"connectedkva",
"numberofcustomer",
],
mapp_customer_loads,
)
)
#########################################
# #
# CUSTOMER CLASS #
# #
#########################################
#
self.customer_class.update(
self.parser_helper(
line,
["customer_class"],
[
"id",
"constantpower",
"constantcurrent",
"constantimpedance",
"powerfactor",
"constantimpedancezp",
"constantimpedancezq",
"constantcurrentip",
"constantcurrentiq",
"constantpowerpp",
"constantpowerpq",
],
mapp_customer_class,
)
)
duplicate_loads = set()
for sectionID in self.customer_loads.keys():
if sectionID.endswith("*"):
duplicate_loads.add(sectionID.lower().strip("*"))
for sectionID, settings in self.customer_loads.items():
sectionID = sectionID.strip("*").lower()
if sectionID in self.loads:
load_data = self.loads[sectionID]
else:
load_data = {}
if "connectedkva" in settings:
connectedkva = float(settings["connectedkva"])
else:
connectedkva = None
if "valuetype" in settings:
value_type = int(settings["valuetype"])
if "value1" in settings and "value2" in settings:
if (
float(settings["value1"]) == 0.0
and float(settings["value2"]) == 0.0
):
p = 0
q = 0
elif value_type == 0: # P and Q are given
try:
p, q = float(settings["value1"]), float(settings["value2"])
except:
logger.warning(
"WARNING:: Skipping load on section {}".format(sectionID)
)
continue
elif value_type == 1: # KVA and PF are given
try:
kva, PF = (
float(settings["value1"]),
float(settings["value2"]) * 0.01,
)
if kva == 0 and "connectedkva" in settings:
kva = float(settings["connectedkva"])
p = kva * PF
q = math.sqrt(kva ** 2 - p ** 2)
except:
logger.warning(
"WARNING:: Skipping load on section {}".format(sectionID)
)
continue
elif value_type == 2: # P and PF are given
try:
p, PF = float(settings["value1"]), float(settings["value2"])
if 0 <= PF <= 1:
q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
elif 1 < PF <= 100:
PF /= 100.0
q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
else:
logger.warning("problem with PF")
logger.warning(PF)
except:
logger.warning("Skipping load on section {}".format(sectionID))
continue
elif value_type == 3: # AMP and PF are given
# TODO
logger.warning(
"WARNING:: Skipping load on section {}".format(sectionID)
)
continue
if p >= 0 or q >= 0:
if "loadphase" in settings:
phases = settings["loadphase"]
else:
phases = []
fused = False
if sectionID in duplicate_loads:
fusion = True
if sectionID in self._loads:
api_load = self._loads[sectionID]
fused = True
elif p != 0:
api_load = Load(model)
else:
fusion = False
api_load = Load(model)
if fusion and p == 0:
# logger.warning(
# "WARNING:: Skipping duplicate load on section {} with p=0".format(sectionID)
# )
continue
try:
if fusion and sectionID in self._loads:
api_load.name += "_" + reduce(
lambda x, y: x + "_" + y, phases
)
else:
api_load.name = (
"Load_"
+ sectionID
+ "_"
+ reduce(lambda x, y: x + "_" + y, phases)
)
except:
pass
try:
if not (fusion and sectionID in self._loads):
if connectedkva is not None:
api_load.transformer_connected_kva = (
connectedkva * 10 ** 3
) # DiTTo in var
elif connectedkva is not None:
if api_load.transformer_connected_kva is None:
api_load.transformer_connected_kva = (
connectedkva * 10 ** 3
) # DiTTo in var
else:
api_load.transformer_connected_kva += (
connectedkva * 10 ** 3
) # DiTTo in var
except:
pass
try:
if not (fusion and sectionID in self._loads):
api_load.connection_type = self.connection_configuration_mapping(
load_data["connection"]
)
except:
pass
if not (fusion and sectionID in self._loads):
if (
"loadtype" in settings
and settings["loadtype"] in self.customer_class
):
load_type_data = self.customer_class[settings["loadtype"]]
else:
load_type_data = {}
try:
if not (fusion and sectionID in self._loads):
api_load.connecting_element = self.section_phase_mapping[
sectionID
]["fromnodeid"]
except:
pass
api_load.feeder_name = self.section_feeder_mapping[sectionID]
api_load.num_users = float(settings["numberofcustomer"])
for ph in phases:
try:
api_phase_load = PhaseLoad(model)
except:
raise ValueError(
"Unable to instanciate PhaseLoad DiTTo object."
)
try:
api_phase_load.phase = ph
except:
pass
try:
api_phase_load.p, api_phase_load.q = (
10 ** 3 * p / len(phases),
10 ** 3 * q / len(phases),
)
except:
pass
# ZIP load parameters
try:
api_phase_load.ppercentcurrent = (
float(load_type_data["constantcurrentip"]) / 100.0
)
api_phase_load.qpercentcurrent = (
float(load_type_data["constantcurrentiq"]) / 100.0
)
api_phase_load.ppercentpower = (
float(load_type_data["constantpowerpp"]) / 100.0
)
api_phase_load.qpercentpower = (
float(load_type_data["constantpowerpq"]) / 100.0
)
api_phase_load.ppercentimpedance = (
float(load_type_data["constantimpedancezp"]) / 100.0
)
api_phase_load.qpercentimpedance = (
float(load_type_data["constantimpedancezq"]) / 100.0
)
# api_phase_load.use_zip=1
# api_phase_load.model=8
except:
pass
# CYME store phase loads with P=0 and Q=0.
# Do not add them to DiTTo (otherwise it will make the validation
# on the number of objects fail since we will have many more loads than there actually are...)
# if api_phase_load.p!=0 or api_phase_load.q!=0:
api_load.phase_loads.append(api_phase_load)
self._loads[sectionID] = api_load
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
if not fused: #Because mutiple loads on different phases are joined into a single one
self.section_duplicates[sectionID].append(api_load)
return 1
def parse_dg(self, model):
""" Parse the Distributed Generation from CYME to DiTTo. May be respresented as ECGs or PVs.
This reads the objets [CONVERTER], [CONVERTER CONTROL SETTING], [LONG TERM DYNAMICS CURVE EXT] [DGGENERATIONMODEL] and in the case when PV is included [PHOTOVOLTAIC SETTINGS]"""
self._dgs = []
self.converter = {}
self.converter_settings = {}
self.long_term_dynamics = {}
self.photovoltaic_settings = {}
self.bess = {}
self.bess_settings = {}
self.dg_generation = {}
mapp_converter = {
"devicenumber": 0,
"devicetype": 1,
"converterrating": 2,
"activepowerrating": 3,
"reactivepowerrating": 4,
"minimumpowerfactor": 5,
"powerfalllimit": 23,
"powerriselimit": 24,
"risefallunit": 25,
}
mapp_converter_settings = {
"devicenumber": 0,
"devicetype": 1,
"controlindex": 2,
"timetriggerindex": 3,
"controltype": 4,
"fixedvarinjection": 5,
"injectionreference": 6,
"convertercontrolid": 7,
"powerreference": 8,
"powerfactor": 9,
}
mapp_photovoltaic_settings = {
"sectionid": 0,
"location": 1,
"devicenumber": 2,
"equipmentid": 6,
"eqphase": 7,
"ambienttemperature": 11,
}
mapp_bess = {
"id": 0,
"ratedstorageenergy": 1,
"maxchargingpower": 2,
"maxdischargingpower": 3,
"chargeefficiency": 4,
"dischargeefficiency": 5,
}
mapp_bess_settings = {
"sectionid": 0,
"devicenumber": 2,
"equipmentid": 6,
"phase": 7,
"maximumsoc": 10,
"minimumsoc": 11,
"initialsoc": 16,
}
mapp_bess = {
"id": 0,
"ratedstorageenergy": 1,
"maxchargingpower": 2,
"maxdischargingpower": 3,
"chargeefficiency": 4,
"dischargeefficiency": 5,
}
mapp_bess_settings = {
"sectionid": 0,
"devicenumber": 2,
"equipmentid": 6,
"phase": 7,
"maximumsoc": 10,
"minimumsoc": 11,
"initialsoc": 16,
}
mapp_long_term_dynamics = {
"devicenumber": 0,
"devicetype": 1,
"adjustmentsettings": 2,
"powercurvemodel": 3,
}
mapp_dg_generation_model = {
"devicenumber": 0,
"devicetype": 1,
"loadmodelname": 2,
"activegeneration": 3,
"powerfactor": 4,
}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# CONVERTER #
# #
#########################################
self.converter.update(
self.parser_helper(
line,
["converter"],
[
"devicenumber",
"devicetype",
"converterrating",
"activepowerrating",
"reactivepowerrating",
"minimumpowerfactor",
"powerfalllimit",
"powerriselimit",
"risefallunit",
],
mapp_converter,
{"type": "converter"},
)
)
#########################################
# #
# CONVERTER CONTROL SETTINGS #
# #
#########################################
self.converter_settings.update(
self.parser_helper(
line,
["converter_control_settings"],
[
"devicenumber",
"devicetype",
"controltype",
"fixedvarinjection",
"injectionreference",
"convertercontrolid",
"powerreference",
"powerfactor",
],
mapp_converter_settings,
{"type": "converter_settings"},
)
)
#########################################
# #
# PHOTOVOLTAIC SETTINGS #
# #
#########################################
self.photovoltaic_settings.update(
self.parser_helper(
line,
["photovoltaic_settings"],
["sectionid", "devicenumber", "eqphase", "ambienttemperature"],
mapp_photovoltaic_settings,
{"type": "photovoltaic_settings"},
)
)
#########################################
# #
# BESS SETTINGS #
# #
#########################################
self.bess_settings.update(
self.parser_helper(
line,
["bess_settings"],
[
"sectionid",
"devicenumber",
"equipmentid",
"phase",
"maximumsoc",
"minimumsoc",
"initialsoc",
],
mapp_bess_settings,
{"type": "bess_settings"},
)
)
#########################################
# #
# LONG TERM DYNAMICS CURVE EXT #
# #
#########################################
self.long_term_dynamics.update(
self.parser_helper(
line,
["long_term_dynamics_curve_ext"],
[
"devicenumber",
"devicetype",
"adjustmentsettings",
"powercurvemodel",
],
mapp_long_term_dynamics,
{"type": "long_term_dynamics"},
)
)
#########################################
# #
# DGGENERATIONMODEL #
# #
#########################################
self.dg_generation.update(
self.parser_helper(
line,
["dggenerationmodel"],
[
"devicenumber",
"devicetype",
"activegeneration",
"powerfactor",
"loadmodelname",
],
mapp_dg_generation_model,
{"type": "dg_generation_model"},
)
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the equipment file
self.get_file_content("equipment")
# Loop over the equipment file
for line in self.content:
#########################################
# #
# BESS #
# #
#########################################
#
self.bess.update(
self.parser_helper(
line,
["bess"],
[
"id",
"ratedstorageenergy",
"maxchargingpower",
"maxdischargingpower",
"chargeefficiency",
"dischargeefficiency",
],
mapp_bess,
)
)
api_photovoltaics = {}
api_bessi = {}
for sectionID, settings in self.photovoltaic_settings.items():
try:
api_photovoltaic = Photovoltaic(model)
except:
raise ValueError(
"Unable to instanciate photovoltaic {id}".format(id=sectionID)
)
try:
api_photovoltaic.name = "PV_" + settings["devicenumber"].lower()
api_photovoltaic.feeder_name = self.section_feeder_mapping[
sectionID.lower()
]
api_photovoltaics[settings["devicenumber"].lower()] = api_photovoltaic
except:
raise ValueError(
"Unable to set photovoltaic name for {id}".format(id=sectionID)
)
try:
api_photovoltaic.temperature = float(
settings["ambienttemperature"]
) # Not included in ECG SETTINGS
except:
pass
try:
api_photovoltaic.phases = [
Unicode(k) for k in list(settings["eqphase"])
]
except:
pass
try:
api_photovoltaic.connecting_element = self.section_phase_mapping[
sectionID.lower()
]["fromnodeid"]
except:
pass
if not sectionID | |
__gen_func_exp__(x,table_name)
if tmp_exp is None:
print >>sys.stderr,"Internal Error:__gen_func_exp__"
exit(29)
new_list.append(tmp_exp)
else:
new_list.append(x)
ret_exp = ystree.YFuncExp(exp.func_name,new_list)
return ret_exp
## Generate a new list which contains the exps with the specified table name.
def __gen_join_list__(cur_list, new_list, table_name):
    """Append to *new_list* one entry per expression in *cur_list*:
    - a raw column belonging to *table_name* is kept as-is;
    - a raw column from another table is replaced by a placeholder;
    - a function expression is rewritten via __gen_func_exp__.
    Mutates *new_list* in place and returns None.
    """
    # "NULL" text constant used as the placeholder for foreign columns.
    tmp_exp = ystree.YConsExp("\"NULL\"", "TEXT")
    for exp in cur_list:
        if isinstance(exp, ystree.YRawColExp):
            if exp.table_name != table_name:
                # NOTE(review): tmp_exp is reused here; after a YFuncExp entry
                # it holds that function's rewritten expression rather than
                # the NULL placeholder -- confirm this aliasing is intended.
                new_list.append(tmp_exp)
            else:
                new_list.append(exp)
        elif isinstance(exp, ystree.YFuncExp):
            tmp_exp = __gen_func_exp__(exp, table_name)
            new_list.append(tmp_exp)
def __gen_join_where__(cur_exp,table_name):
    """Project a join WHERE expression onto a single table (Python 2).
    Boolean connectives (AND/OR, per bool_func_dict) are rebuilt from the
    recursively projected children.  A leaf predicate is deep-copied when
    all of its column references belong to *table_name*, otherwise it is
    replaced by the boolean constant FALSE.  Returns the new expression,
    or None when *cur_exp* is not a function expression.
    """
    ret_exp = None
    if not isinstance(cur_exp,ystree.YFuncExp):
        return None
    if cur_exp.func_name in bool_func_dict.keys():
        for x in cur_exp.parameter_list:
            if not isinstance(x,ystree.YFuncExp):
                # Children of a boolean connective must themselves be
                # function expressions; anything else is a parser bug.
                print >>sys.stderr,"Internal Error:__gen_join_where__"
                exit(29)
            tmp_exp = __gen_join_where__(x,table_name)
            if ret_exp == None:
                ret_exp = tmp_exp
            else:
                # Fold successive children back under the same connective.
                para_list = []
                para_list.append(ret_exp)
                para_list.append(tmp_exp)
                ret_exp = ystree.YFuncExp(cur_exp.func_name,para_list)
        return ret_exp
    else:
        ###fix me here: how to handle the first para if the func is IS
        if cur_exp.func_name == "IS":
            # IS only inspects its first parameter for table ownership.
            tmp_bool = True
            para1 = cur_exp.parameter_list[0]
            if isinstance(para1,ystree.YRawColExp):
                if para1.table_name != table_name:
                    tmp_bool = False
            if tmp_bool == True:
                ret_exp = copy.deepcopy(cur_exp)
            else:
                ret_exp = ystree.YConsExp("FALSE","BOOLEAN")
        else:
            # Binary predicate: both column operands must belong to the table.
            tmp_bool = True
            para1 = cur_exp.parameter_list[0]
            para2 = cur_exp.parameter_list[1]
            if isinstance(para1,ystree.YRawColExp):
                if para1.table_name != table_name:
                    tmp_bool = False
            if isinstance(para2,ystree.YRawColExp):
                if para2.table_name != table_name:
                    tmp_bool = False
            if tmp_bool == True:
                ret_exp = copy.deepcopy(cur_exp)
            else:
                ret_exp = ystree.YConsExp("FALSE","BOOLEAN")
        return ret_exp
# Decide whether a join node joins a table with itself (a self join).
def __self_join__(tree):
    """Return True when both children of *tree* are TableNodes and the
    two table aliases resolve to the same underlying table name.
    """
    children_are_tables = (isinstance(tree.left_child, ystree.TableNode) and
                           isinstance(tree.right_child, ystree.TableNode))
    if not children_are_tables:
        return False
    alias_targets = list(tree.table_alias_dict.values())
    if len(alias_targets) != 2:
        return False
    return alias_targets[0] == alias_targets[1]
def __join_gen_mr__(tree,left_name,fo):
### join map part
line_buffer = "line_buf"
if tree.select_list is None:
print >>sys.stderr,"Internal Error:__join_gen_mr__"
exit(29)
self_join_bool = False
self_join_bool = __self_join__(tree)
### get map output key
left_key_list = []
right_key_list = []
if tree.join_explicit is True:
__get_join_key__(tree.join_condition.on_condition_exp,left_key_list,"LEFT")
__get_join_key__(tree.join_condition.on_condition_exp,right_key_list,"RIGHT")
elif tree.join_condition is not None:
__get_join_key__(tree.join_condition.where_condition_exp,left_key_list,"LEFT")
__get_join_key__(tree.join_condition.where_condition_exp,right_key_list,"RIGHT")
if len(left_key_list) == 0:
new_exp = ystree.YConsExp(1,"INTEGER")
left_key_list.append(new_exp)
if len(right_key_list) == 0:
new_exp = ystree.YConsExp(1,"INTEGER")
right_key_list.append(new_exp)
left_key_type = __get_key_value_type__(left_key_list)
right_key_type = __get_key_value_type__(right_key_list)
if left_key_type != right_key_type:
print >>sys.stderr,"Internal Error:__join_gen_mr__"
exit(29)
map_key_type = left_key_type
map_value_type = "Text" ## we need to add tag to differentiate the data from left table and right table
print >>fo,"\tpublic static class Map extends Mapper<Object, Text,"+map_key_type+","+map_value_type+">{\n"
print >>fo, "\t\tprivate int left = 0;"
print >>fo, "\t\tpublic void setup(Context context) throws IOException, InterruptedException {\n"
print >>fo, "\t\t\tint last_index = -1, start_index = -1;"
print >>fo, "\t\t\tString path = ((FileSplit)context.getInputSplit()).getPath().toString();"
print >>fo, "\t\t\tlast_index = path.lastIndexOf(\'/\');"
print >>fo,"\t\t\tlast_index = last_index - 1;"
print >>fo, "\t\t\tstart_index = path.lastIndexOf(\'/\',last_index);"
print >>fo, "\t\t\tString f_name = path.substring(start_index+1,last_index+1);"
print >>fo, "\t\t\tif(f_name.compareTo(\"" + left_name + "\") == 0 )"
print >>fo, "\t\t\t\tleft = 1;"
print >>fo,"\t\t}"
print >>fo,"\t\tpublic void map(Object key, Text value,Context context) throws IOException,InterruptedException{\n"
print >>fo,"\t\t\tString line = value.toString();"
print >>fo, "\t\t\tint prev=0,i=0,n=0;"
if self_join_bool is False:
print >>fo,"\t\t\tif(this.left == 1){\n"
else:
if isinstance(tree.left_child,ystree.TableNode):
exp_list = list(tree.left_child.select_list.tmp_exp_list)
if tree.left_child.where_condition is not None:
exp_list.append(tree.left_child.where_condition.where_condition_exp)
tmp1 = __get_max_index__(exp_list)
else:
tmp1 = len(tree.left_child.select_list.tmp_exp_list)
if isinstance(tree.right_child,ystree.TableNode):
exp_list = list(tree.right_child.select_list.tmp_exp_list)
if tree.right_child.where_condition is not None:
exp_list.append(tree.right_child.where_condition.where_condition_exp)
tmp2 = __get_max_index__(exp_list)
else:
tmp2 = len(tree.right_child.select_list.tmp_exp_list)
if tmp1>tmp2:
max_index = tmp1
else:
max_index = tmp2
print >>fo,"\t\t\t\tString[] "+ line_buffer +" = new String["+ str(max_index)+"];"
print >>fo, "\t\t\t\tfor(i=0,n=0,prev=0;i<line.length();i++){\n"
print >>fo, "\t\t\t\t\tif (line.charAt(i) == \'|\'){"
print >>fo, "\t\t\t\t\t\t" + line_buffer + "[n] = line.substring(prev,i);"
print >>fo, "\t\t\t\t\t\tn = n+1;"
print >>fo, "\t\t\t\t\t\tprev = i+1;"
print >>fo, "\t\t\t\t\t}"
print >>fo, "\t\t\t\t\tif(n == "+str(max_index)+")"
print >>fo, "\t\t\t\t\t\tbreak;"
print >>fo,"\t\t\t\t}\n"
print >>fo,"\t\t\tif(n<" + str(max_index) + ")"
print >>fo,"\t\t\t\t"+line_buffer+ "[n] = line.substring(prev,i);"
buf_dict = {}
buf_dict["LEFT"] = line_buffer
left_key = __gen_mr_value__(left_key_list,left_key_type,buf_dict)
buf_dict = {}
for x in tree.left_child.table_list:
if x not in buf_dict.keys():
buf_dict[x] = line_buffer
if isinstance(tree.left_child,ystree.TableNode):
if tree.left_child.table_name not in buf_dict.keys():
buf_dict[tree.left_child.table_name] = line_buffer
left_value_type = map_value_type
right_value_type = map_value_type
### scan the input of the left child
if tree.left_child.select_list is not None:
if isinstance(tree.left_child,ystree.TableNode):
left_value = __gen_mr_value__(tree.left_child.select_list.tmp_exp_list,left_value_type,buf_dict)
exp_list = list(tree.left_child.select_list.tmp_exp_list)
if tree.left_child.where_condition is not None:
exp_list.append(tree.left_child.where_condition.where_condition_exp)
max_index = __get_max_index__(exp_list)
else:
left_value = ""
for i in range(0,len(tree.left_child.select_list.tmp_exp_list)):
left_value += line_buffer + "[" + str(i) + "]"
left_value += "+ \"|\"+"
left_value = left_value[:-1]
max_index = len(tree.left_child.select_list.tmp_exp_list)
if self_join_bool is False:
print >>fo,"\t\t\t\tString[] "+ line_buffer +" = new String["+ str(max_index)+"];"
print >>fo, "\t\t\t\tfor(i=0,n=0,prev=0;i<line.length();i++){\n"
print >>fo, "\t\t\t\t\tif (line.charAt(i) == \'|\'){"
print >>fo, "\t\t\t\t\t\t" + line_buffer + "[n] = line.substring(prev,i);"
print >>fo, "\t\t\t\t\t\tn = n+1;"
print >>fo, "\t\t\t\t\t\tprev = i+1;"
print >>fo, "\t\t\t\t\t}"
print >>fo, "\t\t\t\t\tif(n == "+str(max_index)+")"
print >>fo, "\t\t\t\t\t\tbreak;"
print >>fo,"\t\t\t\t}\n"
print >>fo,"\t\t\tif(n<" + str(max_index) + ")"
print >>fo,"\t\t\t\t"+line_buffer+ "[n] = line.substring(prev,i);"
if not isinstance(tree.left_child,ystree.TableNode) or tree.left_child.where_condition is None:
tmp_output = "\t\t\t\tcontext.write(new " + left_key_type + "(" + left_key + ")"
tmp_output += ", "
tmp_output += "new " + left_value_type + "(\"L\"+\"|\" +"+ left_value +")"
tmp_output += ");"
print >>fo,tmp_output
else:
where_str = "\t\t\t\tif("
where_str +=__where_convert_to_java__(tree.left_child.where_condition.where_condition_exp,buf_dict)
where_str += "){\n"
print >>fo,where_str
tmp_output = "\t\t\t\t\tcontext.write(new " + left_key_type + "(" + left_key + ")"
tmp_output += ", "
tmp_output += "new " + left_value_type + "(\"L\"+\"|\"+"+ left_value +")"
tmp_output += ");"
print >>fo,tmp_output
print >>fo,"\t\t\t\t}" # end of if
if self_join_bool is False:
print >>fo,"\t\t\t}else{\n" ##end of left child
### scan the input of the right child
buf_dict = {}
buf_dict["RIGHT"] = line_buffer
right_key = __gen_mr_value__(right_key_list,right_key_type,buf_dict)
buf_dict = {}
for x in tree.right_child.table_list:
if x not in buf_dict.keys():
buf_dict[x] = line_buffer
if isinstance(tree.right_child,ystree.TableNode):
if tree.right_child.table_name not in buf_dict.keys():
buf_dict[tree.right_child.table_name] = line_buffer
if tree.right_child.select_list is not None:
if isinstance(tree.right_child,ystree.TableNode):
right_value = __gen_mr_value__(tree.right_child.select_list.tmp_exp_list,right_value_type,buf_dict)
exp_list = tree.right_child.select_list.tmp_exp_list
if tree.right_child.where_condition is not None:
exp_list.append(tree.right_child.where_condition.where_condition_exp)
max_index = __get_max_index__(exp_list)
else:
right_value = ""
for i in range(0,len(tree.right_child.select_list.tmp_exp_list)):
right_value += line_buffer + "[" + str(i) + "]"
right_value += "+ \"|\"+"
right_value = right_value[:-1]
max_index = len(tree.right_child.select_list.tmp_exp_list)
if self_join_bool is False:
print >>fo,"\t\t\t\tString[] "+ line_buffer +" = new String["+ str(max_index)+"];"
print >>fo, "\t\t\t\tfor(i=0,n=0,prev=0;i<line.length();i++){\n"
print >>fo, "\t\t\t\t\tif (line.charAt(i) == \'|\'){"
print >>fo, "\t\t\t\t\t\t" + line_buffer + "[n] = line.substring(prev,i);"
print >>fo, "\t\t\t\t\t\tn = n+1;"
print >>fo, "\t\t\t\t\t\tprev = i+1;"
print >>fo, "\t\t\t\t\t}"
print >>fo, "\t\t\t\t\tif(n == "+str(max_index)+")"
print >>fo, "\t\t\t\t\t\tbreak;"
print >>fo,"\t\t\t\t}\n"
print >>fo,"\t\t\tif(n<" + str(max_index) + ")"
print >>fo,"\t\t\t\t"+line_buffer+ "[n] = line.substring(prev,i);"
if not isinstance(tree.right_child,ystree.TableNode) or tree.right_child.where_condition is None:
tmp_output = "\t\t\t\tcontext.write(new " + right_key_type + "(" + right_key + ")"
tmp_output += ", "
tmp_output += "new " + right_value_type + "(\"R\"+\"|\" +"+ right_value +")"
tmp_output += ");"
print >>fo, tmp_output
else:
where_str = "\t\t\t\tif("
where_str +=__where_convert_to_java__(tree.right_child.where_condition.where_condition_exp,buf_dict)
where_str += "){\n"
print >>fo,where_str
tmp_output = "\t\t\t\t\tcontext.write(new " + right_key_type + "(" + right_key + ")"
tmp_output += ", "
tmp_output += "new " + right_value_type + "(\"R\"+\"|\" +"+ right_value +")"
tmp_output += ");"
print >>fo, tmp_output
print >>fo,"\t\t\t\t}" # end of if
if self_join_bool is False:
print >>fo,"\t\t\t}\n"
print >>fo,"\t\t}\n" ### end of map func
print >>fo,"\t}\n" ## end of map class
### join reduce part
left_array = "al_left"
right_array = "al_right"
reduce_key_type = "NullWritable"
reduce_value_type = "Text"
print >>fo,"\tpublic static class Reduce extends Reducer<"+ map_key_type+","+map_value_type+","+reduce_key_type+","+reduce_value_type+">{\n"
print >>fo, "\t\tpublic void reduce("+map_key_type+" key, Iterable<"+map_value_type+"> v, Context context) throws IOException,InterruptedException{\n"
print >>fo, "\t\t\tIterator values = v.iterator();"
print >>fo,"\t\t\tArrayList "+ left_array +" = new ArrayList();"
print >>fo,"\t\t\tArrayList "+ right_array +" = new ArrayList();"
print >>fo,"\t\t\twhile(values.hasNext()){\n"
print >>fo,"\t\t\t\tString tmp = values.next().toString();"
print >>fo,"\t\t\t\tif(tmp.charAt(0) == \'L\'){\n"
print >>fo,"\t\t\t\t\t"+ left_array + ".add(tmp.substring(2));"
print >>fo,"\t\t\t\t}else{\n"
print >>fo,"\t\t\t\t\t" + right_array +".add(tmp.substring(2));"
print >>fo,"\t\t\t\t}\n"
print >>fo,"\t\t\t}\n" ### end of while
print >>fo,"\t\t\tNullWritable key_op = NullWritable.get();"
buf_dict = {}
left_line_buffer = "left_buf"
right_line_buffer = "right_buf"
buf_dict["LEFT"] = "left_buf"
buf_dict["RIGHT"] = "right_buf"
if tree.join_explicit is True:
join_type = tree.join_type.upper()
if join_type == "LEFT":
reduce_value = __gen_mr_value__(tree.select_list.tmp_exp_list,reduce_value_type,buf_dict)
print >>fo,"\t\t\tfor(int i=0;i<" + left_array + ".size();i++){\n"
print >>fo,"\t\t\t\tString[] " + left_line_buffer + " = ((String)" + left_array + ".get(i)).split(\"\\\|\");"
print >>fo, "\t\t\t\tif(" + right_array + ".size()>0){\n"
print >>fo,"\t\t\t\t\tfor(int j=0;j<" +right_array + ".size();j++){\n"
print >>fo,"\t\t\t\t\t\tString[] " + right_line_buffer + " = ((String)" + right_array + ".get(j)).split(\"\\\|\");"
if tree.where_condition is not None:
exp = tree.where_condition.where_condition_exp
print >>fo,"\t\t\t\t\t\tif(" + __where_convert_to_java__(exp,buf_dict) + "){\n"
tmp_output = "context.write("
#tmp_output += "new " + reduce_key_type + "(" + reduce_key + ")"
tmp_output += "key_op"
tmp_output += ", "
tmp_output | |
constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_remove)
# Perform app resources actions
lifecycle_hook_info_app_remove.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_remove.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_remove)
except Exception as e:
self._abort_operation(app, constants.APP_REMOVE_OP)
LOG.exception(e)
self._deregister_app_abort(app.name)
return False
self._update_app_status(app, constants.APP_UPLOAD_SUCCESS,
constants.APP_PROGRESS_COMPLETED)
# In case there is an existing alarm for previous remove failure
self._clear_app_alarm(app.name)
LOG.info("Application (%s) remove completed." % app.name)
else:
if AppOperator.is_app_aborted(app.name):
self._abort_operation(app, constants.APP_REMOVE_OP,
user_initiated=True)
else:
self._abort_operation(app, constants.APP_REMOVE_OP)
rc = False
self._deregister_app_abort(app.name)
return rc
def activate(self, rpc_app):
    """Mark the application active.

    :param rpc_app: application object in the RPC request
    :return: the previous active state
    """
    application = AppOperator.Application(rpc_app)
    with self._lock:
        was_active = application.update_active(True)
    return was_active
def deactivate(self, rpc_app):
    """Mark the application inactive.

    :param rpc_app: application object in the RPC request
    :return: the previous active state
    """
    application = AppOperator.Application(rpc_app)
    with self._lock:
        was_active = application.update_active(False)
    return was_active
def get_appname(self, rpc_app):
    """Return the name of the application in the RPC request."""
    return AppOperator.Application(rpc_app).name
def is_app_active(self, rpc_app):
    """Return whether the application is currently marked active."""
    return AppOperator.Application(rpc_app).active
def perform_app_abort(self, rpc_app, lifecycle_hook_info_app_abort):
    """Process application abort request

    Looks up the latest application status in the database. When an
    apply/update/remove operation is still in progress, this raises the
    abort flag (the processing thread polls it and bails out at its next
    opportunity) and then stops the Armada service and clears its locks
    in case the thread already issued an Armada request. Otherwise the
    abort request is ignored.

    :param rpc_app: application object in the RPC request
    :param lifecycle_hook_info_app_abort: LifecycleHookInfo object
    """
    app = AppOperator.Application(rpc_app)
    in_progress_states = (constants.APP_APPLY_IN_PROGRESS,
                          constants.APP_UPDATE_IN_PROGRESS,
                          constants.APP_REMOVE_IN_PROGRESS)

    # Retrieve the latest app status from the database
    db_app = self._dbapi.kube_app_get(app.name)
    if db_app.status not in in_progress_states:
        # Either the previous operation has completed or already failed
        LOG.info("Abort request ignored. The previous operation for app %s "
                 "has either completed or failed." % app.name)
        return

    # Turn on the abort flag so the processing thread that is
    # in progress can bail out in the next opportunity.
    self._set_abort_flag(app.name)

    # Stop the Armada request in case it has reached this far and
    # remove locks.
    # TODO(jgauld): Need to correct lock mechanism, something is no
    # longer working for application aborts. The lock lingers around,
    # and only automatically get cleaned up after a long period.
    # Subsequent reapply fails since it we cannot get lock.
    with self._lock:
        self._armada.stop_armada_request()
        self._armada.clear_armada_locks()
def perform_app_delete(self, rpc_app, lifecycle_hook_info_app_delete):
    """Process application remove request

    Removes the application entry from the database and performs
    cleanup: pre-delete lifecycle hooks, plugin deactivation, purge of
    all application files and the patch-dependency report.

    :param rpc_app: application object in the RPC request
    :param lifecycle_hook_info_app_delete: LifecycleHookInfo object
    :return: None on success, otherwise the error message string
    """
    app = AppOperator.Application(rpc_app)
    try:
        # Perform rbd actions
        lifecycle_hook_info_app_delete.relative_timing = \
            constants.APP_LIFECYCLE_TIMING_PRE
        lifecycle_hook_info_app_delete.lifecycle_type = \
            constants.APP_LIFECYCLE_TYPE_RBD
        self.app_lifecycle_actions(None, None, rpc_app,
                                   lifecycle_hook_info_app_delete)

        # Perform app resources actions
        lifecycle_hook_info_app_delete.relative_timing = \
            constants.APP_LIFECYCLE_TIMING_PRE
        lifecycle_hook_info_app_delete.lifecycle_type = \
            constants.APP_LIFECYCLE_TYPE_RESOURCE
        self.app_lifecycle_actions(None, None, rpc_app,
                                   lifecycle_hook_info_app_delete)

        self._plugins.deactivate_plugins(app)

        self._dbapi.kube_app_destroy(app.name)
        self._cleanup(app)
        self._utils._patch_report_app_dependencies(app.name + '-' + app.version)
        # One last check of app alarm, should be no-op unless the
        # user deletes the application following an upload failure.
        self._clear_app_alarm(app.name)
        LOG.info("Application (%s) has been purged from the system." %
                 app.name)
        return None
    except Exception as e:
        # Possible exceptions are KubeAppDeleteFailure,
        # OSError and unexpectedly KubeAppNotFound
        LOG.exception(e)
        return str(e)
class Application(object):
    """ Data object to encapsulate all data required to
    support application related operations.

    Wraps the kube_app RPC/DB object and derives every
    installation-specific (local, unsynced) and DRBD-synced filesystem
    location for the application's charts, images, plugins, helm
    overrides and armada manifest.
    """

    def __init__(self, rpc_app):
        # Underlying kube_app DB object; read via .get(), persisted via .save().
        self._kube_app = rpc_app

        # Path of the app tarball, if any; set by the caller.
        self.tarfile = None
        # True when the tarball was fetched rather than supplied locally.
        self.downloaded_tarfile = False

        # Directories: Installation specific, local to a controller. Not
        # synced
        self.inst_path = os.path.join(constants.APP_INSTALL_PATH,
                                      self._kube_app.get('name'),
                                      self._kube_app.get('app_version'))
        self.inst_charts_dir = os.path.join(self.inst_path, 'charts')
        self.inst_images_dir = os.path.join(self.inst_path, 'images')
        self.inst_plugins_dir = os.path.join(self.inst_path, 'plugins')

        # Files: Installation specific, local to a controller. Not synced
        self.inst_armada_mfile = generate_install_manifest_fqpn(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'),
            self._kube_app.get('manifest_file'))

        # Directories: DRBD Synced between controllers
        self.sync_overrides_dir = generate_synced_helm_overrides_dir(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'))
        self.sync_plugins_dir = generate_synced_app_plugins_dir(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'))
        self.sync_armada_mfile_dir = cutils.generate_synced_armada_dir(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'))

        # Files: DRBD synced between controllers
        self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'),
            self._kube_app.get('manifest_file'))
        self.sync_imgfile = generate_synced_images_fqpn(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'))
        self.sync_metadata_file = cutils.generate_synced_metadata_fqpn(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'))

        # Files: FQPN formatted for the docker armada_service
        self.armada_service_mfile = generate_armada_service_manifest_fqpn(
            self._kube_app.get('name'),
            self._kube_app.get('app_version'),
            self._kube_app.get('manifest_file'))

        # Populated later by the app processing code.
        self.patch_dependencies = []
        self.charts = []
        self.releases = []

    @property
    def system_app(self):
        """True when the app ships synced helm plugins of its own."""
        if (os.path.exists(self.sync_plugins_dir) and
                os.listdir(self.sync_plugins_dir)):
            return True
        return False

    @property
    def name(self):
        """Application name from the kube_app record."""
        return self._kube_app.get('name')

    @property
    def version(self):
        """Application version from the kube_app record."""
        return self._kube_app.get('app_version')

    @property
    def status(self):
        """Current operation status string."""
        return self._kube_app.get('status')

    @property
    def progress(self):
        """Human-readable progress string of the current operation."""
        return self._kube_app.get('progress')

    @property
    def active(self):
        """Whether the application is marked active."""
        return self._kube_app.get('active')

    @property
    def recovery_attempts(self):
        """Number of recovery attempts recorded for the app."""
        return self._kube_app.get('recovery_attempts')

    @property
    def mode(self):
        """Application mode from the kube_app record."""
        return self._kube_app.get('mode')

    @property
    def app_metadata(self):
        """Application metadata dict from the kube_app record."""
        return self._kube_app.get('app_metadata')

    def update_app_metadata(self, new_metadata):
        """Persist new_metadata when it differs from the stored value."""
        if self.app_metadata != new_metadata:
            self._kube_app.app_metadata = new_metadata
            self._kube_app.save()

    def update_status(self, new_status, new_progress):
        """Persist a new status; progress is updated only when truthy."""
        self._kube_app.status = new_status
        if new_progress:
            self._kube_app.progress = new_progress
        self._kube_app.save()

    def update_active(self, active):
        """Persist the active flag; return the previous active state."""
        was_active = self.active
        if active != self.active:
            self._kube_app.active = active
            self._kube_app.save()
        return was_active

    def regenerate_manifest_filename(self, new_mname, new_mfile):
        """Update the manifest name/file and re-derive the manifest paths."""
        self._kube_app.manifest_name = new_mname
        self._kube_app.manifest_file = new_mfile
        self.armada_service_mfile = generate_armada_service_manifest_fqpn(
            self.name, self.version, new_mfile)
        self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn(
            self.name, self.version, new_mfile)
        self.inst_armada_mfile = generate_install_manifest_fqpn(
            self.name, self.version, new_mfile)

    def regenerate_application_info(self, new_name, new_version, new_patch_dependencies):
        """Rename/re-version the app and relocate its files accordingly."""
        self._kube_app.name = new_name
        self._kube_app.app_version = new_version

        # NOTE: after the assignments above, self.name/self.version
        # resolve to the NEW identity while the stored directory
        # attributes still point at the OLD locations, so each move
        # below goes old -> new before the attribute is refreshed.
        new_armada_dir = cutils.generate_synced_armada_dir(
            self.name, self.version)
        shutil.move(self.sync_armada_mfile_dir, new_armada_dir)
        # Drop the old per-app parent directory, now empty.
        shutil.rmtree(os.path.dirname(self.sync_armada_mfile_dir))
        self.sync_armada_mfile_dir = new_armada_dir

        new_path = os.path.join(
            constants.APP_INSTALL_PATH, self.name, self.version)
        shutil.move(self.inst_path, new_path)
        shutil.rmtree(os.path.dirname(self.inst_path))
        self.inst_path = new_path

        self.inst_charts_dir = os.path.join(self.inst_path, 'charts')
        self.inst_images_dir = os.path.join(self.inst_path, 'images')
        self.sync_imgfile = generate_synced_images_fqpn(self.name, self.version)
        self.sync_overrides_dir = generate_synced_helm_overrides_dir(self.name, self.version)
        self.patch_dependencies = new_patch_dependencies
        self.inst_plugins_dir = os.path.join(self.inst_path, 'plugins')
        self.sync_plugins_dir = generate_synced_app_plugins_dir(new_name, new_version)
class DockerHelper(object):
""" Utility class to encapsulate Docker related operations """
def __init__(self, dbapi):
    # Database API handle; used to read docker service parameters and
    # to resolve barbican secrets for registry credentials.
    self._dbapi = dbapi
def _parse_barbican_secret(self, secret_ref):
    """Get the registry credentials from the
    barbican secret payload

    The format of the credentials stored in
    barbican secret:
    username:xxx password:xxx

    :param secret_ref: barbican secret ref/uuid
    :return: dict of registry credentials
    :raises SysinvException: when the secret payload cannot be
        retrieved or does not match the expected format
    """
    operator = openstack.OpenStackOperator(self._dbapi)
    payload = operator.get_barbican_secret_payload(secret_ref)
    if not payload:
        raise exception.SysinvException(_(
            "Unable to get the payload of Barbican secret "
            "%s" % secret_ref))

    try:
        # NOTE(review): assumes exactly two whitespace-separated tokens;
        # a password containing whitespace would break this parse and be
        # reported as an unknown-format error below -- confirm upstream
        # guarantees on the payload format.
        username, password = payload.split()
        username = username.split('username:')[1]
        password = password.split('password:')[1]
        return dict(username=username, password=password)
    except Exception as e:
        LOG.error("Unable to parse the secret payload, "
                  "unknown format of the registry secret: %s" % e)
        raise exception.SysinvException(_(
            "Unable to parse the secret payload"))
def retrieve_specified_registries(self):
    """Build registry info from user-configured docker service parameters.

    Starts from the default registries info and overlays any
    user-specified registry URLs, types, auth secrets and overrides
    found in the docker service parameters.

    :return: dict of per-section registry info
    :raises SysinvException: when registry credentials cannot be fetched
    """
    registries_info = \
        copy.deepcopy(constants.DEFAULT_REGISTRIES_INFO)

    registries_url = {}
    registries_type = {}
    registries_auth = {}
    registries_overrides = {}

    # Map each docker service-parameter name to the bucket that collects
    # its per-section values; unknown parameter names are ignored.
    buckets = {
        constants.SERVICE_PARAM_NAME_DOCKER_URL: registries_url,
        constants.SERVICE_PARAM_NAME_DOCKER_TYPE: registries_type,
        constants.SERVICE_PARAM_NAME_DOCKER_AUTH_SECRET: registries_auth,
        constants.SERVICE_PARAM_NAME_DOCKER_ADDITIONAL_OVERRIDES:
            registries_overrides,
    }
    for param in self._dbapi.service_parameter_get_all(
            service=constants.SERVICE_TYPE_DOCKER):
        bucket = buckets.get(param.name)
        if bucket is not None:
            bucket[param.section] = str(param.value)

    if not registries_url:
        # return directly if no user specified registries
        return registries_info

    for section, url in registries_url.items():
        try:
            registries_info[section]['registry_replaced'] = str(url)

            if section in registries_overrides:
                registries_info[section]['registry_default'] = \
                    registries_overrides[section]

            if section in registries_auth:
                secret_ref = registries_auth[section]
                if secret_ref != 'None':
                    # If user specified registry requires the
                    # authentication, get the registry auth
                    # from barbican secret
                    auth = self._parse_barbican_secret(secret_ref)
                    if (section in registries_type and
                            registries_type[section] ==
                            constants.DOCKER_REGISTRY_TYPE_AWS_ECR):
                        auth = cutils.get_aws_ecr_registry_credentials(
                            self._dbapi, url, auth['username'],
                            auth['password'])
                    registries_info[section]['registry_auth'] = auth
        except exception.SysinvException:
            raise exception.SysinvException(_(
                "Unable to get the credentials to access "
                "registry %s" % url))
        except KeyError:
            # Unexpected section name; skip it.
            pass

    return registries_info
def _get_img_tag_with_registry(self, pub_img_tag, registries_info):
    """Regenerate public image tag with user specified registries

    An example of passed public image reference:
    docker.io/starlingx/stx-keystone:latest

    :param pub_img_tag: public image reference string
    :param registries_info: per-section registry info dict
    :return: (image tag to pull, registry auth dict or None)
    """
    if registries_info == constants.DEFAULT_REGISTRIES_INFO:
        # No user specified registries: keep the public reference.
        return pub_img_tag, None

    for info in registries_info.values():
        auth = info['registry_auth']
        default_registry = info['registry_default']
        replaced_registry = info['registry_replaced']

        if pub_img_tag.startswith(default_registry):
            if not replaced_registry:
                return pub_img_tag, auth
            # Swap the known default registry prefix for the
            # user-specified replacement.
            image_path = pub_img_tag.split(default_registry)[1]
            return replaced_registry + image_path, auth

        if pub_img_tag.startswith(replaced_registry):
            # Already expressed against the replacement registry.
            return pub_img_tag, auth

    # In case the image is overridden via "system helm-override-update"
    # with a custom registry that is not from any of the known registries
    # (ie..k8s.gcr.io, gcr.io, quay.io, docker.io. docker.elastic.co)
    # , pull directly from the custom registry (Note: The custom registry
    # must be unauthenticated in this case.)
    return pub_img_tag, None
def download_an_image(self, app_name, registries_info, img_tag):
rc = True
start = time.time()
if img_tag.startswith(constants.DOCKER_REGISTRY_HOST):
try:
if AppOperator.is_app_aborted(app_name):
LOG.info("User aborted. Skipping download of image %s " % img_tag)
return img_tag, False
LOG.info("Image %s download started from local registry" % img_tag)
client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
local_registry_auth = cutils.get_local_docker_registry_auth()
auth = '{0}:{1}'.format(local_registry_auth['username'],
local_registry_auth['password'])
subprocess.check_call(["crictl", "pull", "--creds", auth, img_tag]) # pylint: disable=not-callable
except subprocess.CalledProcessError:
try:
# Pull the image from the public/private registry
LOG.info("Image %s is not available in local registry, "
"download started from public/private registry"
% img_tag)
pub_img_tag = img_tag.replace(
constants.DOCKER_REGISTRY_SERVER + "/", "")
target_img_tag, registry_auth = \
self._get_img_tag_with_registry(pub_img_tag, registries_info)
client.pull(target_img_tag, auth_config=registry_auth)
except Exception as e:
rc = False
LOG.error("Image %s download failed from public/private"
"registry: %s" | |
# -*- coding: utf-8 -*-
# A sample to demo CTC in keras
# modified from https://github.com/mbhenry/keras/blob/master/examples/image_ocr.py
'''This example uses a convolutional stack followed by a recurrent stack
and a CTC logloss function to perform optical character recognition
of generated text images. I have no evidence of whether it actually
learns general shapes of text, or just is able to recognize all
the different fonts thrown at it...the purpose is more to demonstrate CTC
inside of Keras. Note that the font list may need to be updated
for the particular OS in use.
This starts off with 4 letter words. For the first 12 epochs, the
difficulty is gradually increased using the TextImageGenerator class
which is both a generator class for test/train data and a Keras
callback class. After 20 epochs, longer sequences are thrown at it
by recompiling the model to handle a wider image and rebuilding
the word list to include two words separated by a space.
The table below shows normalized edit distance values. Theano uses
a slightly different CTC implementation, hence the different results.
Norm. ED
Epoch | TF | TH
------------------------
10 0.027 0.064
15 0.038 0.035
20 0.043 0.045
25 0.014 0.019
This requires cairo and editdistance packages:
apt install -q libcairo2-dev
pip install cairocffi
pip install editdistance
Created by <NAME>
https://github.com/mbhenry/
'''
'''
#if use google colab, we need to run the following commands to
#install required libs
!pip install cairocffi
!pip install editdistance
!apt install -q libcairo2-dev
'''
import os
import itertools
import codecs
import re
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
import pylab
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
import matplotlib.pyplot as plt
from PIL import Image
# if you use google colab
from google.colab import files
OUTPUT_DIR = 'image_ocr'  # directory where run artifacts are written

# character classes and matching regex filter
regex = r'^[a-z ]+$'  # accepted words: lowercase a-z and spaces only
alphabet = u'abcdefghijklmnopqrstuvwxyz '  # index -> char; CTC blank uses index len(alphabet)

np.random.seed(55)  # fixed seed so data generation is reproducible
# this creates larger "blotches" of noise which look
# more realistic than just adding gaussian noise
# assumes greyscale with pixels ranging from 0 to 1
def speckle(img):
    """Add correlated "blotchy" noise to a greyscale image in [0, 1].

    Draws a random severity, builds Gaussian-blurred white noise scaled
    by it, adds it to the image, and clamps the result back into [0, 1].
    More realistic than plain per-pixel gaussian noise.
    """
    severity = np.random.uniform(0, 0.6)
    noise = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    # Clamp into [0, 1]; equivalent to the usual two masked assignments.
    return np.clip(img + noise, 0, 1)
# paints the string in a random location the bounding box
# also uses a random font, a slight random rotation,
# and a random amount of speckle noise
def paint_text(text, w, h, rotate=False, ud=False, multi_fonts=False):
    """Render *text* as a (1, h, w) float32 greyscale array in [0, 1].

    Paints black text on a white cairo surface at a random horizontal
    (and, when ud=True, vertical) offset, optionally with a random font
    and a slight random rotation, then adds speckle noise.

    :param text: string to render
    :param w: image width in pixels
    :param h: image height in pixels
    :param rotate: apply a small random rotation when True
    :param ud: randomize the vertical position when True
    :param multi_fonts: pick a random font from a fixed list when True
    :raises IOError: when the text does not fit inside the w x h box
    """
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24, w, h)
    with cairo.Context(surface) as context:
        context.set_source_rgb(1, 1, 1)  # White
        context.paint()
        # this font list works in CentOS 7
        if multi_fonts:
            fonts = [
                'Century Schoolbook', 'Courier', 'STIX',
                'URW Chancery L', 'FreeMono']
            context.select_font_face(
                np.random.choice(fonts),
                cairo.FONT_SLANT_NORMAL,
                np.random.choice([cairo.FONT_WEIGHT_BOLD, cairo.FONT_WEIGHT_NORMAL]))
        else:
            context.select_font_face('Courier',
                                     cairo.FONT_SLANT_NORMAL,
                                     cairo.FONT_WEIGHT_BOLD)
        context.set_font_size(25)
        box = context.text_extents(text)
        border_w_h = (4, 4)
        if box[2] > (w - 2 * border_w_h[1]) or box[3] > (h - 2 * border_w_h[0]):
            raise IOError(('Could not fit string into image.'
                           'Max char count is too large for given image width.'))

        # teach the RNN translational invariance by
        # fitting text box randomly on canvas, with some room to rotate
        max_shift_x = w - box[2] - border_w_h[0]
        max_shift_y = h - box[3] - border_w_h[1]
        top_left_x = np.random.randint(0, int(max_shift_x))
        if ud:
            top_left_y = np.random.randint(0, int(max_shift_y))
        else:
            top_left_y = h // 2
        context.move_to(top_left_x - int(box[0]), top_left_y - int(box[1]))
        context.set_source_rgb(0, 0, 0)
        context.show_text(text)

    # RGB24 surfaces use 4 bytes per pixel; for black-on-white text a
    # single channel carries all the information.
    buf = surface.get_data()
    a = np.frombuffer(buf, np.uint8)
    a.shape = (h, w, 4)
    a = a[:, :, 0]  # grab single channel
    a = a.astype(np.float32) / 255
    a = np.expand_dims(a, 0)
    if rotate:
        # Rotation budget grows with the free space right of the text.
        a = image.random_rotation(a, 3 * (w - top_left_x) / w + 1)
    a = speckle(a)

    return a
# shuffle a matrix or a list before index: stop_ind
# here in this example, we only shuffle the training data
# but not validation data.
# for example: shuffle_mats_or_lists(
# [self.X_text, self.Y_data, self.Y_len],
# self.val_split = words_per_epoch - val_words)
# where words_per_epoch = 16000
# val_split = 0.2
# val_words = int(words_per_epoch * (val_split))
def shuffle_mats_or_lists(matrix_list, stop_ind=None):
    """Apply one shared shuffle of the first stop_ind entries to every
    matrix/list in matrix_list; entries from stop_ind onward keep their
    original order (used to shuffle training data but not validation).

    :param matrix_list: sequence of equally-long ndarrays and/or lists
    :param stop_ind: shuffle only indices [0, stop_ind); default: all
    :return: list of reordered copies, one per input
    :raises TypeError: for inputs that are neither ndarray nor list
    """
    total = len(matrix_list[0])
    assert all(len(m) == total for m in matrix_list)
    if stop_ind is None:
        stop_ind = total
    assert stop_ind <= total

    # Shared permutation: shuffled prefix + untouched suffix.
    order = list(range(stop_ind))
    np.random.shuffle(order)
    order += list(range(stop_ind, total))

    shuffled = []
    for mat in matrix_list:
        if isinstance(mat, np.ndarray):
            shuffled.append(mat[order])
        elif isinstance(mat, list):
            shuffled.append([mat[i] for i in order])
        else:
            raise TypeError('`shuffle_mats_or_lists` only supports '
                            'numpy.array and list objects.')
    return shuffled
# Translation of characters to unique integer values
def text_to_labels(text):
    """Translate a string into its per-character alphabet indices."""
    return [alphabet.find(char) for char in text]
# Reverse translation of numerical classes back to characters
def labels_to_text(labels):
    """Translate class indices back to a string; the CTC blank class
    (index len(alphabet)) maps to the empty string."""
    blank = len(alphabet)
    chars = []
    for label in labels:
        chars.append("" if label == blank else alphabet[label])
    return "".join(chars)
# in this example, we only handle lower case a-z and space
# only a-z and space... probably not too difficult
# to expand to uppercase and symbols
def is_valid_str(in_str):
    """True when in_str matches the module regex (lowercase a-z and
    spaces only)."""
    return re.compile(regex, re.UNICODE).search(in_str) is not None
class TextImageGenerator(keras.callbacks.Callback):
def __init__(self, monogram_file, bigram_file, minibatch_size,
             img_w, img_h, downsample_factor, val_split,
             absolute_max_string_len=16):
    """Data generator + Keras callback for synthetic text images.

    :param monogram_file: path to a single-word list
    :param bigram_file: path to a word-pair list
    :param minibatch_size: samples per generated batch
    :param img_w: generated image width in pixels
    :param img_h: generated image height in pixels
    :param downsample_factor: downsampling applied by the conv stack
    :param val_split: index at which validation data starts
    :param absolute_max_string_len: hard upper bound on label length
    """
    self.minibatch_size = minibatch_size
    self.img_w = img_w
    self.img_h = img_h
    self.monogram_file = monogram_file
    self.bigram_file = bigram_file
    self.downsample_factor = downsample_factor
    self.val_split = val_split
    # Last class index is reserved for the CTC blank symbol.
    self.blank_label = self.get_output_size() - 1
    self.absolute_max_string_len = absolute_max_string_len
def get_output_size(self):
return len(alphabet) + 1
# in this example, we start with 4 letter words in the mono_file
# e.g: build_word_list(16000, 4, 1)
# then we gradually use up to 12 letter words from bi-gram file
# e.g: build_word_list(32000, 12, 0.5)
# we put word into self.X_text, its integer representation) into self.Y_data
# e.g: X_text is ( hello, hello world, ... ),
# then Y_data will be: (12, 12 34, ...)
# so our training data only have mono or bi-gram words
# sure if we can recognize three-words?
# as max_string_len grows, num_words can grow
def build_word_list(self, num_words, max_string_len=None, mono_fraction=0.5):
assert max_string_len <= self.absolute_max_string_len
assert num_words % self.minibatch_size == 0
assert (self.val_split * num_words) % self.minibatch_size == 0
self.num_words = num_words
self.string_list = [''] * self.num_words
tmp_string_list = []
self.max_string_len = max_string_len
self.Y_data = np.ones([self.num_words, self.absolute_max_string_len]) * -1
self.X_text = []
self.Y_len = [0] * self.num_words
def _is_length_of_word_valid(word):
return (max_string_len == -1 or
max_string_len is None or
len(word) <= max_string_len)
# monogram file is sorted by frequency in english speech
with codecs.open(self.monogram_file, mode='r', encoding='utf-8') as f:
for line in f:
if len(tmp_string_list) == int(self.num_words * mono_fraction):
break
word = line.rstrip()
if _is_length_of_word_valid(word):
tmp_string_list.append(word)
# bigram file contains common word pairings in english speech
with codecs.open(self.bigram_file, mode='r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
if len(tmp_string_list) == self.num_words:
break
columns = line.lower().split()
word = columns[0] + ' ' + columns[1]
if is_valid_str(word) and _is_length_of_word_valid(word):
tmp_string_list.append(word)
if len(tmp_string_list) != self.num_words:
raise IOError('Could not pull enough words'
'from supplied monogram and bigram files.')
# interlace to mix up the easy and hard words
self.string_list[::2] = tmp_string_list[:self.num_words // 2]
self.string_list[1::2] = tmp_string_list[self.num_words // 2:]
for i, word in enumerate(self.string_list):
self.Y_len[i] = len(word)
self.Y_data[i, 0:len(word)] = text_to_labels(word)
self.X_text.append(word)
self.Y_len = np.expand_dims(np.array(self.Y_len), 1)
self.cur_val_index = self.val_split
self.cur_train_index = 0
    # each time an image is requested from train/val/test, a new random
    # painting of the text is performed
    # e.g: for training data
    # get_batch(cur_train_index, minibatch_size, train=True);
    # for validation data
    # get_batch(cur_val_index, minibatch_size, train=False);
def get_batch(self, index, size, train):
# width and height are backwards from typical Keras convention
# because width is the time dimension when it gets fed into the RNN
if K.image_data_format() == 'channels_first':
X_data = np.ones([size, 1, self.img_w, self.img_h])
else:
X_data = np.ones([size, self.img_w, self.img_h, 1])
labels = np.ones([size, self.absolute_max_string_len])
input_length = np.zeros([size, 1])
label_length = np.zeros([size, 1])
source_str = []
for i in range(size):
# Mix in some blank inputs. This seems to be important for
# achieving translational invariance
if train and i > size - 4:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func('')[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func('',)[0, :, :].T
labels[i, 0] = self.blank_label
input_length[i] = self.img_w // self.downsample_factor | |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from functools import partial
import inspect
import logging
import os
import sys
import platform
import random
import logging.handlers
from os.path import basename, dirname
from collections import defaultdict
from .utils import is_interactive, optional_args
from .support import ansi_colors as colors
__all__ = ['ColorFormatter']
# Snippets from traceback borrowed from duecredit which was borrowed from
# PyMVPA upstream/2.4.0-39-g69ad545 MIT license (the same copyright as DataLad)
def mbasename(s):
    """Custom function to include directory name if filename is too common

    Also strip .py at the end
    """
    stem = basename(s)
    # drop a trailing ".py" extension
    if stem.endswith('.py'):
        stem = stem[:-3]
    # too generic on its own -- qualify with the parent directory name
    if stem in {'base', '__init__'}:
        stem = '%s.%s' % (basename(dirname(s)), stem)
    return stem
class TraceBack(object):
    """Customized traceback to be included in debug messages

    Instances are callable; each call renders the current Python call
    stack as a compact one-line string such as ``module:12>other:34,56``.
    """
    def __init__(self, limit=100, collide=False):
        """Initialize TraceBack metric

        Parameters
        ----------
        limit : int
          maximum number of stack entries rendered; deeper stacks are
          truncated from the top and prefixed with '...>'
        collide : bool
          if True then prefix common with previous invocation gets
          replaced with ...
        """
        # rendering of the previous invocation, used only when collide=True
        self.__prev = ""
        self.limit = limit
        self.collide = collide
        # delayed imports and preparing the regex substitution
        if collide:
            import re
            # strips the last (possibly partial) '>'-separated element so
            # that only whole entries are treated as a common prefix
            self.__prefix_re = re.compile('>[^>]*$')
        else:
            self.__prefix_re = None
        import traceback
        self._extract_stack = traceback.extract_stack
    def __call__(self):
        """Return a one-line rendering of the current call stack."""
        # +10 headroom so that frames filtered out below do not eat into
        # the requested limit; [:-2] drops this __call__ and its caller
        ftb = self._extract_stack(limit=self.limit+10)[:-2]
        # (module-name, line-number) pairs, skipping logging internals
        entries = [[mbasename(x[0]), str(x[1])]
                   for x in ftb if mbasename(x[0]) != 'logging.__init__']
        entries = [e for e in entries if e[0] != 'unittest']
        if len(entries) > self.limit:
            sftb = '...>'
            entries = entries[-self.limit:]
        else:
            sftb = ''
        if not entries:
            return ""
        # lets make it more concise: collapse consecutive frames from the
        # same module into one entry with comma-separated line numbers
        entries_out = [entries[0]]
        for entry in entries[1:]:
            if entry[0] == entries_out[-1][0]:
                entries_out[-1][1] += ',%s' % entry[1]
            else:
                entries_out.append(entry)
        sftb += '>'.join(
            ['%s:%s' % (mbasename(x[0]), x[1]) for x in entries_out]
        )
        if self.collide:
            # lets remove part which is common with previous invocation
            prev_next = sftb
            common_prefix = os.path.commonprefix((self.__prev, sftb))
            # only collapse whole '>'-separated entries, never a partial one
            common_prefix2 = self.__prefix_re.sub('', common_prefix)
            if common_prefix2 != "":
                sftb = '...' + sftb[len(common_prefix2):]
            self.__prev = prev_next
        return sftb
class MemoryInfo(object):
    """Callable that reports the process's memory usage (RSS/VMS) via psutil."""

    def __init__(self):
        try:
            from psutil import Process
            process = Process(os.getpid())
            # psutil renamed get_memory_info() to memory_info() over time;
            # support both spellings
            self.memory_info = process.memory_info \
                if hasattr(process, 'memory_info') \
                else process.get_memory_info
        except Exception:
            # BUG FIX: was a bare `except:` (which also swallows
            # KeyboardInterrupt/SystemExit); psutil missing or process
            # inspection failing should just degrade to "N/A"
            self.memory_info = None

    def __call__(self):
        """Return utilization of virtual memory

        Generic implementation using psutil
        """
        if not self.memory_info:
            return "RSS/VMS: N/A"
        mi = self.memory_info()
        # in later versions of psutil mi is a named tuple.
        # but that is not the case on Debian squeeze with psutil 0.1.3
        rss = mi[0] // 1024
        vms = mi[1] // 1024
        vmem = (rss, vms)
        try:
            return "RSS/VMS: %d/%d kB" % vmem
        except Exception:
            # BUG FIX: was a bare `except:`; unexpected value types fall
            # back to a raw representation instead of crashing the logger
            return "RSS/VMS: %s" % str(vmem)
# Recipe from http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
# by <NAME>
# Adjusted for automagic determination whether coloring is needed and
# prefixing of multiline log lines
class ColorFormatter(logging.Formatter):
    """Log formatter with optional ANSI coloring, traceback and memory info.

    Coloring is auto-detected from interactivity; the optional traceback
    and memory-usage prefixes are driven by the DATALAD_LOG_TRACEBACK and
    DATALAD_LOG_VMEM environment variables respectively.
    """
    def __init__(self, use_color=None, log_name=False, log_pid=False):
        """
        Parameters
        ----------
        use_color : bool or None
          True/False force coloring on/off; None auto-detects
        log_name : bool
          include the logger name in each record
        log_pid : bool
          include the process id in each record
        """
        if use_color is None:
            # if 'auto' - use color only if all streams are tty
            use_color = is_interactive()
        self.use_color = use_color and platform.system() != 'Windows'  # don't use color on windows
        msg = colors.format_msg(self._get_format(log_name, log_pid),
                                self.use_color)
        log_env = os.environ.get('DATALAD_LOG_TRACEBACK', '')
        # DATALAD_LOG_TRACEBACK='collide' enables prefix collapsing;
        # a numeric value sets the stack-depth limit; anything else -> 100
        collide = log_env == 'collide'
        limit = 100 if collide else int(log_env) if log_env.isdigit() else 100
        self._tb = TraceBack(collide=collide, limit=limit) if log_env else None
        self._mem = MemoryInfo() if os.environ.get('DATALAD_LOG_VMEM', '') else None
        logging.Formatter.__init__(self, msg)
    def _get_format(self, log_name=False, log_pid=False):
        """Assemble the %-style format string for this formatter."""
        from datalad import cfg
        from datalad.config import anything2bool
        show_timestamps = anything2bool(cfg.get('datalad.log.timestamp', False))
        return (("" if not show_timestamps else "$BOLD%(asctime)-15s$RESET ") +
                ("%(name)-15s " if log_name else "") +
                ("{%(process)d}" if log_pid else "") +
                "[%(levelname)s] "
                "%(message)s ")
    def format(self, record):
        """Render a record, mutating its msg/levelname in place first."""
        # safety guard if None was provided
        if record.msg is None:
            record.msg = ""
        if record.msg.startswith('| '):
            # If we already log smth which supposed to go without formatting, like
            # output for running a command, just return the message and be done
            return record.msg
        levelname = record.levelname
        if self.use_color and levelname in colors.LOG_LEVEL_COLORS:
            # pad to 7 chars so colored level names stay column-aligned
            record.levelname = colors.color_word(
                "{:7}".format(levelname),
                colors.LOG_LEVEL_COLORS[levelname],
                force=True)
        # prefix continuation lines so multi-line messages stay recognizable
        record.msg = record.msg.replace("\n", "\n| ")
        if self._tb:
            # records may opt out of the traceback prefix via `notraceback`
            if not getattr(record, 'notraceback', False):
                record.msg = self._tb() + " " + record.msg
        if self._mem:
            record.msg = "%s %s" % (self._mem(), record.msg)
        return logging.Formatter.format(self, record)
class ProgressHandler(logging.Handler):
    """Logging handler that routes progress-tagged records to progress bars.

    Records carrying a `dlm_progress` attribute (see `log_progress`) drive
    per-process-id progress bars; all other records are forwarded to
    `other_handler` with the bars temporarily cleared from the terminal.
    """
    # kept as a class attribute for import side effects; emit() re-imports
    # to pick up the UI active at call time
    from datalad.ui import ui

    def __init__(self, other_handler=None):
        """
        Parameters
        ----------
        other_handler : logging.Handler, optional
          handler that receives all non-progress records
        """
        super(self.__class__, self).__init__()
        self._other_handler = other_handler
        # maps progress id (pid) -> active progress bar
        self.pbars = {}

    def emit(self, record):
        """Dispatch a record: update a bar, or forward it around the bars."""
        from datalad.ui import ui
        if not hasattr(record, 'dlm_progress'):
            # plain record: clear bars, let the other handler print,
            # then redraw the bars underneath
            self._clear_all()
            self._other_handler.emit(record)
            self._refresh_all()
            return
        # maintenance records only manipulate the display, no bar state
        maint = getattr(record, 'dlm_progress_maint', None)
        if maint == 'clear':
            return self._clear_all()
        elif maint == 'refresh':
            return self._refresh_all()
        pid = getattr(record, 'dlm_progress')
        update = getattr(record, 'dlm_progress_update', None)
        # would be an actual message, not used ATM here,
        # and the record not passed to generic handler ATM
        # (filtered away by NoProgressLog)
        # so no final message is printed
        # msg = record.getMessage()
        if pid not in self.pbars:
            # this is new
            pbar = ui.get_progressbar(
                label=getattr(record, 'dlm_progress_label', ''),
                unit=getattr(record, 'dlm_progress_unit', ''),
                total=getattr(record, 'dlm_progress_total', None,),
            )
            pbar.start(initial=getattr(record, 'dlm_progress_initial', 0))
            self.pbars[pid] = pbar
        elif update is None:
            # not an update -> done
            # TODO if the other logging that is happening is less frontpage
            # we may want to actually "print" the completion message
            self.pbars.pop(pid).finish()
        else:
            # Check for an updated label.
            label = getattr(record, 'dlm_progress_label', None)
            if label is not None:
                self.pbars[pid].set_desc(label)
            # an update
            self.pbars[pid].update(
                update,
                increment=getattr(record, 'dlm_progress_increment', False),
                total=getattr(record, 'dlm_progress_total', None))

    def _refresh_all(self):
        """Redraw every active progress bar."""
        for pb in self.pbars.values():
            pb.refresh()

    def _clear_all(self):
        # remove the progress bar output from the terminal
        # (bar state is kept; bars can be refreshed again later)
        for pb in self.pbars.values():
            pb.clear()
class NoProgressLog(logging.Filter):
    """Filter that passes only records NOT carrying progress-report markup."""

    def filter(self, record):
        # progress records are tagged with a 'dlm_progress' attribute
        is_progress = hasattr(record, 'dlm_progress')
        return not is_progress
class OnlyProgressLog(logging.Filter):
    """Filter that passes only records carrying progress-report markup."""

    def filter(self, record):
        # progress records are tagged with a 'dlm_progress' attribute
        is_progress = hasattr(record, 'dlm_progress')
        return is_progress
def filter_noninteractive_progress(logger, record):
    """Drop progress records whose non-interactive level is below the logger's.

    Records without a `dlm_progress_noninteractive_level` attribute always
    pass; otherwise the record passes only when that level is at or above
    the logger's own level.
    """
    threshold = getattr(record, "dlm_progress_noninteractive_level", None)
    if threshold is None:
        return True
    return threshold >= logger.level
def log_progress(lgrcall, pid, *args, **kwargs):
    """Helper to emit a log message on the progress of some process

    Note: Whereas this helper reports on interim progress and is to be used
    programmatically, :class:`~datalad.ui.progressbars.LogProgressBar` replaces
    a progress bar with a single log message upon completion and can be chosen
    by the user (config 'datalad.ui.progressbar' set to 'log').

    Parameters
    ----------
    lgrcall : callable
      Something like lgr.debug or lgr.info
    pid : str
      Some kind of ID for the process the progress is reported on.
    *args : str
      Log message, and potential arguments
    total : int
      Max progress quantity of the process.
    label : str
      Process description. Should be very brief, goes in front of progress bar
      on the same line.
    unit : str
      Progress report unit. Should be very brief, goes after the progress bar
      on the same line.
    update : int
      To which quantity to advance the progress.
    increment : bool
      If set, `update` is interpreted as an incremental value, not absolute.
    initial : int
      If set, start value for progress bar
    noninteractive_level : int, optional
      When a level is specified here and progress is being logged
      non-interactively (i.e. without progress bars), do not log the message if
      logging is not enabled for the specified level. This is useful when you
      want all calls to be "logged" via the progress bar, but non-interactively
      showing a message at the `lgrcall` level for each step would be too much
      noise. Note that the level here only determines if the record will be
      dropped; it will still be logged at the level of `lgrcall`.
    maint : {'clear', 'refresh'}
    """
    extra = {'dlm_progress': pid}
    for key, value in kwargs.items():
        # initial progress might be zero, but a None value would signal to
        # destroy the progress bar, hence forward only non-None values
        if value is not None:
            extra['dlm_progress_{}'.format(key)] = value
    lgrcall(*args, extra=extra)
@optional_args
def with_result_progress(fn, label="Total", unit=" Files", log_filter=None):
"""Wrap a progress bar, with status counts, around a function.
Parameters
----------
fn : generator function
This function should accept a collection of items as a
positional argument and any number of keyword arguments. After
processing each item in the collection, it should yield a status
dict.
log_filter : callable, optional
If defined, only result records | |
from . import abitypes
import uuid
import numbers
import random
import hashlib
import binascii
import string
import re
import os
from . import Manticore
from .manticore import ManticoreError
from .core.smtlib import ConstraintSet, Operators, solver, issymbolic, istainted, taint_with, get_taints, BitVec, Constant, operators, Array, ArrayVariable
from .core.smtlib.visitors import simplify
from .platforms import evm
from .core.state import State
from .utils.helpers import istainted, issymbolic
import tempfile
from subprocess import Popen, PIPE, check_output
from multiprocessing import Process, Queue
from Queue import Empty as EmptyQueue
import sha3
import json
import logging
import StringIO
import cPickle as pickle
from .core.plugin import Plugin
from functools import reduce
from contextlib import contextmanager
logger = logging.getLogger(__name__)
class EthereumError(ManticoreError):
    """Base class for all Ethereum-related Manticore errors."""
class DependencyError(EthereumError):
    """Raised when contract libraries must be pre-loaded and mapped to addresses."""

    def __init__(self, lib_names):
        # keep the message wording stable; callers/tests may match on it
        message = "You must pre-load and provide libraries addresses{ libname:address, ...} for %r" % lib_names
        super(DependencyError, self).__init__(message)
        self.lib_names = lib_names
class NoAliveStates(EthereumError):
    """Raised when exploration cannot continue because no states remain alive."""
################ Detectors ####################
class Detector(Plugin):
    """Base class for vulnerability detectors.

    Findings are tuples (address, pc, finding, at_init) accumulated both in
    the per-state context and in a manticore-wide (lock protected) set.
    """
    @property
    def name(self):
        # short class name used to namespace per-detector context keys
        return self.__class__.__name__.split('.')[-1]
    def get_findings(self, state):
        """Return (creating if needed) this detector's finding set in `state`."""
        return state.context.setdefault('{:s}.findings'.format(self.name), set())
    @contextmanager
    def locked_global_findings(self):
        """Yield the global finding set while holding the manticore context lock."""
        with self.manticore.locked_context('{:s}.global_findings'.format(self.name), set) as global_findings:
            yield global_findings
    @property
    def global_findings(self):
        # snapshot of the global findings (acquires and releases the lock)
        with self.locked_global_findings() as global_findings:
            return global_findings
    def add_finding(self, state, address, pc, finding, init):
        """Record a finding in both the state context and the global set."""
        self.get_findings(state).add((address, pc, finding, init))
        with self.locked_global_findings() as gf:
            gf.add((address, pc, finding, init))
        #Fixme for ever broken logger
        #logger.warning(finding)
    def add_finding_here(self, state, finding):
        """Record `finding` at the currently executing contract/pc."""
        address = state.platform.current_vm.address
        pc = state.platform.current_vm.pc
        # a CREATE transaction means we are still in constructor/init code
        at_init = state.platform.current_transaction.sort == 'CREATE'
        self.add_finding(state, address, pc, finding, at_init)
    def _save_current_location(self, state, finding):
        """Stash the current (address, pc, finding) and return its hash id."""
        address = state.platform.current_vm.address
        pc = state.platform.current_vm.pc
        location = (address, pc, finding)
        hash_id = hashlib.sha1(str(location)).hexdigest()
        state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] = location
        return hash_id
    def _get_location(self, state, hash_id):
        """Look up a location previously saved via _save_current_location."""
        return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id]
    def _get_src(self, address, pc):
        """Return the source snippet for `pc` in the contract at `address`."""
        return self.manticore.get_metadata(address).get_source_for(pc)
class FilterFunctions(Plugin):
    def __init__(self, regexp=r'.*', mutability='both', depth='both', fallback=False, include=True, **kwargs):
        """
        Constrain input based on function metadata. Include or avoid functions selected by the specified criteria.

        Examples:
            #Do not explore any human transactions that end up calling a constant function
            no_human_constant = FilterFunctions(depth='human', mutability='constant', include=False)

            #At human tx depth only accept synthetic check functions
            # NOTE(review): with include=False the matched functions are *avoided*;
            # confirm whether this example should pass include=True instead
            only_tests = FilterFunctions(regexp=r'mcore_.*', depth='human', include=False)

        :param regexp: a regular expression over the name of the function; '.*' will match all functions
        :param mutability: mutable, constant or both will match functions declared in the abi to be of such class
        :param depth: match functions in internal transactions, in human initiated transactions or in both types
        :param fallback: if True include the fallback function. Hash will be 00000000 for it
        :param include: if False exclude the selected functions, if True include them
        """
        super(FilterFunctions, self).__init__(**kwargs)
        depth = depth.lower()
        if depth not in ('human', 'internal', 'both'):
            # BUG FIX: ValueError used to be raised without any message,
            # making misconfiguration hard to diagnose
            raise ValueError("depth must be one of 'human', 'internal' or 'both'")
        mutability = mutability.lower()
        if mutability not in ('mutable', 'constant', 'both'):
            raise ValueError("mutability must be one of 'mutable', 'constant' or 'both'")
        #fixme better names for member variables
        self._regexp = regexp
        self._mutability = mutability
        self._depth = depth
        self._fallback = fallback
        self._include = include

    def will_open_transaction_callback(self, state, tx):
        """Constrain the tx input selector (first 4 bytes) once per transaction."""
        world = state.platform
        tx_cnt = len(world.all_transactions)
        # Constrain input only once per tx, per plugin
        if state.context.get('constrained%d' % id(self), 0) != tx_cnt:
            state.context['constrained%d' % id(self)] = tx_cnt

            if self._depth == 'human' and not tx.is_human:
                return
            if self._depth == 'internal' and tx.is_human:
                return

            # Get metadata, if any, for the target address of the current tx
            md = self.manticore.get_metadata(tx.address)
            if md is None:
                return

            # Compile the list of interesting function hashes
            selected_functions = []
            for func_hsh in md.hashes:
                if func_hsh == '00000000':
                    # the fallback function is handled separately below
                    continue
                abi = md.get_abi(func_hsh)
                func_name = md.get_func_name(func_hsh)
                if self._mutability == 'constant' and not abi.get('constant', False):
                    continue
                if self._mutability == 'mutable' and abi.get('constant', False):
                    continue
                if not re.match(self._regexp, func_name):
                    continue
                selected_functions.append(func_hsh)
            if self._fallback:
                selected_functions.append('00000000')
            if self._include:
                # constrain the input so it can take only the interesting values
                # NOTE(review): reduce() raises TypeError on an empty selection;
                # confirm the selection can never be empty when include=True
                constraint = reduce(Operators.OR, map(lambda x: tx.data[:4] == binascii.unhexlify(x), selected_functions))
                state.constrain(constraint)
            else:
                # Avoid all selected hashes
                for func_hsh in md.hashes:
                    if func_hsh in selected_functions:
                        constraint = Operators.NOT(tx.data[:4] == binascii.unhexlify(func_hsh))
                        state.constrain(constraint)
class DetectInvalid(Detector):
    def __init__(self, only_human=True, **kwargs):
        """
        Detects INVALID instructions.

        INVALID instructions are originally designated to signal exceptional code.
        As in practice the INVALID instruction is used in different ways this
        detector may generate a great deal of false positives.

        :param only_human: if True report only INVALID at depth 0 transactions
        """
        super(DetectInvalid, self).__init__(**kwargs)
        self._only_human = only_human

    def did_evm_execute_instruction_callback(self, state, instruction, arguments, result_ref):
        """Record a finding whenever an INVALID opcode was just executed."""
        mnemonic = instruction.semantics
        result = result_ref.value
        if mnemonic == 'INVALID':
            # depth == 0 means the transaction was directly human-initiated
            if not self._only_human or state.platform.current_transaction.depth == 0:
                # BUG FIX: reported finding text had a typo ("intruction")
                self.add_finding_here(state, "INVALID instruction")
class DetectIntegerOverflow(Detector):
'''
Detects potential overflow and underflow conditions on ADD and SUB instructions.
'''
def _save_current_location(self, state, finding, condition):
address = state.platform.current_vm.address
pc = state.platform.current_vm.pc
at_init = state.platform.current_transaction.sort == 'CREATE'
location = (address, pc, finding, at_init, condition)
hash_id = hashlib.sha1(str(location)).hexdigest()
state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] = location
return hash_id
def _get_location(self, state, hash_id):
return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id]
@staticmethod
def _signed_sub_overflow(state, a, b):
'''
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 False False False False True True True
+c0000001 False False False False False False True
+ffffffff False False False False False False False
+00000000 True False False False False False False
+00000001 True False False False False False False
+3fffffff True False False False False False False
+7fffffff True True True False False False False
'''
sub = Operators.SEXTEND(a, 256, 512) - Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(sub < -(1 << 256), sub >= (1 << 255))
return cond
@staticmethod
def _signed_add_overflow(state, a, b):
'''
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a + b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 True True True False False False False
+c0000001 True False False False False False False
+ffffffff True False False False False False False
+00000000 False False False False False False False
+00000001 False False False False False False True
+3fffffff False False False False False False True
+7fffffff False False False False True True True
'''
add = Operators.SEXTEND(a, 256, 512) + Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(add < -(1 << 256), add >= (1 << 255))
return cond
    @staticmethod
    def _unsigned_sub_overflow(state, a, b):
        '''
        Unsigned subtraction a - b wraps (underflows) exactly when b > a,
        so a single unsigned comparison suffices -- no extension needed.
        '''
        cond = Operators.UGT(b, a)
        return cond
    @staticmethod
    def _unsigned_add_overflow(state, a, b):
        '''
        Zero extend both operands to 512 bits and check whether the exact
        sum still fits in 256 bits: overflow iff a + b >= 2**256.
        '''
        add = Operators.ZEXTEND(a, 512) + Operators.ZEXTEND(b, 512)
        cond = Operators.UGE(add, 1 << 256)
        return cond
    @staticmethod
    def _signed_mul_overflow(state, a, b):
        '''
        Sign extend both operands to 512 bits and check the exact product can
        be represented in 256 bits: overflow iff the product falls outside
        [-(2**255), 2**255 - 1]. Following there is a 32 bit excerpt of this
        condition (entries marked with * overflow):

        a * b               +00000000000000000 +00000000000000001 +0000000003fffffff +0000000007fffffff +00000000080000001 +000000000bfffffff +000000000ffffffff
        +0000000000000000   +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000
        +0000000000000001   +0000000000000000  +0000000000000001  +000000003fffffff  +000000007fffffff  +0000000080000001  +00000000bfffffff  +00000000ffffffff
        +000000003fffffff   +0000000000000000  +000000003fffffff *+0fffffff80000001 *+1fffffff40000001 *+1fffffffbfffffff *+2fffffff00000001 *+3ffffffec0000001
        +000000007fffffff   +0000000000000000  +000000007fffffff *+1fffffff40000001 *+3fffffff00000001 *+3fffffffffffffff *+5ffffffec0000001 *+7ffffffe80000001
        +0000000080000001   +0000000000000000  +0000000080000001 *+1fffffffbfffffff *+3fffffffffffffff *+4000000100000001 *+600000003fffffff *+800000007fffffff
        +00000000bfffffff   +0000000000000000  +00000000bfffffff *+2fffffff00000001 *+5ffffffec0000001 *+600000003fffffff *+8ffffffe80000001 *+bffffffe40000001
        +00000000ffffffff   +0000000000000000  +00000000ffffffff *+3ffffffec0000001 *+7ffffffe80000001 *+800000007fffffff *+bffffffe40000001 *+fffffffe00000001
        '''
        mul = Operators.SEXTEND(a, 256, 512) * Operators.SEXTEND(b, 256, 512)
        cond = Operators.OR(mul < -(1 << 255), mul >= (1 << 255))
        return cond
@staticmethod
| |
m.x1976 - m.b3014 <= 0)
# Indicator-linking constraints c1978..c2159: each continuous variable x_i may
# be active only when its group's binary indicator is on (x_i - b_j <= 0).
#   c1978..c2101 tie x1977..x2100 to indicator b3014
#   c2102..c2159 tie x2101..x2158 to indicator b3015
# Attribute assignment on a Pyomo block (m.cK = ...) is exactly
# setattr(m, 'cK', ...), so the generated repetition is written as loops.
for _k in range(1978, 2102):
    setattr(m, 'c%d' % _k,
            Constraint(expr=getattr(m, 'x%d' % (_k - 1)) - m.b3014 <= 0))
for _k in range(2102, 2160):
    setattr(m, 'c%d' % _k,
            Constraint(expr=getattr(m, 'x%d' % (_k - 1)) - m.b3015 <= 0))
m.c2160 = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.