input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>10-100
import os
import sys
import copy
import zlib
import pickle
import json
import shutil
from importlib import import_module
from itertools import islice
from .utils_novel import get_cb_settings, request_to_dict, validate_results
import six
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.exceptions import NotConfigured, _InvalidOutput
from scrapy.http import Request, Response
from scrapy.item import Item
from scrapy.utils.conf import (build_component_list, closest_scrapy_cfg,
init_env)
from scrapy.utils.misc import arg_to_iter, load_object, walk_modules
from scrapy.utils.project import get_project_settings
from scrapy.utils.python import to_bytes
from scrapy.utils.reqser import request_from_dict
from scrapy.utils.spider import iter_spider_classes
import datadiff.tools
NO_ITEM_MARKER = object()
FIXTURE_VERSION = 1
def auto_import(qualified_name):
    """Resolve a dotted path like 'pkg.mod.Name' to the named attribute."""
    module_path, attr_name = qualified_name.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, attr_name)
def create_instance(objcls, settings, crawler, *args, **kwargs):
    """Instantiate ``objcls`` the way Scrapy would.

    Prefers ``from_crawler`` when a crawler is available, then
    ``from_settings``, and finally the plain constructor.

    Args:
        objcls: class to instantiate.
        settings: settings object, or None to use ``crawler.settings``.
        crawler: crawler to pass to ``from_crawler``, or None.

    Raises:
        ValueError: if both ``settings`` and ``crawler`` are None.
    """
    if settings is None:
        if crawler is None:
            # Fixed typo in the error message ("Specifiy").
            raise ValueError("Specify at least one of settings and crawler.")
        settings = crawler.settings
    if crawler and hasattr(objcls, 'from_crawler'):
        return objcls.from_crawler(crawler, *args, **kwargs)
    elif hasattr(objcls, 'from_settings'):
        return objcls.from_settings(settings, *args, **kwargs)
    else:
        return objcls(*args, **kwargs)
def get_project_dirs():
    """Return ``(outer_dir, inner_dir)`` for the active Scrapy project.

    ``outer_dir`` is the directory containing scrapy.cfg; ``inner_dir`` is
    the project package name (from SCRAPY_PROJECT or the settings module).

    Raises:
        Exception: if no project configuration can be located.
    """
    outer_dir = inner_dir = ""
    closest_cfg = closest_scrapy_cfg()
    if closest_cfg:
        outer_dir = os.path.dirname(closest_cfg)
    if os.environ.get('SCRAPY_PROJECT'):
        inner_dir = os.environ.get('SCRAPY_PROJECT')
    if outer_dir and inner_dir:
        return (outer_dir, inner_dir)
    # init_env() populates SCRAPY_SETTINGS_MODULE from scrapy.cfg
    init_env()
    scrapy_module = os.environ.get('SCRAPY_SETTINGS_MODULE')
    if scrapy_module is None and not outer_dir:
        raise Exception("Project configuration awry")
    if not inner_dir:
        # settings module is '<package>.settings'; take the package name
        inner_dir = scrapy_module.split('.')[0]
    if outer_dir and inner_dir:
        return (outer_dir, inner_dir)
    try:
        # Fall back to locating the package on disk via its settings module.
        module = import_module(scrapy_module)
        outer_dir = os.path.dirname(os.path.dirname(module.__file__))
        return (outer_dir, inner_dir)
    except ImportError:
        raise Exception("Project configuration awry")
def get_middlewares(spider):
    """Return the spider-middleware paths ordered after TestMasterMiddleware,
    excluding TestMasterMiddleware itself."""
    ordered = build_component_list(
        spider.settings.getwithbase('SPIDER_MIDDLEWARES'))
    matches = [mw for mw in ordered if mw.endswith('TestMasterMiddleware')]
    testmaster_path = matches[0]
    tail = ordered[ordered.index(testmaster_path):]
    return [mw for mw in tail if mw != testmaster_path]
def create_dir(path, parents=False, exist_ok=False):
    """Create ``path`` (with intermediate dirs when ``parents`` is True).

    OSError is swallowed when ``exist_ok`` is True, re-raised otherwise.
    """
    maker = os.makedirs if parents else os.mkdir
    try:
        maker(path)
    except OSError:
        if not exist_ok:
            raise
def get_or_create_test_dir(base_path, spider_name, callback_name, extra=None):
    """Build (and create on disk) the fixture directory for a callback.

    Every directory level gets an ``__init__.py`` so the tree is importable
    as a package.

    Returns:
        (test_dir, test_name): the deepest directory path and a
        double-underscore-joined test identifier.
    """
    parts = [base_path, 'tests', spider_name]
    if extra:
        parts.append(extra)
    parts.append(callback_name)
    current = None
    for part in parts:
        current = os.path.join(current, part) if current else part
        create_dir(current, parents=True, exist_ok=True)
        init_path = os.path.join(current, '__init__.py')
        # 'a' + utime == touch: create the file if absent, bump its mtime.
        with open(init_path, 'a'):
            os.utime(init_path, None)
    return current, '__'.join(parts[2:])
def add_sample(index, test_dir, test_name, data):
    """Serialize ``data`` and write it as fixture<index>.bin in ``test_dir``."""
    encoding = data['response']['encoding']
    target = os.path.join(test_dir, 'fixture%s.bin' % str(index))
    # Double pickling: the inner payload keeps its own encoding metadata.
    envelope = pickle_data({
        'data': pickle_data(data),
        'encoding': encoding,
        'fixture_version': FIXTURE_VERSION,
    })
    with open(target, 'wb') as outfile:
        outfile.write(compress_data(envelope))
# def clear_fixtures(base_path, spider_name):
# path = os.path.join(base_path, "tests", spider_name)
# shutil.rmtree(path, ignore_errors=True)
def compress_data(data):
    """zlib-compress a bytes payload."""
    compressed = zlib.compress(data)
    return compressed
def decompress_data(data):
    """Inverse of compress_data: zlib-decompress a bytes payload."""
    raw = zlib.decompress(data)
    return raw
def pickle_data(data):
    """Pickle with protocol 2 so fixtures stay readable from Python 2."""
    payload = pickle.dumps(data, protocol=2)
    return payload
def unpickle_data(data, encoding):
    """Unpickle fixture bytes; the ``encoding`` kwarg only exists on Python 3."""
    kwargs = {} if six.PY2 else {'encoding': encoding}
    return pickle.loads(data, **kwargs)
def response_to_dict(response):
    """Flatten a Response into a pickle-friendly dict, recording its class
    path so the response can be re-created later."""
    response_cls = type(response)
    cls_name = (getattr(response_cls, '__qualname__', None)
                or getattr(response_cls, '__name__', None))
    return {
        'cls': '%s.%s' % (response_cls.__module__, cls_name),
        'url': response.url,
        'status': response.status,
        'body': response.body,
        'headers': dict(response.headers),
        'flags': response.flags,
        'encoding': response.encoding,
    }
def get_spider_class(spider_name, project_settings):
    """Locate the spider class named ``spider_name`` in SPIDER_MODULES,
    or return None if no module defines it."""
    for module_path in project_settings.get('SPIDER_MODULES'):
        # islice(..., 1, None) skips the package module itself and only
        # inspects its submodules.
        for module in islice(walk_modules(module_path), 1, None):
            for candidate in iter_spider_classes(module):
                if candidate.name == spider_name:
                    return candidate
    return None
def parse_object(_object, spider, cb_settings):
    """Recursively normalize an arbitrary callback-result object.

    Requests become dicts, Responses become dicts (then recursed),
    dicts/lists are walked in place, tuples become lists.
    """
    if isinstance(_object, Request):
        return parse_request(_object, spider, cb_settings)
    if isinstance(_object, Response):
        return parse_object(response_to_dict(_object), spider, cb_settings)
    if isinstance(_object, dict):
        for key in _object:
            _object[key] = parse_object(_object[key], spider, cb_settings)
    elif isinstance(_object, (list, tuple)):
        if isinstance(_object, tuple):
            # Tuples are immutable; convert so items can be replaced.
            _object = list(_object)
        for idx, val in enumerate(_object):
            _object[idx] = parse_object(val, spider, cb_settings)
    return _object
# processes request for recording, handling auth settings
def parse_request(request, spider, cb_settings):
    """Convert a Request into a recordable dict, scrubbing auth headers
    and normalizing meta (the internal '_testmaster' key is dropped)."""
    _request = copy.deepcopy(request_to_dict(request, spider=spider))
    if not _request['callback']:
        _request['callback'] = 'parse'
    _clean_headers(_request['headers'], spider.settings, cb_settings)
    cleaned_meta = {
        key: parse_object(value, spider, cb_settings)
        for key, value in _request.get('meta').items()
        if key != '_testmaster'
    }
    _clean_splash(cleaned_meta, spider.settings, cb_settings)
    _request['meta'] = cleaned_meta
    return _request
def _decode_dict(data):
decoded = {}
for key, value in data.items():
if isinstance(key, bytes):
key = key.decode()
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, list) and len(value) > 0:
if isinstance(value[0], bytes):
value = value[0].decode()
decoded[key] = value
return decoded
def _clean_splash(meta, spider_settings, cb_settings):
    """Strip (or decode) the Authorization header inside splash meta.

    Callback-local settings take precedence over the global ones; the
    header survives only when explicitly whitelisted and not excluded.
    """
    splash_headers = meta.get('splash', {}).get('splash_headers', {})
    excluded_global = spider_settings.get(
        'TESTMASTER_EXCLUDED_HEADERS', default=[])
    excluded_local = getattr(cb_settings, 'EXCLUDED_HEADERS', [])
    excluded = excluded_local or excluded_global
    included_global = spider_settings.get(
        'TESTMASTER_INCLUDED_AUTH_HEADERS', default=[])
    included_local = getattr(cb_settings, 'INCLUDED_AUTH_HEADERS', [])
    included = included_local or included_global
    if 'Authorization' not in included or 'Authorization' in excluded:
        splash_headers.pop('Authorization', None)
    # deliberate inclusion!
    if 'Authorization' in splash_headers:
        try:
            splash_headers['Authorization'] = \
                splash_headers['Authorization'].decode()
        except AttributeError:
            # Already a str; nothing to decode.
            pass
def _process_for_json(data):
def _is_jsonable(short_dict):
try:
json.dumps(short_dict)
return True
except:
return False
to_delete = []
for k, v in data.items():
if not _is_jsonable({k: v}):
try:
data[k] = str(v)
except:
to_delete.append(k)
for d in to_delete:
del data[d]
def _clean_headers(headers, spider_settings, cb_settings, mode=""):
    """Remove excluded and non-whitelisted auth headers from ``headers``.

    Args:
        headers: mutable mapping of header name -> value; both str and
            bytes keys are popped.
        spider_settings: Scrapy settings object.
        cb_settings: per-callback settings module (attributes optional).
        mode: when "decode", return a bytes-decoded copy of the mapping.

    Returns:
        The headers mapping (a decoded copy when mode == "decode").
    """
    excluded_global = spider_settings.get('TESTMASTER_EXCLUDED_HEADERS', default=[])
    try:
        excluded_local = cb_settings.EXCLUDED_HEADERS
    except AttributeError:
        excluded_local = []
    # Bug fix: copy before extending. The chosen list is owned by the
    # settings / callback-settings object, so the original code's
    # excluded.extend(...) leaked auth headers into the configuration,
    # growing it on every call.
    excluded = list(excluded_local if excluded_local else excluded_global)
    auth_headers = ['Authorization', 'Proxy-Authorization']
    included_global = spider_settings.get('TESTMASTER_INCLUDED_AUTH_HEADERS', default=[])
    try:
        included_local = cb_settings.INCLUDED_AUTH_HEADERS
    except AttributeError:
        included_local = []
    included = included_local if included_local else included_global
    # Auth headers are excluded by default unless explicitly whitelisted.
    excluded.extend([h for h in auth_headers if h not in included])
    for header in excluded:
        headers.pop(header, None)
        headers.pop(header.encode(), None)
    if mode == "decode":
        headers = _decode_dict(headers)
    return headers
# processes request into JSON format for inscribing in view.json and for validation
def clean_request(request, spider_settings, cb_settings):
skipped_global = spider_settings.get('TESTMASTER_REQUEST_SKIPPED_FIELDS', default=[])
try:
skipped_local = cb_settings.REQUEST_SKIPPED_FIELDS
except AttributeError:
skipped_local = []
skipped_fields = skipped_local if skipped_local else skipped_global
_clean(request, skipped_fields)
request = _decode_dict(request)
request['headers'] = _clean_headers(request['headers'], spider_settings,
cb_settings, mode="decode")
_clean_splash(request['meta'], spider_settings, cb_settings)
_process_for_json(request['meta'])
return request
def clean_item(item, spider_settings, cb_settings):
    """Drop configured skip-fields from ``item`` in place; callback-local
    settings take precedence over the global ones."""
    skipped_global = spider_settings.get(
        'TESTMASTER_SKIPPED_FIELDS', default=[])
    skipped_local = getattr(cb_settings, 'SKIPPED_FIELDS', [])
    _clean(item, skipped_local if skipped_local else skipped_global)
def _clean(data, field_list):
for field in field_list:
data.pop(field, None)
def process_result(result, spider_settings, cb_settings):
    """Split a recorded callback result into cleaned items and requests.

    Args:
        result: list of {'type': 'item'|'request', 'data': ...} records.

    Returns:
        (items, requests): deep copies of the recorded data with the
        configured skip-fields removed and requests JSON-normalized.
    """
    items = [copy.deepcopy(entry["data"]) for entry in result
             if entry["type"] == "item"]
    requests = [copy.deepcopy(entry["data"]) for entry in result
                if entry["type"] == "request"]
    # Idiom fix: iterate items directly instead of range(len(items));
    # clean_item mutates each dict in place.
    for item in items:
        clean_item(item, spider_settings, cb_settings)
    requests = [clean_request(req, spider_settings, cb_settings)
                for req in requests]
    return items, requests
def erase_special_metakeys(request):
    """Return a copy of ``request`` whose meta has no underscore-prefixed keys."""
    public_meta = {key: value for key, value in request.meta.items()
                   if not key.startswith('_')}
    return request.replace(meta=public_meta)
def write_test(path, test_name, url):
    """Write the generated test_fixtures.py (and a default config.py) into
    the fixture directory at ``path``.

    The emitted test loads every *.bin fixture in its own directory and
    replays it through scrapy_testmaster's generate_test.
    """
    # Reconstruct the command line that produced these fixtures, for the
    # header comment of the generated file.
    command = 'scrapy {}'.format(' '.join(sys.argv))
    test_path = os.path.join(path, 'test_fixtures.py')
    config_file = os.path.join(path, 'config.py')
    test_code = '''# THIS IS A GENERATED FILE
# Generated by: {command} # noqa: E501
# Request URL: {url} # noqa: E501
import os
import unittest
from scrapy_testmaster.utils import generate_test


class TestMaster(unittest.TestCase):

    def test__{test_name}(self):
        files = os.listdir(
            os.path.dirname(
                os.path.abspath(__file__)
            )
        )
        files = [f for f in files if f.endswith('.bin')]
        self.maxDiff = None
        for f in files:
            file_path = os.path.join(os.path.dirname(__file__), f)
            print("Testing fixture '%s' in location: %s" % (f, file_path))
            test = generate_test(os.path.abspath(file_path))
            test(self)


if __name__ == '__main__':
    unittest.main()
'''.format(
        test_name=test_name,
        command=command,
        url=url,
    )
    with open(str(test_path), 'w') as f:
        f.write(test_code)
    # Seed a per-callback config.py from the bundled template, but never
    # overwrite one the user has already edited.
    if not os.path.exists(config_file):
        config_src = os.path.dirname(__file__) + '/config_doc.py'
        shutil.copyfile(config_src, config_file)
def binary_check(fx_obj, cb_obj, encoding):
    """Re-encode fixture data to bytes wherever the freshly-produced
    callback object holds bytes, so stored fixtures compare equal to
    live results. Walks dicts/Items, lists, and request dicts recursively."""
    if isinstance(cb_obj, (dict, Item)):
        fx_obj = {key: binary_check(value, cb_obj[key], encoding)
                  for key, value in fx_obj.items()}
    if isinstance(cb_obj, list):
        fx_obj = [binary_check(fx_item, cb_item, encoding)
                  for fx_item, cb_item in zip(fx_obj, cb_obj)]
    if isinstance(cb_obj, Request):
        fx_obj['headers'] = {
            to_bytes(key, encoding): [to_bytes(v, encoding) for v in values]
            for key, values in fx_obj['headers'].items()
        }
        fx_obj['body'] = to_bytes(fx_obj['body'], encoding)
    if isinstance(cb_obj, six.binary_type):
        fx_obj = fx_obj.encode(encoding)
    return fx_obj
def set_spider_attrs(spider, _args):
    """Copy each key/value in ``_args`` onto ``spider`` as an attribute."""
    for attr_name, attr_value in _args.items():
        setattr(spider, attr_name, attr_value)
def parse_callback_result(result, spider, cb_settings):
    """Walk a callback's output, serializing each yielded element.

    Returns:
        (processed, originals): {'type', 'data'} records for storage, plus
        the raw elements in their original order.
    """
    processed = []
    originals = []
    for element in result:
        originals.append(element)
        if isinstance(element, Request):
            record = {'type': 'request',
                      'data': parse_request(element, spider, cb_settings)}
        else:
            record = {'type': 'item',
                      'data': parse_object(
                          copy.deepcopy(element), spider, cb_settings)}
        processed.append(record)
    return processed, originals
def prepare_callback_replay(fixture_path, encoding="utf-8"):
    """Load a recorded fixture and rebuild the crawler/spider it was
    captured with.

    Args:
        fixture_path: path to a fixture*.bin file.
        encoding: fallback pickle encoding for legacy fixtures.

    Returns:
        (data, crawler, spider, settings) ready for replaying the callback.
    """
    with open(str(fixture_path), 'rb') as f:
        raw_data = f.read()
    fixture_info = unpickle_data(decompress_data(raw_data), encoding)
    if 'fixture_version' in fixture_info:
        # Versioned fixtures nest the payload and record its own encoding.
        encoding = fixture_info['encoding']
        data = unpickle_data(fixture_info['data'], encoding)
    else:
        data = fixture_info  # legacy tests
    settings = get_project_settings()
    spider_name = data.get('spider_name')
    if not spider_name:  # legacy tests: infer the spider from the dir layout
        spider_name = os.path.basename(
            os.path.dirname(
                os.path.dirname(fixture_path)
            )
        )
    spider_cls = get_spider_class(spider_name, settings)
    spider_cls.update_settings(settings)
    for k, v in data.get('settings', {}).items():
        # NOTE(review): priority 50 appears intended to outrank the usual
        # Scrapy setting priorities so recorded settings win - confirm.
        settings.set(k, v, 50)
    crawler = Crawler(spider_cls, settings)
    # Older fixtures stored the args under 'spider_args_in'.
    spider_args_in = data.get('spider_args', data.get('spider_args_in', {}))
    spider = spider_cls.from_crawler(crawler)
    for k, v in spider_args_in.items():
        setattr(spider, k, v)
    crawler.spider = spider
    return data, crawler, spider, settings
def generate_test(fixture_path, encoding='utf-8'):
data, crawler, spider, settings = prepare_callback_replay(
fixture_path, encoding=encoding
)
def test(self):
fx_result = data['result']
fx_version = data.get('python_version')
spider_args_in = data.get(
'spider_args', data.get('spider_args_in', {}))
set_spider_attrs(spider, spider_args_in)
request = request_from_dict(data['request'], spider)
response_cls = auto_import(data['response'].pop(
'cls', 'scrapy.http.HtmlResponse'))
response = response_cls(request=request, **data['response'])
middlewares = []
middleware_paths = data['middlewares']
for mw_path in middleware_paths:
try:
mw_cls = load_object(mw_path)
mw = create_instance(mw_cls, settings, crawler)
middlewares.append(mw)
except NotConfigured:
continue
crawler.signals.send_catch_log(
signal=signals.spider_opened,
spider=spider
)
result_attr_in = {
k: v for k, v in spider.__dict__.items()
| |
def p_opaque_0(self, t):
    """
    opaque : OPAQUE ID gate_scope bit_list
    """
    # NOTE: the docstring above is the ply.yacc grammar rule for this
    # production - its text is behavior, not documentation.
    # No semantic action: opaque declarations are currently ignored.
    # TODO: even less of an idea than barrier how to deal with this
def p_opaque_1(self, t):
    """
    opaque : OPAQUE ID gate_scope '(' ')' bit_list
    """
    # Grammar rule docstring (ply.yacc); opaque with an empty parameter
    # list is parsed but deliberately ignored.
def p_opaque_2(self, t):
    """
    opaque : OPAQUE ID gate_scope '(' gate_id_list ')' bit_list
    """
    # Grammar rule docstring (ply.yacc); opaque with parameters is parsed
    # but deliberately ignored.
def p_opaque_1e(self, t):
    """
    opaque : OPAQUE ID gate_scope '(' error
    """
    # Grammar error production: a malformed parameter list after '('.
    raise SyntaxError("Poorly formed OPAQUE statement.")
# ----------------------------------------
# measure : MEASURE primary ASSIGN primary
# ----------------------------------------
def p_measure(self, t):
    """
    measure : MEASURE primary ASSIGN primary
    """
    # NOTE: the docstring above is the ply.yacc grammar rule - do not edit.
    # t[2] is the measured quantum register, t[4] the classical target.
    if len(t[2]) != len(t[4]):
        # Bug fix: the original concatenated str + list/int
        # ("..." + t[2] + ... + len(t[2])), which raised TypeError while
        # building the message instead of the intended ValueError.
        raise ValueError(
            "registers are of different sizes '%s' is of size %d"
            " and '%s' is of size %d" % (t[2], len(t[2]), t[4], len(t[4]))
        )
    op = Gate()
    op.name = "measure"
    op.qblist = [t[2], t[4]]
    t[0] = [op]
def p_measure_e(self, t):
    """
    measure : MEASURE primary error
    """
    # Grammar error production: anything other than ASSIGN after the
    # measured primary is a syntax error.
    raise SyntaxError("Illegal measure statement." + str(t[3]))
# ----------------------------------------
# barrier : BARRIER primary_list
#
# Errors are covered by handling errors in primary_list
# ----------------------------------------
def p_barrier(self, t):
    """
    barrier : BARRIER primary_list
    """
    # Grammar rule docstring (ply.yacc); barriers are parsed but have no
    # semantic action yet.
    # TODO implement barriers, or maybe just apply them
# ----------------------------------------
# reset : RESET primary
# ----------------------------------------
def p_reset(self, t):
    """
    reset : RESET primary
    """
    # (The docstring above is the ply.yacc grammar rule - left untouched.)
    # Emit one "reset" Gate record per qubit of the primary.
    ops = []
    for qubit in t[2]:
        gate = Gate()
        gate.name = "reset"
        gate.qblist = [qubit]
        ops.append(gate)
    t[0] = ops
    # TODO might be an error to take a single argument
# ----------------------------------------
# IF '(' ID MATCHES NNINTEGER ')' unitary_op
# ----------------------------------------
def p_if(self, t):
    """
    if : IF '(' ID MATCHES NNINTEGER ')' unitary_op
       | IF '(' ID MATCHES NNINTEGER ')' measure
       | IF '(' ID MATCHES NNINTEGER ')' opaque
       | IF '(' ID MATCHES NNINTEGER ')' barrier
       | IF '(' ID MATCHES NNINTEGER ')' reset
       | IF '(' ID error
       | IF '(' ID MATCHES error
       | IF '(' ID MATCHES NNINTEGER error
       | IF error
    """
    # NOTE: the docstring above is the ply.yacc grammar rule for this
    # production - its text is behavior, not documentation.
    new_op = None  # For cythonization errors
    new_ops = None
    # len(t) discriminates which (error) alternative actually matched.
    if len(t) == 3:
        raise SyntaxError("Ill-formed IF statement. Perhaps a" + " missing '('?")
    if len(t) == 5:
        raise SyntaxError(
            "Ill-formed IF statement. Expected '==', " + "received '" + str(t[4])
        )
    if len(t) == 6:
        raise SyntaxError(
            "Ill-formed IF statement. Expected a number, "
            + "received '"
            + str(t[5])
        )
    if len(t) == 7:
        raise SyntaxError("Ill-formed IF statement, unmatched '('")
    # t[7] is the guarded statement; some statement kinds are rejected.
    if t[7] == "if":
        raise SyntaxError("Nested IF statements not allowed")
    if t[7] == "barrier":
        raise SyntaxError("barrier not permitted in IF statement")
    if t[7] == "measure":
        raise SyntaxError("measure not permitted in IF statement")
    if t[7] == "reset":
        raise SyntaxError("reset not permitted in IF statement")
    # TODO convert the cregister's value from binary to decimal
    # then compare with input number
    c_size = self.creg_exists(t[3])
    if c_size:
        c_index = self.get_indexed_id(t[3], 0)
    else:
        raise ValueError("No such classical bit register")
    if 1 << c_size > t[5]:  # creg size must be able to contain the int
        # Build a prefix-notation boolean formula over the creg's bits
        # that holds exactly when the register equals t[5].
        bit = bin(t[5])
        formula = "AND " * (len(bit) - 3)
        # prefix notation, so we'll put all operators in the beginning
        # print("formula was "+formula+" and index "+str(c_index))
        for i in range(2, len(bit)):
            formula += "NOT " if bit[i] == "0" else ""
            formula += str(c_index + i - 2) + " "  # OP c[i] NOT c[i+1]
            # print("then "+str(i)+" formula is "+formula)
    # print("formula is "+formula)
    # NOTE(review): when the creg cannot contain t[5], `formula` stays
    # unbound but is still referenced below - confirm that case is
    # unreachable or should raise.
    # TODO quantum_op is always empty so replenish it
    # TODO might be better to change IF into a unitary_op/measure/etc
    # TODO since IF needs to know whether to apply the gates or not
    for op in t[7]:
        if op.name == "measure":
            raise ImplementationError("Conditional measures are not supported")
        elif op.name == "reset":
            raise ImplementationError("Conditional resets are not supported")
        elif op.name in self.standard_gates:
            # print("gate standard "+ self.standard_gates[op.name])
            # A standard gate is accepted when it is one of the composite
            # aliases below or its parameter count matches the gate set.
            if (
                self.standard_gates[op.name] == "SDG"
                or self.standard_gates[op.name] == "TDG"
                or self.standard_gates[op.name] == "CU1"
                or self.standard_gates[op.name] == "CU2"
                or self.standard_gates[op.name] == "CU3"
                or self.standard_gates[op.name] == "CH"
                or self.standard_gates[op.name] == "CRZ"
                or self.standard_gates[op.name] == "CSWAP"
                or len(op.params)
                == self.compiler.gate_set[self.standard_gates[op.name]].nb_args
            ):
                # Composite aliases expand to a base gate wrapped in a
                # DAG (dagger) or CTRL (controlled) AST node.
                if self.standard_gates[op.name] == "SDG":
                    ast = GateAST("S", op.params)
                    ast = GateAST("DAG", ast)
                elif self.standard_gates[op.name] == "TDG":
                    ast = GateAST("T", op.params)
                    ast = GateAST("DAG", ast)
                elif self.standard_gates[op.name] == "CU1":
                    ast = GateAST("U1", op.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[op.name] == "CU2":
                    ast = GateAST("U2", op.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[op.name] == "CU3":
                    ast = GateAST("U3", op.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[op.name] == "CH":
                    ast = GateAST("H", op.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[op.name] == "CRZ":
                    ast = GateAST("RZ", op.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[op.name] == "CSWAP":
                    ast = GateAST("SWAP", op.params)
                    ast = GateAST("CTRL", ast)
                else:
                    ast = GateAST(self.standard_gates[op.name], op.params)
                new_op = self.compiler.build_op_by_ast(ast, op.qblist)
            else:
                raise InvalidParameterNumber(
                    self.standard_gates[op.name],
                    self.compiler.gate_set[
                        self.standard_gates[op.name]
                    ].nb_args,
                    op.params,
                    self.lineno,
                )
        else:
            # print(">>>>>>>>We got a gate called "+op.name)
            # print("is it true though ? "+str(self.is_routine(op.name)))
            if self.is_routine(op.name, len(op.qblist), len(op.params)):
                new_ops = self.build_routine(op.name, op.qblist, op.params)
            else:
                raise NameError(
                    "No such gate or routine, '"
                    + op.name
                    + "' or wrong number of arguments or "
                    "parameters\nSupported Clifford gates"
                    + " are :\nh, cx, ccx, x, y, z, s, "
                    + "swap, cz'"
                )
        # Tag the compiled op(s) as classically controlled by `formula`
        # and append them to the compiler's op stream.
        if new_ops is not None:
            # print("IF routine gives " + str(len(new_ops)))
            for o in new_ops:
                o.type = OpType.CLASSICCTRL
                o.formula = formula
                self.compiler.ops.append(o)
        else:
            new_op.type = OpType.CLASSICCTRL
            new_op.formula = formula
            self.compiler.ops.append(new_op)
# ----------------------------------------
# These are all the things you can have outside of a gate declaration
# quantum_op : unitary_op
# | opaque
# | measure
# | reset
# | barrier
# | if
#
# ----------------------------------------
def p_quantum_op(self, t):
    """
    quantum_op : unitary_op
               | opaque
               | measure
               | barrier
               | reset
               | if
    """
    # NOTE: the docstring above is the ply.yacc grammar rule for this
    # production - its text is behavior, not documentation.
    # t[1] is a list of Gate records (or None for no-op productions);
    # each record is compiled into concrete ops collected in t[0].
    t[0] = []
    if t[1] is None:
        return
    for gat in t[1]:
        if gat.name == "measure":
            op = self.compiler.build_measure(gat.qblist[0], gat.qblist[1])
            t[0].append(op)
        elif gat.name == "reset":
            op = self.compiler.build_reset([gat.qblist[0]], [gat.qblist[0]])
            t[0].append(op)
        elif gat.name in self.standard_gates:
            # A standard gate is accepted when it is one of the composite
            # aliases below or its parameter count matches the gate set.
            if (
                self.standard_gates[gat.name] == "SDG"
                or self.standard_gates[gat.name] == "TDG"
                or self.standard_gates[gat.name] == "CU1"
                or self.standard_gates[gat.name] == "CU2"
                or self.standard_gates[gat.name] == "CU3"
                or self.standard_gates[gat.name] == "CH"
                or self.standard_gates[gat.name] == "CRZ"
                or self.standard_gates[gat.name] == "CSWAP"
                or len(gat.params)
                == self.compiler.gate_set[self.standard_gates[gat.name]].nb_args
            ):
                # Composite aliases expand to a base gate wrapped in a
                # DAG (dagger) or CTRL (controlled) AST node.
                if self.standard_gates[gat.name] == "SDG":
                    ast = GateAST("S", gat.params)
                    ast = GateAST("DAG", ast)
                elif self.standard_gates[gat.name] == "TDG":
                    ast = GateAST("T", gat.params)
                    ast = GateAST("DAG", ast)
                elif self.standard_gates[gat.name] == "CU1":
                    ast = GateAST("U1", gat.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[gat.name] == "CU2":
                    ast = GateAST("U2", gat.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[gat.name] == "CU3":
                    ast = GateAST("U3", gat.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[gat.name] == "CH":
                    ast = GateAST("H", gat.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[gat.name] == "CRZ":
                    ast = GateAST("RZ", gat.params)
                    ast = GateAST("CTRL", ast)
                elif self.standard_gates[gat.name] == "CSWAP":
                    ast = GateAST("SWAP", gat.params)
                    ast = GateAST("CTRL", ast)
                else:
                    ast = GateAST(self.standard_gates[gat.name], gat.params)
                op = self.compiler.build_op_by_ast(ast, gat.qblist)
                t[0].append(op)
            else:
                raise InvalidParameterNumber(
                    self.standard_gates[gat.name],
                    self.compiler.gate_set[self.standard_gates[gat.name]].nb_args,
                    gat.params,
                    self.lineno,
                )
        else:
            # print(">>>>>>>>We got a gate called "+gat.name)
            # print("is it true though ? "+str(self.is_routine(gat.name)))
            # Not a standard gate: try a user-defined routine.
            if self.is_routine(gat.name, len(gat.qblist), len(gat.params)):
                ops = self.build_routine(gat.name, gat.qblist, gat.params)
                # print("routine yields " + str(len(ops)) + " ops")
                t[0].extend(ops)
            else:
                raise NameError(
                    "No such gate or routine, '"
                    + gat.name
                    + "' or wrong number of arguments or "
                    "parameters\nSupported Clifford gates"
                    + " are :\nh, cx, ccx, x, y, z, s, "
                    + "swap, cz'"
                )
    # print("Quantum op has ")
    # for o in t[0]:
    #     print(o)
# ----------------------------------------
# unary : NNINTEGER
# | REAL
# | PI
# | ID
# | '(' expression ')'
# | ID '(' expression ')'
#
# We will trust 'expression' to throw before we have to handle it here
# ----------------------------------------
def p_unary_0(self, t):
    """
    unary : NNINTEGER
    """
    # Grammar rule docstring (ply.yacc): a non-negative integer literal
    # passes through as its own value.
    t[0] = t[1]
def p_unary_1(self, t):
    """
    unary : REAL
    """
    # Grammar rule docstring (ply.yacc): a real literal passes through.
    t[0] = t[1]
def p_unary_2(self, t):
    """
    unary : PI
    """
    # Grammar rule docstring (ply.yacc): the PI token evaluates to math.pi.
    t[0] = math.pi
def p_unary_3(self, t):
    """
    unary : ID
    """
    # Grammar rule docstring (ply.yacc): an identifier (e.g. a gate
    # parameter name) passes through unresolved.
    t[0] = t[1]
def p_unary_4(self, t):
| |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from psycopg2 import sql
from abc import ABC
from csv import DictReader
from queue import Queue
from schematic import NameSqlMixin, DictableMixin
class ColumnTypeNotFoundError(Exception):
    """Raised when a TableColumnType cannot be located in a restrictivity tree."""
    pass
class TableColumnType(ABC, NameSqlMixin, DictableMixin):
    """Represents a type for a table column.

    The "size" of two different column types can be compared using
    the built in comparison operators. For two TableColumnTypes, A and B,
    if A is a superset of B, then A > B evaluates to True.

    Note that TableColumnTypes are only fully comparable when in the
    same subtree. If A is not found when traversing next_less_restrictive
    in B and vice versa, A < B, A > B, and A == B will all return False

    Attributes:
        name: The name of the type
        next_less_restrictive: The TableColumnType which is next less restrictive
        parameter: Instance parameter for this class. E.g., '256' for a VARCHAR(256)

    TODO(Cody): Comparisons of incomparable operands should raise an exception
    TODO(Cody): Factor out the "most to least restrictive" iterator logic into a method on this class.
    TODO(Cody): Change the "parameter" attribute to use **kwargs.
    """
    name = "TableColumnType"
    # Class-level link toward the less restrictive type (root of the tree).
    next_less_restrictive = None
    # True for types that carry an instance parameter, e.g. VARCHAR(n).
    parameterized = False

    def __init__(self, parameter=None):
        # Reject a parameter on non-parameterized types.
        if parameter is not None and not self.parameterized:
            raise ValueError(
                "{} is not a parameterized type.".format(
                    self.name))
        self.parameter = parameter

    def __hash__(self):
        return hash((self.name, self.parameter))

    def __repr__(self):
        if self.parameterized:
            return "{}({})".format(self.name, self.parameter)
        else:
            return '{}'.format(self.name)

    def __eq__(self, other):
        # Equal only for the same concrete type with the same parameter.
        return isinstance(
            other, type(self)) and other.parameter == self.parameter

    def __lt__(self, other):
        # Delegate to the reflected comparison.
        return other > self

    def __gt__(self, other):
        # Walk other's next_less_restrictive chain looking for self;
        # found means self is less restrictive (a superset) of other.
        # NOTE(review): this walk *calls* next_less_restrictive(),
        # instantiating each type, while get_depth below follows the
        # attribute without calling - confirm which access is intended.
        nlr = other
        while nlr:
            nlr = nlr.next_less_restrictive() if nlr.next_less_restrictive else None
            if nlr == self:
                return True
        return False

    def get_depth(self):
        """Get the distance between this TableColumnType
        and the least restrictive TableColumnType in its
        next_less_restrictive linked list

        Returns:
            An int
        """
        depth = -1
        nlr = self
        while nlr:
            nlr = nlr.next_less_restrictive
            depth += 1
        return depth

    def value_is_compatible(self, value):
        """Checks to see if the given value can be inserted into a column of
        the type described by this instance.

        Args:
            value: The value to check, a string

        Returns:
            True if _value_is_compatible_superset(self, value) is True and
            this type isn't parameterized

        Raises:
            NotImplementedError: This should be implemented by subclasses
                if subclasses are parameterized.
        """
        if self.parameterized:
            raise NotImplementedError
        else:
            return self._value_is_compatible_superset(value)

    def _value_is_compatible_superset(self, value):
        """Checks to see if the given value can be inserted into a column of
        the group of types described by this class.

        This is used, for example, to check if a value could
        fit into any VARCHAR class in a SQL database, whereas
        value_is_compatible would be used
        to check if a value could fit into specifically
        into a VARCHAR(256) class.

        Args:
            value: The value to check, a string

        Raises:
            NotImplementedError: This should be implemented by subclasses.
        """
        raise NotImplementedError

    @staticmethod
    def get_parameter_for_value(value):
        """Get the parameter for a parameterized implementation
        of this class that is required to fit the given value.

        Args:
            value: the value to return the parameter for.

        Raises:
            NotImplementedError: Subclasses should implement this.
        """
        raise NotImplementedError

    @classmethod
    def from_value(cls, value):
        """Create an instance of this class that is compatible with value.

        Args:
            value: The value to return an instance for

        Returns:
            An instance of cls that can fit the value.

        Raises:
            NoCompatibleParameterError: if value cannot fit in any instance of this class.
        """
        if cls.parameterized:
            if not cls()._value_is_compatible_superset(value):
                raise ValueError(
                    "Value {} not compatible with any instance of {}".format(
                        value, cls.name))
            else:
                return cls(parameter=cls.get_parameter_for_value(value))
        else:
            return cls()
class TableColumn(ABC, DictableMixin, NameSqlMixin):
    """DB-agnostic base class for storing info about a column in a table.

    Attributes:
        name: The identifier for the column
        column_type: A TableColumnType instance with type information for
            the column.
    """

    def __init__(self, name, column_type):
        self.name = name
        self.column_type = column_type

    def __repr__(self):
        return "TableColumn('{}', {})".format(self.name, repr(self.column_type))

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return (self.name == other.name
                and self.column_type == other.column_type)

    def __hash__(self):
        return hash((self.name, self.column_type))
class TableDefinition(ABC, DictableMixin, NameSqlMixin):
    """DB-agnostic base class for storing info about a table

    Attributes:
        name: The identifier for this table.
        columns: A list of TableColumns describing this table's columns.
    """

    def __init__(self, name, columns):
        self.name = name
        # Column names must be unique within a table.
        if len(frozenset([c.name for c in columns])) != len(columns):
            raise ValueError("Cannot have multiple columns of same name in TableDefinition")
        self.columns = columns

    def create_sql(self):
        """Generate a sql statement for creating a table based on this TableDefinition.

        Raises:
            NotImplementedError: Subclasses should implement this.
        """
        raise NotImplementedError

    def __eq__(self, other):
        # Column order is irrelevant for equality (frozenset comparison).
        return isinstance(
            self,
            type(other)) and self.name == other.name and frozenset(
            self.columns) == frozenset(
            other.columns)

    def __repr__(self):
        return "{}: [{}]".format(self.name, ",".join(
            [str(col) for col in self.columns]))

    def create_table(self, *args, **kwargs):
        """Create the table in the destination
        specified in *args and **kwargs

        Raises:
            NotImplementedError: Subclasses should implement this.
        """
        raise NotImplementedError

    def add_column(self, column):
        """Add a column to the column list.

        Args:
            column: A TableColumn instance to be added to this instance's columns.

        Raises:
            ValueError: If a column with the same (case-insensitive) name
                already exists.
        """
        # NOTE(review): duplicate detection here is case-insensitive, but
        # update_column below matches names case-sensitively - confirm
        # which convention is intended.
        for col in self.columns:
            if col.name.upper() == column.name.upper():
                raise ValueError(
                    "Column with name {} already exists in TableDefinition {}".format(
                        column.name, self.name))
        self.columns.append(column)

    def update_column(self, column):
        """Update the existing column with name column.name to the given column.

        Args:
            column: A TableColumn instance replacing the existing
                same-named column.

        Raises:
            ValueError: If TableColumn with column.name does not exist in columns
        """
        for i, col in enumerate(self.columns):
            if col.name == column.name:
                self.columns[i] = column
                return
        raise ValueError(
            "No such column name {} in {}".format(
                column.name, self.name))

    def column_names(self):
        """Get a list of column names for this table."""
        return [col.name for col in self.columns]

    def get_rows(self, *args, **kwargs):
        """Generator for rows of the table described by this TableDefinition.

        Args:
            *args, **kwargs: DB-specific arguments for connection, e.g.,
                a psycopg2.connection or csv.DictReader object

        Raises:
            NotImplementedError: Subclasses should implement this.
        """
        raise NotImplementedError

    @classmethod
    def from_source(cls, *args, **kwargs):
        """Instantiate from an implementation-specific source (e.g., a CSV file or a DB connection

        Raises:
            NotImplementedError: Subclasses should implement this.
        """
        raise NotImplementedError
class Schematic(ABC, DictableMixin):
    """Interface for implementation specifics for a type of database or warehouse.

    The TableColumnTypes in a given schematic form a tree with the most
    restrictive types being leaf nodes and the least restrictive type
    being the root node.

    Attributes:
        name: Static attribute with the name of this schematic
        most_restrictive_types: the leaf nodes of the restrictivity tree
        table_def: implementation of TableDefinition for this schematic
    """
    name = 'schematic'  # identifier for this schematic; subclasses override
    most_restrictive_types = []  # leaf TableColumnTypes of the restrictivity tree
    table_definition_class = TableDefinition  # TableDefinition subclass used by this schematic
    column_class = TableColumn  # TableColumn subclass used by this schematic
    null_strings = []  # presumably string values treated as NULL -- TODO confirm in subclasses
def get_distance_from_leaf_node(self, column_type):
"""Get the distance between the given TableColumnType
and its nearest leaf node.
Returns:
An int
"""
distances = []
for ct in self.most_restrictive_types:
distance = 0
nlr = ct
while nlr:
if nlr == column_type:
distances.append(distance)
nlr = None
else:
distance += 1
nlr = nlr.next_less_restrictive
if not distances:
raise ColumnTypeNotFoundError
return min(distances)
def get_type(self, value, previous_type=None):
"""Get what type of column the given value would be.
Args:
value: the value to get the type for.
previous_type: the type that the column this value is in
had previously been assigned
| |
# Copyright 2018 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import string
import datetime
from pulsar import MessageId
from streamsets.testframework.markers import pulsar, sdc_min_version
from streamsets.testframework.utils import get_random_string
# Module-level logger for these integration tests.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)  # verbose output while the tests run
SNAPSHOT_TIMEOUT_SEC = 120  # max seconds to wait for a pipeline snapshot to finish
def get_pulsar_consumer_stage(pipeline_builder, topic, initial_offset):
    """Build a Pulsar Consumer origin stage configured for TEXT data.

    Args:
        pipeline_builder: builder the stage is attached to.
        topic: Pulsar topic to consume from.
        initial_offset: where to start reading (e.g. 'EARLIEST'/'LATEST').

    Returns:
        The configured Pulsar Consumer stage.
    """
    consumer_attributes = dict(data_format='TEXT',
                               batch_wait_time_in_ms=20000,
                               topic=topic,
                               consumer_name='consumer',
                               initial_offset=initial_offset)
    consumer = pipeline_builder.add_stage('Pulsar Consumer', type='origin')
    consumer.set_attributes(**consumer_attributes)
    return consumer
def get_pulsar_producer_stage(pipeline_builder, topic):
    """Build a Pulsar Producer destination stage configured for TEXT data.

    Args:
        pipeline_builder: builder the stage is attached to.
        topic: Pulsar topic (or expression) to publish to.

    Returns:
        The configured Pulsar Producer stage.
    """
    producer_attributes = dict(data_format='TEXT',
                               text_field_path='/text',
                               topic=topic)
    producer = pipeline_builder.add_stage('Pulsar Producer', type='destination')
    producer.set_attributes(**producer_attributes)
    return producer
def verify_results(pulsar_consumer_pipeline, sdc_executor, number_of_messages, message, data_format):
    """Start the consumer pipeline, capture a snapshot, and verify its records."""
    command = sdc_executor.capture_snapshot(pulsar_consumer_pipeline, start_pipeline=True, wait=False)
    verify_results_snapshot(pulsar_consumer_pipeline, command, number_of_messages,
                            message, data_format)
def verify_results_snapshot(pulsar_consumer_pipeline, snapshot_pipeline_command, number_of_messages,
                            message, data_format):
    """Wait for the snapshot command to finish, then assert every captured
    record matches *message* and that exactly *number_of_messages* arrived."""
    logger.debug('Finish the snapshot and verify')
    logger.debug('Time: %s', datetime.datetime.now())
    finished_command = snapshot_pipeline_command.wait_for_finished(timeout_sec=SNAPSHOT_TIMEOUT_SEC)
    snapshot = finished_command.snapshot
    origin_output = snapshot[pulsar_consumer_pipeline[0].instance_name].output
    records = [entry.field for entry in origin_output]
    assert records is not None
    received = 0
    for record in records:
        if data_format == 'TEXT':
            assert record['text'] == message
        elif data_format == 'JSON':
            assert record['Name'] == message['Name']
            assert record['Job'] == message['Job']
        elif data_format == 'XML':
            assert record['value'] == message
        elif data_format == 'DELIMITED':
            logger.debug(record)
            logger.debug(message)
            expected = message.split(',')
            actual = list(record.values())
            assert actual[0] == expected[0]
            assert actual[1] == expected[1]
        received += 1
    assert received == number_of_messages
def get_dev_raw_data_source(pipeline_builder, raw_data, data_format='JSON', stop_after_first_batch=False):
    """Add a 'Dev Raw Data Source' stage to *pipeline_builder*.

    Sets the stage's raw_data, data_format and stop_after_first_batch
    properties, then returns the stage.
    """
    source = pipeline_builder.add_stage('Dev Raw Data Source')
    for attribute, value in (('data_format', data_format),
                             ('raw_data', raw_data),
                             ('stop_after_first_batch', stop_after_first_batch)):
        setattr(source, attribute, value)
    return source
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_consumer(sdc_builder, sdc_executor, pulsar):
    """Test for Pulsar consumer origin stage. We do so by publishing data to a test topic using Pulsar client and
    having a pipeline which reads that data using Pulsar consumer origin stage. Data is then asserted for what is
    published at Pulsar client and what we read in the pipeline snapshot. The pipeline looks like:

    Pulsar Consumer pipeline:
        pulsar_consumer >> trash
    """
    sub_name = get_random_string(string.ascii_letters, 10)
    consumer_name = get_random_string(string.ascii_letters, 10)
    topic_name = get_random_string(string.ascii_letters, 10)
    max_records = 100
    input_text = 'Hello World!'
    builder = sdc_builder.get_pipeline_builder()
    pulsar_consumer = builder.add_stage('Pulsar Consumer').set_attributes(subscription_name=sub_name,
                                                                          consumer_name=consumer_name,
                                                                          topic=topic_name,
                                                                          data_format='TEXT',
                                                                          max_batch_size_in_records=max_records)
    trash = builder.add_stage('Trash')
    pulsar_consumer >> trash
    consumer_origin_pipeline = builder.build(title='Pulsar Consumer pipeline').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(consumer_origin_pipeline)
    client = pulsar.client
    admin = pulsar.admin
    # Bug fix: define producer before the try so an early failure (e.g. in
    # capture_snapshot or create_producer) cannot raise NameError in finally
    # and mask the original exception.
    producer = None
    try:
        snapshot_command = sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True, wait=False)
        producer = client.create_producer(topic_name)
        for _ in range(max_records):
            producer.send(input_text.encode())
        snapshot = snapshot_command.wait_for_finished().snapshot
        sdc_executor.stop_pipeline(consumer_origin_pipeline)
        output_records = [record.field['text'] for record in snapshot[pulsar_consumer.instance_name].output]
        assert output_records == [input_text] * 10  # 10 hardcoded for snapshot batch size
    finally:
        # all producer/consumers need to be closed before topic can be deleted without force
        if producer is not None:
            producer.close()
        client.close()
        if producer is not None:
            admin.delete_topic(producer.topic())
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_producer(sdc_builder, sdc_executor, pulsar):
    """Test for Pulsar producer target stage. We do so by publishing data to a test topic using Pulsar producer
    stage and then read the data from that topic using Pulsar client. We assert the data from the client to what has
    been injected by the producer pipeline. The pipeline looks like:

    Pulsar Producer pipeline:
        dev_raw_data_source >> pulsar_producer
    """
    topic_name = get_random_string(string.ascii_letters, 10)
    input_text = 'Hello World!'
    builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',
                                                                                  raw_data=input_text)
    pulsar_producer = builder.add_stage('Pulsar Producer')
    pulsar_producer.set_attributes(topic=topic_name, data_format='TEXT')
    dev_raw_data_source >> pulsar_producer
    producer_dest_pipeline = builder.build(title='Pulsar Producer pipeline').configure_for_environment(pulsar)
    # add pipeline and capture pipeline messages to assert
    sdc_executor.add_pipeline(producer_dest_pipeline)
    sdc_executor.start_pipeline(producer_dest_pipeline).wait_for_pipeline_batch_count(10)
    sdc_executor.stop_pipeline(producer_dest_pipeline)
    history = sdc_executor.get_pipeline_history(producer_dest_pipeline)
    msgs_sent_count = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
    logger.debug('Number of messages ingested into the pipeline = %s', msgs_sent_count)
    client = pulsar.client
    admin = pulsar.admin
    # Bug fix: define reader before the try so a failing create_reader cannot
    # raise NameError in finally and mask the original exception.
    reader = None
    try:
        reader = client.create_reader(topic_name, MessageId.earliest)
        msgs_received = []
        while reader.has_message_available():
            msgs_received.append(reader.read_next().data().decode().strip())  # strip to remove newlines
    finally:
        # reader needs to be closed before topic can be deleted without force
        if reader is not None:
            reader.close()
        client.close()
        if reader is not None:
            admin.delete_topic(reader.topic())
    logger.debug('Number of messages received from Pulsar = %d', len(msgs_received))
    assert msgs_received == [input_text] * msgs_sent_count
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_origin_standalone_text(sdc_builder, sdc_executor, pulsar):
    """Write simple text messages into pulsar and confirm that pulsar successfully reads them.
    Specifically, this would look like:

    Pulsar Producer Destination pipeline with standalone mode:
        dev_raw_data_source >> pulsar_producer

    Pulsar Consumer Origin pipeline with standalone mode:
        pulsar_consumer >> trash
    """
    message = 'pulsar message martin xavi'
    data = {'text': message}
    json_data = json.dumps(data)
    topic = get_random_string(string.ascii_letters, 10)
    # Build the Pulsar producer pipeline with Standalone mode.
    pulsar_producer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_producer_pipeline_builder.add_error_stage('Discard')
    dev_raw_data_source = get_dev_raw_data_source(pulsar_producer_pipeline_builder, json_data)
    pulsar_producer = get_pulsar_producer_stage(pulsar_producer_pipeline_builder, topic)
    # NOTE(review): 'compresion_type' looks misspelled -- confirm it matches the
    # stage library's attribute name before renaming.
    pulsar_producer.set_attributes(compresion_type='NONE')
    dev_raw_data_source >> pulsar_producer
    pulsar_producer_pipeline = pulsar_producer_pipeline_builder.build(
        title='Pulsar Producer Standalone Text').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_producer_pipeline)
    # Build the Pulsar consumer pipeline with Standalone mode.
    pulsar_consumer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_consumer_pipeline_builder.add_error_stage('Discard')
    pulsar_consumer = get_pulsar_consumer_stage(pulsar_consumer_pipeline_builder, topic, 'LATEST')
    pulsar_consumer.set_attributes(subscription_type='FAILOVER',
                                   consumer_queue_size=1000)
    trash = pulsar_consumer_pipeline_builder.add_stage(label='Trash')
    pulsar_consumer >> trash
    pulsar_consumer_pipeline = pulsar_consumer_pipeline_builder.build(
        title='Pulsar Consumer Standalone pipeline').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_consumer_pipeline)
    try:
        number_of_messages = 10
        # Publish messages to Pulsar and verify using snapshot if the same messages are received.
        sdc_executor.start_pipeline(pulsar_producer_pipeline).wait_for_pipeline_batch_count(1)
        verify_results(pulsar_consumer_pipeline,
                       sdc_executor,
                       number_of_messages,
                       message, 'TEXT')
    finally:
        sdc_executor.stop_pipeline(pulsar_producer_pipeline, wait=True, force=True)
        sdc_executor.stop_pipeline(pulsar_consumer_pipeline, wait=True, force=True)
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_origin_standalone_json(sdc_builder, sdc_executor, pulsar):
    """Write simple json messages into pulsar and confirm that pulsar successfully reads them.
    Specifically, this would look like:

    Pulsar Producer Destination pipeline with standalone mode:
        dev_raw_data_source >> pulsar_producer

    Pulsar Consumer Origin pipeline with standalone mode:
        pulsar_consumer >> trash

    Producer includes topic name in message field
    Producer partition type = Round Robin
    """
    topic = get_random_string(string.ascii_letters, 10)
    message = {'Topic': topic, 'Name': 'Xavi', 'Job': 'Developer'}
    json_data = json.dumps(message)
    # Build the Pulsar producer pipeline with Standalone mode.
    pulsar_producer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_producer_pipeline_builder.add_error_stage('Discard')
    dev_raw_data_source = get_dev_raw_data_source(pulsar_producer_pipeline_builder, json_data)
    # The destination topic is resolved per-record from the message's /Topic field.
    pulsar_producer = get_pulsar_producer_stage(pulsar_producer_pipeline_builder, '${record:value(\'/Topic\')}')
    # NOTE(review): 'compresion_type' looks misspelled -- confirm it matches the
    # stage library's attribute name before renaming.
    pulsar_producer.set_attributes(data_format='JSON',
                                   partition_type='ROUND_ROBIN',
                                   compresion_type='LZ4')
    dev_raw_data_source >> pulsar_producer
    pulsar_producer_pipeline = pulsar_producer_pipeline_builder.build(
        title='Pulsar Producer Standalone JSON').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_producer_pipeline)
    # Build the Pulsar consumer pipeline with Standalone mode.
    pulsar_consumer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_consumer_pipeline_builder.add_error_stage('Discard')
    pulsar_consumer = get_pulsar_consumer_stage(pulsar_consumer_pipeline_builder, topic,
                                                'EARLIEST')
    pulsar_consumer.set_attributes(subscription_type='SHARED',
                                   consumer_queue_size=10000,
                                   data_format='JSON')
    trash = pulsar_consumer_pipeline_builder.add_stage(label='Trash')
    pulsar_consumer >> trash
    pulsar_consumer_pipeline = pulsar_consumer_pipeline_builder.build(
        title='Pulsar Consumer Standalone JSON').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_consumer_pipeline)
    try:
        number_of_messages = 10
        # Publish messages to Pulsar and verify using snapshot if the same messages are received.
        sdc_executor.start_pipeline(pulsar_producer_pipeline).wait_for_pipeline_batch_count(1)
        verify_results(pulsar_consumer_pipeline,
                       sdc_executor,
                       number_of_messages,
                       message, 'JSON')
    finally:
        sdc_executor.stop_pipeline(pulsar_producer_pipeline, wait=True, force=True)
        sdc_executor.stop_pipeline(pulsar_consumer_pipeline, wait=True, force=True)
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_origin_standalone_xml(sdc_builder, sdc_executor, pulsar):
    """Write simple XML messages into pulsar and confirm that pulsar successfully reads them.
    Specifically, this would look like:

    Pulsar Producer Destination pipeline with standalone mode:
        dev_raw_data_source text message >> pulsar_producer XML

    Pulsar Consumer Origin pipeline with standalone mode:
        pulsar_consumer XML >> trash
    """
    message = "Text message that will be converted in XML"
    topic = get_random_string(string.ascii_letters, 10)
    # Build the Pulsar producer pipeline with Standalone mode.
    pulsar_producer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_producer_pipeline_builder.add_error_stage('Discard')
    dev_raw_data_source = get_dev_raw_data_source(pulsar_producer_pipeline_builder, message, data_format='TEXT')
    pulsar_producer = get_pulsar_producer_stage(pulsar_producer_pipeline_builder, topic)
    # NOTE(review): 'compresion_type' looks misspelled -- confirm it matches the
    # stage library's attribute name before renaming.
    pulsar_producer.set_attributes(data_format='XML',
                                   partition_type='SINGLE',
                                   hashing_scheme='JAVA_STRING_HASH',
                                   message_key='12345',
                                   compresion_type='ZLIB')
    dev_raw_data_source >> pulsar_producer
    pulsar_producer_pipeline = pulsar_producer_pipeline_builder.build(
        title='Pulsar Producer Standalone XML').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_producer_pipeline)
    # Build the Pulsar consumer pipeline with Standalone mode.
    pulsar_consumer_pipeline_builder = sdc_builder.get_pipeline_builder()
    pulsar_consumer_pipeline_builder.add_error_stage('Discard')
    pulsar_consumer = get_pulsar_consumer_stage(pulsar_consumer_pipeline_builder, topic, 'EARLIEST')
    pulsar_consumer.set_attributes(data_format='XML',
                                   consumer_queue_size=1)
    trash = pulsar_consumer_pipeline_builder.add_stage(label='Trash')
    pulsar_consumer >> trash
    pulsar_consumer_pipeline = pulsar_consumer_pipeline_builder.build(
        title='Pulsar Consumer Standalone XML').configure_for_environment(pulsar)
    sdc_executor.add_pipeline(pulsar_consumer_pipeline)
    try:
        number_of_messages = 10
        # Publish messages to Pulsar and verify using snapshot if the same messages are received.
        sdc_executor.start_pipeline(pulsar_producer_pipeline).wait_for_pipeline_batch_count(1)
        verify_results(pulsar_consumer_pipeline,
                       sdc_executor,
                       number_of_messages,
                       message, 'XML')
    finally:
        sdc_executor.stop_pipeline(pulsar_producer_pipeline, wait=True, force=True)
        sdc_executor.stop_pipeline(pulsar_consumer_pipeline, wait=True, force=True)
@pulsar
@sdc_min_version('3.5.0')
def test_pulsar_origin_standalone_topics_list(sdc_builder, sdc_executor, pulsar):
"""Write simple json messages into pulsar and confirm that pulsar successfully reads them.
Specifically, this would look like:
Pulsar Producer Destination pipeline with standalone mode:
dev_raw_data_soruce >> pulsar_producer_1
Pulsar Producer Destination pipeline with standalone mode:
dev_raw_data_soruce >> pulsar_producer_2
Pulsar Consumer Origin pipeline with standalone mode:
pulsar_consumer_1_2 >> trash
"""
message = {'Name': 'Xavi', 'Job': 'Developer'}
json_data = json.dumps(message)
topic1 = get_random_string(string.ascii_letters, 10)
topic2 = get_random_string(string.ascii_letters, 10)
# Build the First Pulsar producer pipeline with Standalone mode.
pulsar_producer_pipeline_builder1 = sdc_builder.get_pipeline_builder()
pulsar_producer_pipeline_builder1.add_error_stage('Discard')
dev_raw_data_source = get_dev_raw_data_source(pulsar_producer_pipeline_builder1, json_data,
stop_after_first_batch=True)
pulsar_producer = get_pulsar_producer_stage(pulsar_producer_pipeline_builder1, topic1)
pulsar_producer.set_attributes(data_format='JSON',
hashing_scheme='MUMUR3_32HASH',
message_key='12345',
enable_batching=False,
async_send=False)
dev_raw_data_source >> pulsar_producer
pulsar_producer_pipeline1 = pulsar_producer_pipeline_builder1.build(
title='Pulsar Producer Standalone List Topics 1').configure_for_environment(pulsar)
sdc_executor.add_pipeline(pulsar_producer_pipeline1)
# Build the Second Pulsar | |
<reponame>Octoberr/swm0920
"""table automated task"""
# -*- coding:utf-8 -*-
import time
import traceback
from commonbaby.helpers import helper_time
from commonbaby.sql import (SqlConn, SqliteColumn, SqliteConn,
SqliteConnManager, SqliteIndex, SqliteTable,
table_locker)
from ..sqlcondition import SqlCondition, SqlConditions
from datacontract import AutomatedTask, AutotaskBack, ECommandStatus
from .sqlite_config import SqliteConfig
from .tbsqlitebase import TbSqliteBase
class TbAutomatedTask(TbSqliteBase):
    """TbAutomatedTask table and its related operations."""
    # Schema of the AutomatedTask table; rows are logically keyed by
    # (Platform, TaskId) and ordered by UpdateTime across db files.
    __tb_AutomatedTask: SqliteTable = SqliteTable(
        'AutomatedTask',
        True,
        SqliteColumn(
            colname='Id',
            coltype='INTEGER',
            nullable=False,
            is_primary_key=True,
            is_auto_increament=True,
            is_unique=True).set_index_new(),
        SqliteColumn(colname='Platform', nullable=False).set_index_new(),
        SqliteColumn(colname="Source"),  # added column
        SqliteColumn(colname='TaskId', nullable=False).set_index_new(),
        SqliteColumn(
            colname='AutoTaskType', coltype='INTEGER',
            nullable=False).set_index_new(),
        SqliteColumn(
            colname='IsPeriod',
            coltype='INTEGER',
            nullable=False,
            defaultval=0).set_index_new(),
        SqliteColumn(
            colname='PeriodNum',
            coltype='INTEGER',
            nullable=False,
            defaultval=1).set_index_new(),
        SqliteColumn(colname='Interval', coltype='REAL'),
        SqliteColumn(colname='LastStartTime', coltype='DATETIME'),
        SqliteColumn(colname='LastEndTime', coltype='DATETIME'),
        SqliteColumn(colname='Status', coltype='INTEGER',
                     nullable=False).set_index_new(),
        SqliteColumn(colname='CmdRcvMsg'),
        SqliteColumn(
            colname='Progress',  # float in 0~1 expressing percent complete
            coltype='REAL',
            defaultval=0).set_index_new(),
        SqliteColumn(colname='BatchTotalCount', coltype='INTEGER'),
        SqliteColumn(colname='BatchCompleteCount',
                     coltype='INTEGER').set_index_new(),
        SqliteColumn(colname='Sequence', coltype='INTEGER',
                     defaultval=0).set_index_new(),
        SqliteColumn(colname='CreateTime', coltype='DATETIME'),
        SqliteColumn(colname='UpdateTime', coltype='REAL',
                     nullable=False).set_index_new(),
    )
    # All column names, for copy/paste convenience:
    # Platform,
    # Source,
    # TaskId,
    # AutoTaskType,
    # IsPeriod,
    # PeriodNum,
    # Interval,
    # LastStartTime,
    # LastEndTime,
    # Status,
    # CmdRcvMsg,
    # Progress,
    # BatchTotalCount,
    # BatchCompleteCount,
    # Sequence,
    # CreateTime,
    # UpdateTime,
    def __init__(self, dbcfg: SqliteConfig):
        """Initialize the table wrapper with the given sqlite configuration."""
        TbSqliteBase.__init__(self, TbAutomatedTask.__tb_AutomatedTask._tbname,
                              dbcfg)
    def _append_tables(self):
        """Register this table's schema with the connection manager."""
        self._conn_mngr.append_table(TbAutomatedTask.__tb_AutomatedTask)
@table_locker(__tb_AutomatedTask._tbname)
def save_new_automatedtask(
self,
task: AutomatedTask,
cmdstatus: ECommandStatus = ECommandStatus.WaitForSend,
) -> bool:
"""保存新的批处理任务的令牌资源\n
task:任务对象"""
res = False
conn: SqliteConn = None
cursor = None
task: AutomatedTask = task
try:
# 搜索每个库,看有没有 TokenId一样的,且时间更新
# 的,一样就更新其他所有字段
cmd = f'''SELECT COUNT(1) FROM {self._tbname} WHERE
TaskId=? and Platform=?'''
for conn in self.connect_all(5):
conn: SqliteConn = conn
try:
cursor = conn.cursor
cursor.execute(cmd, (
task.taskid,
task._platform,
))
result = cursor.fetchall()
if result[0][0] > 0:
res = True
cmd = f'''UPDATE {self._tbname} set
AutoTaskType=?,
Source=?,
IsPeriod=?,
PeriodNum=?,
Interval=?,
LastStartTime=?,
LastEndTime=?,
Status=?,
CmdRcvMsg=?,
Progress=?,
BatchTotalCount=?,
BatchCompleteCount=?,
Sequence=?
UpdateTime=?,
WHERE TaskId=? and Platform=? and UpdateTime<=?;'''
result = cursor.execute(
cmd,
(
task.autotasktype.value,
task.source,
1 if task._is_period else 0,
task.periodnum,
task.cmd.stratagy.interval,
task.laststarttime,
task.lastendtime,
task.cmdstatus.value,
task.cmdrcvmsg,
task.progress,
task.batchtotalcount,
task.batchcompletecount,
0, #重置sequence
helper_time.ts_since_1970_tz(),
task.taskid,
task._platform,
task.createtime,
))
# 这句没用,就是调试看看结果..
if result is None or result.rowcount > 1: # or len(result) < 1:
pass
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
if res:
break
# 若没找到,则insert一条到最新的库
# res==True表示至少有一个库里面有一条符合条件的任务,且已更新其字段
if not res:
conn = self.connect_write(5)
try:
# insert
cmd = f'''INSERT INTO {self._tbname}(
Platform,
TaskId,
AutoTaskType,
IsPeriod,
PeriodNum,
Interval,
LastStartTime,
LastEndTime,
Status,
BatchTotalCount,
BatchCompleteCount,
Progress,
Source,
CmdRcvMsg,
CreateTime,
UpdateTime,
Sequence) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
# 有几个属于TaskBack的字段在此不更新
cursor = conn.cursor
result = cursor.execute(cmd, (
task._platform,
task.taskid,
task.autotasktype.value,
1 if task._is_period else 0,
task.periodnum,
task.cmd.stratagy.interval,
task.laststarttime,
task.lastendtime,
task.cmdstatus.value,
task.batchtotalcount,
task.batchcompletecount,
0,
task.source,
'',
task.createtime,
helper_time.ts_since_1970_tz(),
0,
))
if result is None or result.rowcount < 1: # or len(result) < 1:
res = False
else:
res = True
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"save new AutomatedTask error: %s" % traceback.format_exc())
return res
@table_locker(__tb_AutomatedTask._tbname)
def select_automatedtask(self, conds: SqlConditions) -> dict:
"""按条件搜索任务,返回数据行转换成的字段字典"""
conn: SqliteConn = None
cursor = None
conds: SqlConditions = conds
try:
cmd = f'''SELECT
Platform,
TaskId,
AutoTaskType,
IsPeriod,
PeriodNum,
Interval,
LastStartTime,
LastEndTime,
Status,
BatchTotalCount,
BatchCompleteCount,
Progress,
Source,
CmdRcvMsg,
CreateTime,
UpdateTime,
Sequence
FROM {self._tbname} WHERE {conds.text_normal}'''
for conn in self.connect_all(5):
conn: SqliteConn = conn
conn._conn.row_factory = self._dict_factory
try:
cursor = conn.cursor
cursor.execute(cmd, conds.params)
result = cursor.fetchall()
if result is None or len(result) < 1:
continue
fields: dict = {}
for i in range(len(result[0])):
fields[cursor.description[i][0].lower()] = result[0][i]
return fields
except Exception:
self._logger.error("Get AutomatedTask error: {}".format(
traceback.format_exc()))
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Get IScanTask error: %s" % traceback.format_exc())
    @table_locker(__tb_AutomatedTask._tbname)
    def select_automatedtasks(self, conds: SqlConditions) -> iter:
        """Search tasks matching *conds*; yield each matching row (from all
        db files) as a {lowercased_column_name: value} dict.

        Args:
            conds: SqlConditions supplying the WHERE clause text and params.
        """
        conn: SqliteConn = None
        cursor = None
        conds: SqlConditions = conds
        try:
            cmd = f'''SELECT
            Platform,
            TaskId,
            AutoTaskType,
            IsPeriod,
            PeriodNum,
            Interval,
            LastStartTime,
            LastEndTime,
            Status,
            BatchTotalCount,
            BatchCompleteCount,
            Progress,
            Source,
            CmdRcvMsg,
            CreateTime,
            UpdateTime,
            Sequence
            FROM {self._tbname} WHERE {conds.text_normal}'''
            for conn in self.connect_all(5):
                conn: SqliteConn = conn
                # NOTE(review): rows come back through self._dict_factory; the
                # integer indexing below assumes the factory's rows still
                # support positional access -- verify _dict_factory's output.
                conn._conn.row_factory = self._dict_factory
                try:
                    cursor = conn.cursor
                    cursor.execute(cmd, conds.params)
                    result = cursor.fetchall()
                    if result is None or len(result) < 1:
                        continue
                    for row in result:
                        fields: dict = {}
                        for i in range(len(row)):
                            fields[cursor.description[i][0].lower()] = row[i]
                        yield fields
                except Exception:
                    self._logger.error("Get AutomatedTask error: {}".format(
                        traceback.format_exc()))
                finally:
                    if not conn is None:
                        conn.close()
        except Exception:
            self._logger.error(
                "Get AutomatedTask error: %s" % traceback.format_exc())
@table_locker(__tb_AutomatedTask._tbname)
def update_autotask_status(
self,
platform: str,
taskid: str,
cmdstatus: ECommandStatus,
) -> bool:
"""更新task的Status状态字段"""
conn: SqliteConn = None
cursor = None
res: bool = False
try:
cmd = f'''UPDATE {self._tbname} set
Status=?
WHERE Platform=? and Taskid=?;'''
for conn in self.connect_all(5):
conn: SqliteConn = conn
try:
cursor = conn.cursor
result = cursor.execute(cmd, (
cmdstatus.value,
platform,
taskid,
))
if not result is None and result.rowcount > 0:
res = True # 一定只有一个子任务
break
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
if res:
break
except Exception:
self._logger.error("update {} Status error: {}".format(
self._tbname, traceback.format_exc()))
return res
    @table_locker(__tb_AutomatedTask._tbname)
    def update_automatedtask2(self, task: AutomatedTask) -> bool:
        """Update the AutomatedTask table: overwrite every mutable field of
        the row matching (platform, taskid).

        Args:
            task: AutomatedTask carrying the new field values.

        Returns:
            True when a row was found and updated in some db file.
        """
        res = False
        conn: SqliteConn = None
        cursor = None
        task: AutomatedTask = task
        try:
            cmd = f'''SELECT COUNT(1) FROM {self._tbname} WHERE Platform=? and TaskId=?'''
            for conn in self.connect_all(5):
                conn: SqliteConn = conn
                try:
                    cursor = conn.cursor
                    cursor.execute(cmd, (task._platform, task.taskid))
                    result = cursor.fetchall()
                    if result[0][0] > 0:
                        # Match on TaskId+Platform only; the stored row is
                        # overwritten unconditionally (no comparison with the
                        # task file's timestamp) so the latest user action wins.
                        # A given TaskId is always assigned to the same ClientId.
                        cmd = f'''UPDATE {self._tbname} set
                        IsPeriod=?,
                        PeriodNum=?,
                        Interval=?,
                        LastStartTime=?,
                        LastEndTime=?,
                        Status=?,
                        BatchTotalCount=?,
                        BatchCompleteCount=?,
                        Progress=?,
                        Source=?,
                        CmdRcvMsg=?,
                        Sequence=?,
                        UpdateTime=? WHERE TaskId=? and Platform=?;'''
                        result = cursor.execute(cmd, (
                            1 if task._is_period else 0,
                            task.periodnum,
                            task.cmd.stratagy.interval,
                            task.laststarttime,
                            task.lastendtime,
                            task.cmdstatus.value,
                            task.batchtotalcount,
                            task.batchcompletecount,
                            task.progress,
                            task.source,
                            task.cmdrcvmsg,
                            task.sequence,
                            helper_time.ts_since_1970_tz(),
                            task.taskid,
                            task.platform,  # NOTE(review): other methods use task._platform -- confirm .platform is an equivalent accessor
                        ))
                        if result is None or result.rowcount < 1:  # or len(result) < 1:
                            continue
                        else:
                            res = True
                except Exception as ex:
                    conn._conn.rollback()
                    raise ex
                else:
                    conn.commit()
                finally:
                    if not conn is None:
                        conn.close()
                if res:
                    break
        except Exception:
            self._logger.error(
                "Update AutomatedTask error: %s" % traceback.format_exc())
        return res
    @table_locker(__tb_AutomatedTask._tbname)
    def update_automatedtask3(self, platform: str, taskid: str,
                              updatefields: dict) -> bool:
        """Update only the given fields of the AutomatedTask row keyed by
        (platform, taskid).

        Args:
            platform: platform the task belongs to.
            taskid: the task identifier.
            updatefields: {column_name: new_value}. The keys are interpolated
                directly into the SQL text, so they must come from trusted
                code, never from external input.

        Returns:
            True when a row was updated; also True for an empty/non-dict
            updatefields (nothing to do).
        """
        res = False
        conn: SqliteConn = None
        cursor = None
        try:
            # Nothing to update -> report success.
            if not isinstance(updatefields, dict) or len(updatefields) < 1:
                return True
            cmd = f'''SELECT COUNT(1) FROM {self._tbname} WHERE Platform=? and TaskId=?'''
            for conn in self.connect_all(5):
                conn: SqliteConn = conn
                try:
                    cursor = conn.cursor
                    cursor.execute(cmd, (platform, taskid))
                    result = cursor.fetchall()
                    if result[0][0] > 0:
                        # Match on TaskId+Platform only; overwrite the stored
                        # values unconditionally so the latest user action wins.
                        # A given TaskId is always assigned to the same ClientId.
                        sqlset = ''
                        for k in updatefields.keys():
                            sqlset = sqlset + '{}=?,'.format(k)
                        sqlset = sqlset.rstrip(',')
                        cmd = f'''UPDATE {self._tbname} set {sqlset} WHERE TaskId=? and Platform=?;'''
                        params = [v for v in updatefields.values()]
                        params.append(taskid)
                        params.append(platform)
                        result = cursor.execute(cmd, params)
                        if result is None or result.rowcount < 1:  # or len(result) < 1:
                            continue
                        else:
                            res = True
                except Exception as ex:
                    conn._conn.rollback()
                    raise ex
                else:
                    conn.commit()
                finally:
                    if not conn is None:
                        conn.close()
                if res:
                    break
        except Exception:
            self._logger.error(
                "Update AutomatedTask error: %s" % traceback.format_exc())
        return res
@table_locker(__tb_AutomatedTask._tbname)
def get_automatedtask_batch_total_count(self, platform: str,
taskid: str) -> int:
"""查询指定task的batchtotalcount,返回-1表示没找到指定的task"""
res: int = -1 #返回-1表示没找到指定的task
conn: SqliteConn = None
cursor = None
try:
cmd = f'''SELECT BatchTotalCount FROM {self._tbname} WHERE Platform=? and TaskId=?'''
for conn in self.connect_all(5):
conn: SqliteConn = conn
try:
cursor = conn.cursor
cursor.execute(cmd, (
platform,
taskid,
))
result = cursor.fetchall()
if result is None or len(result) < 1:
continue
res = result[0][0]
except Exception:
self._logger.error(
"Get AutomatedTask batchtotalcount error: {}".format(
traceback.format_exc()))
finally:
if not conn is None:
conn.close()
if res:
break
except Exception:
self._logger.error("Get AutomatedTask batchtotalcount error: %s" %
traceback.format_exc())
return res
@table_locker(__tb_AutomatedTask._tbname)
def get_automatedtask_batch_complete_count(self, platform: str,
taskid: str) -> int:
"""查询指定task的batchtotalcount,返回-1表示没找到指定的task"""
res: int = -1 #返回-1表示没找到指定的task
conn: SqliteConn = None
cursor = None
try:
cmd = f'''SELECT BatchCompleteCount FROM {self._tbname} WHERE Platform=? and TaskId=?'''
for conn in self.connect_all(5):
conn: SqliteConn = conn
try:
cursor = conn.cursor
cursor.execute(cmd, (
platform,
taskid,
))
result = cursor.fetchall()
if result is None or len(result) < 1:
continue
res = result[0][0]
except Exception:
self._logger.error(
"Get AutomatedTask batchcompletecount error: {}".
format(traceback.format_exc()))
finally:
if not conn is None:
conn.close()
if res:
break
except Exception:
self._logger.error("Get AutomatedTask batchcompletecount error: %s"
% traceback.format_exc())
return res
@table_locker(__tb_AutomatedTask._tbname)
def update_automatedtask_batch_total_count(self,
task: AutomatedTask) -> bool:
"""更新指定task的batchtotalcount字段,返回bool指示是否成功"""
res: bool = False
conn: SqliteConn = None
cursor = None
try:
cmd = f'''SELECT COUNT(1) FROM {self._tbname} | |
""" IGMP_STD_MIB
The MIB module for IGMP Management.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
# NOTE(review): this looks like generated ydk model binding code -- prefer
# regenerating from the YANG model over hand edits.
class IGMPSTDMIB(Entity):
    """
    .. attribute:: igmpinterfacetable
    The (conceptual) table listing the interfaces on which IGMP is enabled
    **type**\: :py:class:`IgmpInterfaceTable <ydk.models.cisco_ios_xe.IGMP_STD_MIB.IGMPSTDMIB.IgmpInterfaceTable>`
    **config**\: False
    .. attribute:: igmpcachetable
    The (conceptual) table listing the IP multicast groups for which there are members on a particular interface
    **type**\: :py:class:`IgmpCacheTable <ydk.models.cisco_ios_xe.IGMP_STD_MIB.IGMPSTDMIB.IgmpCacheTable>`
    **config**\: False
    """
    _prefix = 'IGMP-STD-MIB'  # YANG module prefix
    _revision = '2000-09-28'  # revision date of the MIB module
    def __init__(self):
        """Wire up the two child tables and freeze the entity structure."""
        super(IGMPSTDMIB, self).__init__()
        self._top_entity = None
        self.yang_name = "IGMP-STD-MIB"
        self.yang_parent_name = "IGMP-STD-MIB"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # maps YANG child names to (python attribute name, class) pairs
        self._child_classes = OrderedDict([("igmpInterfaceTable", ("igmpinterfacetable", IGMPSTDMIB.IgmpInterfaceTable)), ("igmpCacheTable", ("igmpcachetable", IGMPSTDMIB.IgmpCacheTable))])
        self._leafs = OrderedDict()  # no scalar leaves at this level
        self.igmpinterfacetable = IGMPSTDMIB.IgmpInterfaceTable()
        self.igmpinterfacetable.parent = self
        self._children_name_map["igmpinterfacetable"] = "igmpInterfaceTable"
        self.igmpcachetable = IGMPSTDMIB.IgmpCacheTable()
        self.igmpcachetable.parent = self
        self._children_name_map["igmpcachetable"] = "igmpCacheTable"
        self._segment_path = lambda: "IGMP-STD-MIB:IGMP-STD-MIB"
        # after freezing, attribute writes are routed through _perform_setattr
        self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate every attribute write to the YDK base helper; the empty list
    # is the set of leaf names treated specially (none for this entity).
    self._perform_setattr(IGMPSTDMIB, [], name, value)
class IgmpInterfaceTable(Entity):
    """
    The (conceptual) table listing the interfaces on which IGMP
    is enabled.

    .. attribute:: igmpinterfaceentry

    An entry (conceptual row) representing an interface on which IGMP is enabled

    **type**\: list of :py:class:`IgmpInterfaceEntry <ydk.models.cisco_ios_xe.IGMP_STD_MIB.IGMPSTDMIB.IgmpInterfaceTable.IgmpInterfaceEntry>`

    **config**\: False
    """
    # Generated-code constants identifying the source MIB module/revision.
    _prefix = 'IGMP-STD-MIB'
    _revision = '2000-09-28'

    def __init__(self):
        """Initialise the table container and its (initially empty) row list."""
        super(IGMPSTDMIB.IgmpInterfaceTable, self).__init__()
        self.yang_name = "igmpInterfaceTable"
        self.yang_parent_name = "IGMP-STD-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("igmpInterfaceEntry", ("igmpinterfaceentry", IGMPSTDMIB.IgmpInterfaceTable.IgmpInterfaceEntry))])
        self._leafs = OrderedDict()
        # Rows are held in a YDK-managed list keyed per IgmpInterfaceEntry.
        self.igmpinterfaceentry = YList(self)
        self._segment_path = lambda: "igmpInterfaceTable"
        self._absolute_path = lambda: "IGMP-STD-MIB:IGMP-STD-MIB/%s" % self._segment_path()
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through the YDK validation helper.
        self._perform_setattr(IGMPSTDMIB.IgmpInterfaceTable, [], name, value)


    class IgmpInterfaceEntry(Entity):
        """
        An entry (conceptual row) representing an interface on
        which IGMP is enabled.

        .. attribute:: igmpinterfaceifindex  (key)

        The ifIndex value of the interface for which IGMP is enabled

        **type**\: int

        **range:** 1..2147483647

        **config**\: False

        .. attribute:: igmpinterfacequeryinterval

        The frequency at which IGMP Host\-Query packets are transmitted on this interface

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        **units**\: seconds

        .. attribute:: igmpinterfacestatus

        The activation of a row enables IGMP on the interface. The destruction of a row disables IGMP on the interface

        **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`

        **config**\: False

        .. attribute:: igmpinterfaceversion

        The version of IGMP which is running on this interface. This object can be used to configure a router capable of running either value. For IGMP to function correctly, all routers on a LAN must be configured to run the same version of IGMP on that LAN

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfacequerier

        The address of the IGMP Querier on the IP subnet to which this interface is attached

        **type**\: str

        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

        **config**\: False

        .. attribute:: igmpinterfacequerymaxresponsetime

        The maximum query response time advertised in IGMPv2 queries on this interface

        **type**\: int

        **range:** 0..255

        **config**\: False

        **units**\: tenths of seconds

        .. attribute:: igmpinterfacequerieruptime

        The time since igmpInterfaceQuerier was last changed

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfacequerierexpirytime

        The amount of time remaining before the Other Querier Present Timer expires. If the local system is the querier, the value of this object is zero

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfaceversion1queriertimer

        The time remaining until the host assumes that there are no IGMPv1 routers present on the interface. While this is non\- zero, the host will reply to all queries with version 1 membership reports

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfacewrongversionqueries

        The number of queries received whose IGMP version does not match igmpInterfaceVersion, over the lifetime of the row entry. IGMP requires that all routers on a LAN be configured to run the same version of IGMP. Thus, if any queries are received with the wrong version, this indicates a configuration error

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfacejoins

        The number of times a group membership has been added on this interface; that is, the number of times an entry for this interface has been added to the Cache Table. This object gives an indication of the amount of IGMP activity over the lifetime of the row entry

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfaceproxyifindex

        Some devices implement a form of IGMP proxying whereby memberships learned on the interface represented by this row, cause IGMP Host Membership Reports to be sent on the interface whose ifIndex value is given by this object. Such a device would implement the igmpV2RouterMIBGroup only on its router interfaces (those interfaces with non\-zero igmpInterfaceProxyIfIndex). Typically, the value of this object is 0, indicating that no proxying is being done

        **type**\: int

        **range:** 0..2147483647

        **config**\: False

        .. attribute:: igmpinterfacegroups

        The current number of entries for this interface in the Cache Table

        **type**\: int

        **range:** 0..4294967295

        **config**\: False

        .. attribute:: igmpinterfacerobustness

        The Robustness Variable allows tuning for the expected packet loss on a subnet. If a subnet is expected to be lossy, the Robustness Variable may be increased. IGMP is robust to (Robustness Variable\-1) packet losses

        **type**\: int

        **range:** 1..255

        **config**\: False

        .. attribute:: igmpinterfacelastmembqueryintvl

        The Last Member Query Interval is the Max Response Time inserted into Group\-Specific Queries sent in response to Leave Group messages, and is also the amount of time between Group\-Specific Query messages. This value may be tuned to modify the leave latency of the network. A reduced value results in reduced time to detect the loss of the last member of a group. The value of this object is irrelevant if igmpInterfaceVersion is 1

        **type**\: int

        **range:** 0..255

        **config**\: False

        **units**\: tenths of seconds
        """
        _prefix = 'IGMP-STD-MIB'
        _revision = '2000-09-28'

        def __init__(self):
            """Initialise one conceptual row; all leafs start unset (None)."""
            super(IGMPSTDMIB.IgmpInterfaceTable.IgmpInterfaceEntry, self).__init__()
            self.yang_name = "igmpInterfaceEntry"
            self.yang_parent_name = "igmpInterfaceTable"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            # igmpinterfaceifindex is the sole list key (see _segment_path).
            self.ylist_key_names = ['igmpinterfaceifindex']
            self._child_classes = OrderedDict([])
            # Leaf metadata: python name -> (YLeaf wrapper, python type info).
            self._leafs = OrderedDict([
                ('igmpinterfaceifindex', (YLeaf(YType.int32, 'igmpInterfaceIfIndex'), ['int'])),
                ('igmpinterfacequeryinterval', (YLeaf(YType.uint32, 'igmpInterfaceQueryInterval'), ['int'])),
                ('igmpinterfacestatus', (YLeaf(YType.enumeration, 'igmpInterfaceStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
                ('igmpinterfaceversion', (YLeaf(YType.uint32, 'igmpInterfaceVersion'), ['int'])),
                ('igmpinterfacequerier', (YLeaf(YType.str, 'igmpInterfaceQuerier'), ['str'])),
                ('igmpinterfacequerymaxresponsetime', (YLeaf(YType.uint32, 'igmpInterfaceQueryMaxResponseTime'), ['int'])),
                ('igmpinterfacequerieruptime', (YLeaf(YType.uint32, 'igmpInterfaceQuerierUpTime'), ['int'])),
                ('igmpinterfacequerierexpirytime', (YLeaf(YType.uint32, 'igmpInterfaceQuerierExpiryTime'), ['int'])),
                ('igmpinterfaceversion1queriertimer', (YLeaf(YType.uint32, 'igmpInterfaceVersion1QuerierTimer'), ['int'])),
                ('igmpinterfacewrongversionqueries', (YLeaf(YType.uint32, 'igmpInterfaceWrongVersionQueries'), ['int'])),
                ('igmpinterfacejoins', (YLeaf(YType.uint32, 'igmpInterfaceJoins'), ['int'])),
                ('igmpinterfaceproxyifindex', (YLeaf(YType.int32, 'igmpInterfaceProxyIfIndex'), ['int'])),
                ('igmpinterfacegroups', (YLeaf(YType.uint32, 'igmpInterfaceGroups'), ['int'])),
                ('igmpinterfacerobustness', (YLeaf(YType.uint32, 'igmpInterfaceRobustness'), ['int'])),
                ('igmpinterfacelastmembqueryintvl', (YLeaf(YType.uint32, 'igmpInterfaceLastMembQueryIntvl'), ['int'])),
            ])
            self.igmpinterfaceifindex = None
            self.igmpinterfacequeryinterval = None
            self.igmpinterfacestatus = None
            self.igmpinterfaceversion = None
            self.igmpinterfacequerier = None
            self.igmpinterfacequerymaxresponsetime = None
            self.igmpinterfacequerieruptime = None
            self.igmpinterfacequerierexpirytime = None
            self.igmpinterfaceversion1queriertimer = None
            self.igmpinterfacewrongversionqueries = None
            self.igmpinterfacejoins = None
            self.igmpinterfaceproxyifindex = None
            self.igmpinterfacegroups = None
            self.igmpinterfacerobustness = None
            self.igmpinterfacelastmembqueryintvl = None
            # Row path embeds the key value, e.g. igmpInterfaceEntry[igmpInterfaceIfIndex='3'].
            self._segment_path = lambda: "igmpInterfaceEntry" + "[igmpInterfaceIfIndex='" + str(self.igmpinterfaceifindex) + "']"
            self._absolute_path = lambda: "IGMP-STD-MIB:IGMP-STD-MIB/igmpInterfaceTable/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            # All leaf names are listed so the YDK helper can validate writes.
            self._perform_setattr(IGMPSTDMIB.IgmpInterfaceTable.IgmpInterfaceEntry, ['igmpinterfaceifindex', 'igmpinterfacequeryinterval', 'igmpinterfacestatus', 'igmpinterfaceversion', 'igmpinterfacequerier', 'igmpinterfacequerymaxresponsetime', 'igmpinterfacequerieruptime', 'igmpinterfacequerierexpirytime', 'igmpinterfaceversion1queriertimer', 'igmpinterfacewrongversionqueries', 'igmpinterfacejoins', 'igmpinterfaceproxyifindex', 'igmpinterfacegroups', 'igmpinterfacerobustness', 'igmpinterfacelastmembqueryintvl'], name, value)
class IgmpCacheTable(Entity):
    """
    The (conceptual) table listing the IP multicast groups for
    which there are members on a particular interface.

    .. attribute:: igmpcacheentry

    An entry (conceptual row) in the igmpCacheTable

    **type**\: list of :py:class:`IgmpCacheEntry <ydk.models.cisco_ios_xe.IGMP_STD_MIB.IGMPSTDMIB.IgmpCacheTable.IgmpCacheEntry>`

    **config**\: False
    """
    # Generated-code constants identifying the source MIB module/revision.
    _prefix = 'IGMP-STD-MIB'
    _revision = '2000-09-28'

    def __init__(self):
        """Initialise the cache-table container and its (initially empty) row list."""
        super(IGMPSTDMIB.IgmpCacheTable, self).__init__()
        self.yang_name = "igmpCacheTable"
        self.yang_parent_name = "IGMP-STD-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("igmpCacheEntry", ("igmpcacheentry", IGMPSTDMIB.IgmpCacheTable.IgmpCacheEntry))])
        self._leafs = OrderedDict()
        # Rows are held in a YDK-managed list of IgmpCacheEntry objects.
        self.igmpcacheentry = YList(self)
        self._segment_path = lambda: "igmpCacheTable"
        self._absolute_path = lambda: "IGMP-STD-MIB:IGMP-STD-MIB/%s" % self._segment_path()
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through the YDK validation helper.
        self._perform_setattr(IGMPSTDMIB.IgmpCacheTable, [], name, value)
class IgmpCacheEntry(Entity):
"""
An entry (conceptual row) in the igmpCacheTable.
.. attribute:: igmpcacheaddress (key)
The IP multicast group address for which this entry contains information
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: | |
from collections import defaultdict
from distutils.version import LooseVersion
from functools import partial
import numba
from numba.core import compiler, cgutils, types
from numba.core.errors import TypingError
from numba.core.extending import intrinsic
from numba.experimental import structref
from numba.core.typed_passes import type_inference_stage
import numpy as np
from africanus.averaging.support import _unique_internal
from africanus.experimental.rime.fused.arguments import ArgumentPack
from africanus.experimental.rime.fused.terms.core import StateStructRef
try:
    # Parse e.g. "0.55.2" into major/minor; assumes a three-component
    # version string (LooseVersion.version is the parsed component list).
    NUMBA_MAJOR, NUMBA_MINOR, _ = LooseVersion(numba.__version__).version
except AttributeError:
    # Readthedocs
    # NOTE(review): presumably the docs build mocks numba so the parsed
    # object lacks a usable ``version`` attribute — TODO confirm.
    NUMBA_MAJOR, NUMBA_MINOR = 0, 0
def scalar_scalar(lhs, rhs):
    """Product of two scalar Jones terms."""
    product = lhs * rhs
    return product
def scalar_diag(lhs, rhs):
    """Scale both entries of a diagonal (2-term) Jones tuple by a scalar."""
    d0, d1 = rhs
    return lhs * d0, lhs * d1
def scalar_full(lhs, rhs):
    """Scale all four entries of a full Jones tuple by a scalar."""
    xx, xy, yx, yy = rhs
    return lhs * xx, lhs * xy, lhs * yx, lhs * yy
def diag_scalar(lhs, rhs):
    """Scale a diagonal Jones tuple by a scalar on the right."""
    d0, d1 = lhs
    return d0 * rhs, d1 * rhs
def diag_diag(lhs, rhs):
    """Elementwise product of two diagonal Jones tuples."""
    a0, a1 = lhs
    b0, b1 = rhs
    return a0 * b0, a1 * b1
def diag_full(lhs, rhs):
    """Multiply a diagonal Jones matrix into a full (flattened 2x2) one."""
    d0, d1 = lhs
    xx, xy, yx, yy = rhs
    # Row i of the product is row i of rhs scaled by the i'th diagonal entry.
    return (
        d0 * xx,
        d0 * xy,
        d1 * yx,
        d1 * yy)
def full_scalar(lhs, rhs):
    """Scale a full (flattened 2x2) Jones matrix by a scalar on the right."""
    xx, xy, yx, yy = lhs
    return (
        xx * rhs,
        xy * rhs,
        yx * rhs,
        yy * rhs)
def full_diag(lhs, rhs):
    """Multiply a full (flattened 2x2) Jones matrix by a diagonal one."""
    xx, xy, yx, yy = lhs
    d0, d1 = rhs
    # Column j of the product is column j of lhs scaled by rhs[j].
    return (
        xx * d0,
        xy * d1,
        yx * d0,
        yy * d1)
def full_full(lhs, rhs):
    """Full 2x2 matrix product of two flattened Jones matrices."""
    a, b, c, d = lhs
    w, x, y, z = rhs
    return (
        a * w + b * y,
        a * x + b * z,
        c * w + d * y,
        c * x + d * z)
def hermitian_scalar(jones):
    """Hermitian (conjugate) transpose of a scalar Jones term."""
    conjugated = np.conj(jones)
    return conjugated
def hermitian_diag(jones):
    """Hermitian transpose of a diagonal Jones tuple (conjugate each entry)."""
    d0, d1 = jones
    return np.conj(d0), np.conj(d1)
def hermitian_full(jones):
    """Hermitian transpose of a flattened 2x2 Jones matrix."""
    xx, xy, yx, yy = jones
    # Transpose swaps the off-diagonal entries; conjugate everything.
    return np.conj(xx), np.conj(yx), np.conj(xy), np.conj(yy)
# Dispatch table: (lhs kind, rhs kind) -> multiplication function, where a
# kind is one of the strings produced by classify_arg ("scalar"/"diag"/"full").
_jones_typ_map = {
    ("scalar", "scalar"): scalar_scalar,
    ("scalar", "diag"): scalar_diag,
    ("scalar", "full"): scalar_full,
    ("diag", "scalar"): diag_scalar,
    ("diag", "diag"): diag_diag,
    ("diag", "full"): diag_full,
    ("full", "scalar"): full_scalar,
    ("full", "diag"): full_diag,
    ("full", "full"): full_full
}
def classify_arg(arg):
    """
    Returns
    -------
    arg_type : {"scalar", "diag", "full", None}
        A string describing the argument type, else `None`
        if this is not possible
    """
    if isinstance(arg, types.Number):
        return "scalar"

    if isinstance(arg, types.BaseTuple):
        ncorr = len(arg)

        if ncorr == 2:
            return "diag"

        if ncorr == 4:
            return "full"

    return None
def term_mul(lhs, rhs):
    """
    Parameters
    ----------
    lhs : :class:`numba.Type`
    rhs : :class:`numba.Type`

    Returns
    -------
    multiplier : callable
        Function multiplying arguments of types lhs and rhs together
    """
    key = (classify_arg(lhs), classify_arg(rhs))

    try:
        return _jones_typ_map[key]
    except KeyError:
        raise TypingError(f"No known multiplication "
                          f"function for {lhs} and {rhs}")
# Dispatch table: correlation kind (from classify_arg) -> hermitian-transpose
# function for that Jones representation.
_hermitian_map = {
    "scalar": hermitian_scalar,
    "diag": hermitian_diag,
    "full": hermitian_full
}
def hermitian(jones):
    """Return the hermitian-transpose function matching *jones*'s correlation kind."""
    kind = classify_arg(jones)

    try:
        return _hermitian_map[kind]
    except KeyError:
        raise TypingError(f"No known hermitian function "
                          f"for {jones}: {kind}.")
def unify_jones_terms(typingctx, lhs, rhs):
    """
    Unify Jones Term Types.
    """
    corr_map = {"scalar": 1, "diag": 2, "full": 4}

    lhs_kind = classify_arg(lhs)
    rhs_kind = classify_arg(rhs)

    try:
        lhs_corrs = corr_map[lhs_kind]
        rhs_corrs = corr_map[rhs_kind]
    except KeyError:
        raise TypingError(f"{lhs} or {rhs} has no "
                          f"entry in the {corr_map} "
                          f"mapping")

    # Flatten each side into its component element types.
    if lhs_corrs == 1:
        lhs_types = (lhs,)
    else:
        lhs_types = tuple(lhs)

    if rhs_corrs == 1:
        rhs_types = (rhs,)
    else:
        rhs_types = tuple(rhs)

    out_type = typingctx.unify_types(*lhs_types, *rhs_types)
    out_corrs = max(lhs_corrs, rhs_corrs)

    if out_corrs == 1:
        return out_type

    return types.Tuple((out_type,) * out_corrs)
@intrinsic
def tuple_adder(typingctx, t1, t2):
    """Numba intrinsic returning the element-wise sum of two equal-length tuples."""
    if not isinstance(t1, types.BaseTuple):
        raise TypingError(f"{t1} must be a Tuple")

    if not isinstance(t2, types.BaseTuple):
        raise TypingError(f"{t2} must be a Tuple")

    if not len(t1) == len(t2):
        raise TypingError(f"len({t1}) != len({t2})")

    # The declared result type is t1 (the lhs tuple type).
    # NOTE(review): each element below is compiled at the *unified* element
    # type, which appears to assume unification yields t1's element types —
    # TODO confirm against numba's type-unification rules.
    sig = t1(t1, t2)

    def codegen(context, builder, signature, args):
        def _add(x, y):
            return x + y

        [t1, t2] = args
        [t1_type, t2_type] = signature.args
        return_type = signature.return_type
        llvm_ret_type = context.get_value_type(return_type)
        # Start from an all-null tuple value and fill in each slot.
        ret_tuple = cgutils.get_null_value(llvm_ret_type)

        for i, (t1e, t2e) in enumerate(zip(t1_type, t2_type)):
            v1 = builder.extract_value(t1, i)
            v2 = builder.extract_value(t2, i)
            # Unify the two element types, then compile x + y for that pair.
            vr = typingctx.unify_types(t1e, t2e)

            data = context.compile_internal(builder, _add,
                                            vr(t1e, t2e), [v1, v2])
            ret_tuple = builder.insert_value(ret_tuple, data, i)

        return ret_tuple

    return sig, codegen
class IntrinsicFactory:
KEY_ARGS = ("utime", "time_index",
"uantenna", "antenna1_index", "antenna2_index",
"ufeed", "feed1_index", "feed2_index")
def __init__(self, arg_dependencies):
    # Argument-dependency metadata produced elsewhere in the fused-RIME
    # machinery; presumably exposes .names, .desired, .maybe_create,
    # .optional, .optional_defaults etc. (see the methods below) — TODO confirm.
    self.argdeps = arg_dependencies
def _resolve_arg_dependencies(self):
    """Work out which missing arguments can be built by transformers.

    Returns
    -------
    (opt_defaults, can_create) : tuple
        ``opt_defaults`` maps each optional argument name to its single
        agreed default value; ``can_create`` maps a missing argument name
        to the transformer that can produce it.

    Raises
    ------
    ValueError
        If a desired argument is missing and no transformer can build it,
        or if transformers disagree on an optional argument's default.
    """
    argdeps = self.argdeps
    # KEY_ARGS will be created
    supplied_args = set(argdeps.names) | set(self.KEY_ARGS)
    missing = set(argdeps.desired.keys()) - supplied_args
    available_args = set(argdeps.names) | supplied_args
    failed_transforms = defaultdict(list)
    can_create = {}
    # Try create missing argument with transformers
    for arg in list(missing):
        # We already know how to create it
        if arg in can_create:
            continue
        # We don't know how to create
        if arg not in argdeps.maybe_create:
            continue
        for transformer in argdeps.maybe_create[arg]:
            # We didn't have the arguments, make a note of this
            if not set(transformer.ARGS).issubset(available_args):
                failed_transforms[arg].append(
                    (transformer, set(transformer.ARGS)))
                continue
            # The transformer can create arg
            # NOTE(review): a transformer is only accepted if no earlier
            # transformer for the same arg failed — so a failure blocks
            # later candidates for that arg. TODO confirm this is intended.
            if arg not in failed_transforms:
                can_create[arg] = transformer
                missing.remove(arg)
    # Fail if required arguments are missing
    for arg in missing:
        terms_wanting = argdeps.desired[arg]
        err_msgs = []
        err_msgs.append(f"{set(terms_wanting)} need(s) '{arg}'.")
        if arg in failed_transforms:
            for transformer, needed in failed_transforms[arg]:
                err_msgs.append(f"{transformer} can create {arg} "
                                f"but needs {needed}, of which "
                                f"{needed - set(argdeps.names)} is "
                                f"missing from the input arguments.")
        # Raises on the first still-missing argument.
        raise ValueError("\n".join(err_msgs))
    opt_defaults = {}
    # Collect each chosen transformer's keyword defaults.
    for transformer in can_create.values():
        for k, d in transformer.KWARGS.items():
            argdeps.optional[k].append((transformer, d))
    # Every optional must have exactly one agreed default value.
    for k, v in argdeps.optional.items():
        _, defaults = zip(*v)
        defaults = set(defaults)
        if len(defaults) != 1:
            raise ValueError(f"Multiple terms: {argdeps.terms} have "
                             f"contradicting definitions for "
                             f"{k}: {defaults}")
        opt_defaults[k] = defaults.pop()
    # Arguments actually supplied by the caller override any default.
    for name in argdeps.names:
        opt_defaults.pop(name, None)
    return opt_defaults, can_create
def pack_optionals_and_indices_fn(self):
    """Build an intrinsic extending the argument tuple with defaults and indices.

    Returns
    -------
    (out_names, pack_index) : tuple
        ``out_names`` is the ordered name tuple of the extended argument
        tuple: supplied names, then optional defaults, then KEY_ARGS.
        ``pack_index`` is a numba intrinsic producing that tuple.
    """
    argdeps = self.argdeps
    # Output layout: supplied args + optional defaults + key/index arrays.
    out_names = (argdeps.names +
                 tuple(argdeps.optional_defaults.keys()) +
                 tuple(argdeps.KEY_ARGS))

    @intrinsic
    def pack_index(typingctx, args):
        assert len(args) == len(argdeps.names)
        it = zip(argdeps.names, args, range(len(argdeps.names)))
        # name -> (numba type, position in the incoming tuple)
        arg_info = {n: (t, i) for n, t, i in it}

        # Types of the key/index entries appended at the end; unique-value
        # arrays take their type from the corresponding input column.
        key_types = {
            "utime": arg_info["time"][0],
            "time_index": types.int64[:],
            "uantenna": arg_info["antenna1"][0],
            "antenna1_index": types.int64[:],
            "antenna2_index": types.int64[:],
            "ufeed": arg_info["feed1"][0],
            "feed1_index": types.int64[:],
            "feed2_index": types.int64[:]
        }

        # Sanity check: ordering here must match KEY_ARGS exactly.
        if tuple(key_types.keys()) != argdeps.KEY_ARGS:
            raise RuntimeError(
                f"{tuple(key_types.keys())} != {argdeps.KEY_ARGS}")

        rvt = typingctx.resolve_value_type_prefer_literal
        # (name, numba type, python default) for every optional argument.
        optionals = [(n, rvt(d), d) for n, d
                     in argdeps.optional_defaults.items()]
        optional_types = tuple(p[1] for p in optionals)

        return_type = types.Tuple(args.types + optional_types +
                                  tuple(key_types.values()))
        sig = return_type(args)

        def codegen(context, builder, signature, args):
            return_type = signature.return_type
            llvm_ret_type = context.get_value_type(return_type)
            ret_tuple = cgutils.get_null_value(llvm_ret_type)

            # Extract supplied arguments from original arg tuple
            # and insert into the new one
            for i, typ in enumerate(signature.args[0]):
                value = builder.extract_value(args[0], i)
                context.nrt.incref(builder, signature.args[0][i], value)
                ret_tuple = builder.insert_value(ret_tuple, value, i)

            n = len(signature.args[0])

            # Insert necessary optional defaults (kwargs) into the
            # new argument tuple
            for i, (name, typ, default) in enumerate(optionals):
                if name != out_names[i + n]:
                    raise TypingError(f"{name} != {out_names[i + n]}")

                value = context.get_constant_generic(builder, typ, default)
                ret_tuple = builder.insert_value(ret_tuple, value, i + n)

            # Compute indexing arguments and insert into
            # the new tuple
            fn_args = [builder.extract_value(args[0], arg_info[a][1])
                       for a in argdeps.REQUIRED_ARGS]
            fn_arg_types = tuple(arg_info[k][0] for k
                                 in argdeps.REQUIRED_ARGS)
            fn_sig = types.Tuple(list(key_types.values()))(*fn_arg_types)

            # Compiled in nopython mode: unique values plus searchsorted
            # positions for time/antenna/feed columns.
            def _indices(time, antenna1, antenna2, feed1, feed2):
                utime, _, time_index, _ = _unique_internal(time)
                uants = np.unique(np.concatenate((antenna1, antenna2)))
                ufeeds = np.unique(np.concatenate((feed1, feed2)))
                antenna1_index = np.searchsorted(uants, antenna1)
                antenna2_index = np.searchsorted(uants, antenna2)
                feed1_index = np.searchsorted(ufeeds, feed1)
                feed2_index = np.searchsorted(ufeeds, feed2)

                return (utime, time_index,
                        uants, antenna1_index, antenna2_index,
                        ufeeds, feed1_index, feed2_index)

            index = context.compile_internal(builder, _indices,
                                             fn_sig, fn_args)
            n += len(optionals)

            for i, (name, value) in enumerate(key_types.items()):
                if name != out_names[i + n]:
                    raise TypingError(f"{name} != {out_names[i + n]}")

                value = builder.extract_value(index, i)
                ret_tuple = builder.insert_value(ret_tuple, value, i + n)

            return ret_tuple

        return sig, codegen

    return out_names, pack_index
def pack_transformed_fn(self, arg_names):
argdeps = self.argdeps
transformers = list(set(t for _, t in argdeps.can_create.items()))
out_names = arg_names + tuple(o for t in transformers
for o in t.OUTPUTS)
@intrinsic
def pack_transformed(typingctx, args):
assert len(args) == len(arg_names)
it = zip(arg_names, args, range(len(arg_names)))
arg_info = {n: (t, i) for n, t, i in it}
rvt = typingctx.resolve_value_type_prefer_literal
transform_output_types = []
for transformer in transformers:
# Figure out argument types for calling init_fields
kw = {}
for a in transformer.ARGS:
kw[a] = arg_info[a][0]
for a, d in transformer.KWARGS.items():
try:
kw[a] = arg_info[a][0]
except KeyError:
kw[a] = rvt(d)
fields, _ = transformer.init_fields(typingctx, **kw)
if len(transformer.OUTPUTS) == 0:
raise TypingError(f"{transformer} produces no outputs")
elif len(transformer.OUTPUTS) > 1:
if len(transformer.OUTPUTS) != len(fields):
raise TypingError(
f"{transformer} produces {transformer.OUTPUTS} "
f"but {transformer}.init_fields does not return "
f"a tuple of the same length, but {fields}")
transform_output_types.extend(t for _, t in fields)
# Create a return tuple containing the existing arguments
# with the transformed outputs added to the end
return_type = types.Tuple(args.types +
tuple(transform_output_types))
# Sanity check
if len(return_type) != len(out_names):
raise TypingError(f"len(return_type): {len(return_type)} != "
f"len(out_names): {len(out_names)}")
sig = return_type(args)
def codegen(context, builder, signature, args):
return_type = signature.return_type
llvm_ret_type = context.get_value_type(return_type)
ret_tuple = cgutils.get_null_value(llvm_ret_type)
# Extract supplied arguments from original arg tuple
# and insert into the new one
for i, typ in enumerate(signature.args[0]):
value = builder.extract_value(args[0], i)
context.nrt.incref(builder, signature.args[0][i], value)
ret_tuple = builder.insert_value(ret_tuple, value, i)
# Apply any argument transforms and insert their results
# into the new argument tuple
n = len(signature.args[0])
i = 0
for transformer in transformers:
# Check that outputs line up with output names
for j, o in enumerate(transformer.OUTPUTS):
if o != out_names[i + j + n]:
raise TypingError(f"{o} != {out_names[i + j + n]}")
transform_args | |
srcFreq = int(Database.getSymbolValue("core", "GCLK_1_FREQ"))
# OSCULP32K
elif src == "OSCULP32K":
srcFreq = 32768
# XOSC32K
elif src == "XOSC32K":
srcFreq = int(Database.getSymbolValue("core", "XOSC32K_FREQ"))
# DFLL
elif src == "DFLL":
srcFreq = int(Database.getSymbolValue("core", "DFLL_CLOCK_FREQ"))
# FDPLL0
elif src == "FDPLL0":
srcFreq = int(Database.getSymbolValue("core", "DPLL0_CLOCK_FREQ"))
# FDPLL1
elif src == "FDPLL1":
srcFreq = int(Database.getSymbolValue("core", "DPLL1_CLOCK_FREQ"))
divSel = int(Database.getSymbolValue(
"core", "GCLK_" + index + "_DIVSEL"))
div = int(Database.getSymbolValue("core", "GCLK_" + index + "_DIV"))
if divSel == 0:
if div != 0:
gclk_freq = int(srcFreq / float(div))
else:
gclk_freq = srcFreq
elif divSel == 1:
gclk_freq = int(srcFreq / float(2**(div + 1)))
prevFreq = symbol.getValue()
if prevFreq != gclk_freq:
if gclk_freq < 4294967295:
symbol.setValue(gclk_freq, 1)
else:
if symbol.getValue() > 0:
symbol.setValue(0, 1)
def topsort(graph):
    """Topologically order *graph* using Kahn's algorithm.

    *graph* maps each vertex to the list of vertices that depend on it.
    Returns the ordered vertex list, or an empty list when the graph
    contains a cycle.
    """
    from collections import deque

    # In-degree of every vertex: how many other vertices it depends on.
    indegree = dict.fromkeys(graph, 0)
    for dependents in graph.values():
        for dep in dependents:
            indegree[dep] = indegree[dep] + 1

    # Seed the FIFO queue with all vertices that depend on nothing.
    ready = deque()
    for vertex in indegree:
        if indegree[vertex] == 0:
            ready.append(vertex)

    order = []
    # Repeatedly emit a ready vertex and release its dependents.
    while ready:
        vertex = ready.popleft()
        order.append(vertex)
        for dep in graph[vertex]:
            indegree[dep] -= 1
            if indegree[dep] == 0:
                ready.append(dep)

    # A cycle leaves some vertices永remaining unprocessed, so the output is
    # complete only for an acyclic graph.
    if len(order) == len(graph):
        return order
    return []
def codeGen(symbol, event):
    """Regenerate the ordered list of clock ``*_Initialize()`` calls.

    Builds a dependency graph of clock sources (DFLL/FDPLLx/GCLKx feeding
    one another), topologically sorts it, and writes one initialize call
    per enabled clock into *symbol*.  On a cycle, no code is emitted and
    the ``cycleFormed`` symbol is set instead.
    """
    global codeGenerationDep
    global topsort
    global gclkSym_GENCTRL_SRC
    global cycleFormed
    from collections import defaultdict
    # NOTE(review): this defaultdict is immediately replaced by the dict
    # literal below, so the defaultdict(list) assignment is dead code.
    sourceDestmap = defaultdict(list)
    sourceDestmap = {
        "DFLL": [],
        "FDPLL0": [],
        "FDPLL1": [],
        "GCLK0": [],
        "GCLK1": [],
        "GCLK2": [],
        "GCLK3": [],
        "GCLK4": [],
        "GCLK5": [],
        "GCLK6": [],
        "GCLK7": [],
        "GCLK8": [],
        "GCLK9": [],
        "GCLK10": [],
        "GCLK11": []
    }
    symbol.clearValues()
    codeList = []
    # A GCLK feeding the DFLL/DPLL reference makes that PLL depend on it.
    if (Database.getSymbolValue("core", "CONFIG_CLOCK_DFLL_ENABLE")) == True:
        if((int(Database.getSymbolValue("core", "CONFIG_CLOCK_DFLL_OPMODE"))) == 1 and (Database.getSymbolValue("core", "CONFIG_CLOCK_DFLL_USB") == False)):
            sourceDestmap["GCLK" + str(Database.getSymbolValue("core",
                                                               "GCLK_ID_0_GENSEL"))].append("DFLL")
    if (Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL0_ENABLE")) == True:
        if((int(Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL0_REF_CLOCK"))) == 3):
            sourceDestmap["GCLK" + str(Database.getSymbolValue("core",
                                                               "GCLK_ID_1_GENSEL"))].append("FDPLL0")
    if (Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL1_ENABLE")) == True:
        if((int(Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL1_REF_CLOCK"))) == 3):
            sourceDestmap["GCLK" + str(Database.getSymbolValue("core",
                                                               "GCLK_ID_2_GENSEL"))].append("FDPLL1")
    # A GCLK sourced from DFLL/FDPLLx/GCLK1 depends on that source.
    for i in range(0, 12):
        if Database.getSymbolValue("core", "GCLK_INST_NUM" + str(i)):
            if gclkSym_GENCTRL_SRC[i].getSelectedKey() in ["DFLL", "FDPLL0", "FDPLL1", "GCLK1"]:
                sourceDestmap[gclkSym_GENCTRL_SRC[i].getSelectedKey()].append(
                    "GCLK" + str(i))
    codeList = topsort(sourceDestmap)
    if len(codeList) != 0:
        cycleFormed.setValue(False, 2)
        # topsort returns every graph key when acyclic, so these removes of
        # disabled clocks cannot raise ValueError here.
        if (Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL0_ENABLE")) == False:
            codeList.remove("FDPLL0")
        if (Database.getSymbolValue("core", "CONFIG_CLOCK_DPLL1_ENABLE")) == False:
            codeList.remove("FDPLL1")
        for i in range(0, 12):
            if Database.getSymbolValue("core", "GCLK_INST_NUM" + str(i)) == False:
                codeList.remove("GCLK" + str(i))
        for i in range(0, len(codeList)):
            symbol.addValue("    " + codeList[i] + "_Initialize();")
    else:
        cycleFormed.setValue(True, 2)
def clkSetup(symbol, event):
    """Propagate peripheral clock-enable and GCLK frequency changes.

    Runs under Harmony's Python 2 scripting engine (note ``iteritems``).
    ``indexSymbolMap`` maps a GCLK channel key to the peripherals it feeds.
    """
    global indexSymbolMap
    symbolKey = ""
    status = False
    if "_CLOCK_ENABLE" in event["id"]:
        # Find which channel key owns the peripheral named in the event.
        for key, value in indexSymbolMap.iteritems():
            for i in range(0, len(value)):
                if value[i] == event["id"].split("_CLOCK_ENABLE")[0]:
                    symbolKey = key
                    break
        # NOTE(review): the break above exits only the inner loop; the outer
        # scan continues over remaining keys — harmless but wasteful.
        symbolValues = indexSymbolMap.get(symbolKey)
        # Channel stays enabled while any attached peripheral is enabled.
        for i in symbolValues:
            status = status | Database.getSymbolValue(
                "core", i + "_CLOCK_ENABLE")
        Database.setSymbolValue("core", symbolKey + "_CHEN", status, 2)
        if event["value"]:
            freq = Database.getSymbolValue("core", symbolKey + "_FREQ")
            Database.setSymbolValue("core", event["id"].split(
                "_CLOCK_ENABLE")[0] + "_CLOCK_FREQUENCY", freq, 2)
        else:
            Database.setSymbolValue("core", event["id"].split(
                "_CLOCK_ENABLE")[0] + "_CLOCK_FREQUENCY", 0, 2)
    if "_FREQ" in event["id"]:
        # A channel frequency changed: push it to every enabled consumer.
        symbolKey = event["id"].split("_FREQ")[0]
        symbolValues = indexSymbolMap.get(symbolKey)
        for i in symbolValues:
            if Database.getSymbolValue("core", i + "_CLOCK_ENABLE"):
                freq = Database.getSymbolValue("core", symbolKey + "_FREQ")
                Database.setSymbolValue(
                    "core", i + "_CLOCK_FREQUENCY", freq, 2)
def calcGclkDivider(symbol, event):
    """Recompute and publish the effective divider for one GCLK generator."""
    # Symbol id is of the form GCLK_<n>_..., so field 1 is the generator index.
    index = symbol.getID().split("_")[1]
    divSel = int(Database.getSymbolValue("core", "GCLK_" + index + "_DIVSEL"))
    div = int(Database.getSymbolValue("core", "GCLK_" + index + "_DIV"))
    # assumes divSel is 0 or 1 — any other value would leave `divider`
    # unbound below; TODO confirm the DIVSEL symbol is boolean-valued.
    if divSel == 0:
        # Direct division; DIV == 0 means divide-by-1.
        if div != 0:
            divider = div
        else:
            divider = 1
    elif divSel == 1:
        # Power-of-two mode: divider is 2^(DIV + 1).
        divider = 2**(div + 1)
    symbol.setValue(divider, 2)
def setGCLKIOFreq(symbol, event):
    """Harmony dependency callback: mirror a generator's frequency onto its
    GCLK_IO_<n>_FREQ symbol while the generator's output pin is enabled;
    clear it (once) when the output is turned off.
    """
    gen_index = int(symbol.getID().split("GCLK_IO_")[1].split("_FREQ")[0])
    output_enabled = Database.getSymbolValue(
        "core", "GCLK_" + str(gen_index) + "_OUTPUTENABLE")
    if output_enabled:
        gen_freq = Database.getSymbolValue(
            "core", "GCLK_" + str(gen_index) + "_FREQ")
        symbol.setValue(int(gen_freq), 2)
    elif symbol.getValue() > 0:
        # Only write when non-zero to avoid redundant symbol updates.
        symbol.setValue(0, 1)
################################################################################
#######                  GCLK Database Components                    ###########
################################################################################
# Module-level bookkeeping for the per-generator symbols created below.
# NOTE(review): 'global' at module scope is a no-op in plain Python; presumably
# this script is exec'd by the Harmony configurator where these declarations
# matter — confirm against the MHC execution model.
gclkDependencyList = []
global gclkSym_num, gclkSym_GENCTRL_DIVSEL, gclkSym_GENCTRL_DIV
gclkSym_num = []
gclkSym_GENCTRL_RUNSTDBY = []
gclkSym_GENCTRL_OE = []
gclkSym_GENCTRL_OOV = []
global gclkSym_GENCTRL_IDC
gclkSym_GENCTRL_IDC = []
gclkSym_GCLK_IO_FREQ = []
gclkSym_GENCTRL_GENEN = []
gclkSym_GENCTRL_DIVSEL = []
gclkSym_GENCTRL_DIV = []
gclkSym_GENCTRL_DIVIDER_VALUE = []
global gclkSym_GENCTRL_SRC
gclkSym_GENCTRL_SRC = []
gclkSym_index = []
gclkSym_Freq = []
global codeGenerationDep
codeGenerationDep = []
triggerdepList = []
global indexSymbolMap
# channel key -> list of peripheral ids sharing that GCLK channel
indexSymbolMap = defaultdict(list)
global cycleFormed
# ------------------------- ATDF Read -------------------------------------
packageName = str(Database.getSymbolValue("core", "COMPONENT_PACKAGE"))
channel = []
availablePins = [] # array to save available pins
gclk_io_signals = [False, False, False, False, False, False, False,
                   False, False, False, False, False]  # array to save available signals
pinout = ""
numPads = 0
# Resolve the pinout name for the selected package variant.
val = ATDF.getNode("/avr-tools-device-file/variants")
children = val.getChildren()
for index in range(0, len(children)):
    if packageName in children[index].getAttribute("package"):
        pinout = children[index].getAttribute("pinout")
children = []
# Collect every pad that physically exists on this pinout.
val = ATDF.getNode(
    "/avr-tools-device-file/pinouts/pinout@[name=\"" + str(pinout) + "\"]")
children = val.getChildren()
for pad in range(0, len(children)):
    availablePins.append(children[pad].getAttribute("pad"))
# Mark which GCLK_IO[n] signals are routed to an available pad.
gclk = ATDF.getNode(
    "/avr-tools-device-file/devices/device/peripherals/module@[name=\"GCLK\"]/instance@[name=\"GCLK\"]/signals")
wakeup_signals = gclk.getChildren()
for pad in range(0, len(wakeup_signals)):
    if "index" in wakeup_signals[pad].getAttributeList():
        padSignal = wakeup_signals[pad].getAttribute("pad")
        if padSignal in availablePins:
            gclk_io_signals[int(
                wakeup_signals[pad].getAttribute("index"))] = True
# Create the configuration symbols for each of the 12 GCLK generators.
for gclknumber in range(0, 12):
    gclkSym_num.append(gclknumber)
    gclkSym_num[gclknumber] = coreComponent.createBooleanSymbol(
        "GCLK_INST_NUM" + str(gclknumber), gclkGen_Menu)
    gclkSym_num[gclknumber].setLabel(
        "Enable Generic Clock Generator " + str(gclknumber))
    # Generator 0 drives GCLK_MAIN (the CPU clock) and must stay enabled.
    if(gclknumber == 0):
        gclkSym_num[gclknumber].setDefaultValue(True)
        gclkSym_num[gclknumber].setReadOnly(True)
    # GCLK Generator Run StandBy
    gclkSym_GENCTRL_RUNSTDBY.append(gclknumber)
    gclkSym_GENCTRL_RUNSTDBY[gclknumber] = coreComponent.createBooleanSymbol(
        "GCLK_" + str(gclknumber) + "_RUNSTDBY", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_RUNSTDBY[gclknumber].setLabel(
        "GCLK should keep running in Standby mode")
    # GCLK External Clock input frequency — only for generators whose
    # GCLK_IO[n] signal is routed to a pad on this package.
    if(gclk_io_signals[gclknumber] == True):
        numPads = numPads + 1
        gclkSym_GCLK_IO_FREQ.append(gclknumber)
        gclkSym_GCLK_IO_FREQ[gclknumber] = coreComponent.createIntegerSymbol(
            "GCLK_IO_" + str(gclknumber) + "_FREQ", gclkSym_num[gclknumber])
        gclkSym_GCLK_IO_FREQ[gclknumber].setLabel(
            "External Input (GCLK_IO[" + str(gclknumber) + "]) Frequency")
        gclkSym_GCLK_IO_FREQ[gclknumber].setDefaultValue(0)
        gclkSym_GCLK_IO_FREQ[gclknumber].setDependencies(setGCLKIOFreq, [
            "GCLK_" + str(gclknumber) + "_FREQ", "GCLK_" + str(gclknumber) + "_OUTPUTENABLE"])
    # GCLK Generator Source Selection (GENCTRL.SRC)
    gclkSym_GENCTRL_SRC.append(gclknumber)
    gclkSym_GENCTRL_SRC[gclknumber] = coreComponent.createKeyValueSetSymbol(
        "GCLK_" + str(gclknumber) + "_SRC", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_SRC[gclknumber].setLabel("Source Selection")
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "XOSC0", "0", "External Crystal 0 Oscillator")
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "XOSC1", "1", "External Crystal 1 Oscillator")
    gclk_in = "GCLK_IN[" + str(gclknumber) + "]"
    gclk_in_desc = "Generator Input Pad (" + \
        "GCLK_IN[" + str(gclknumber) + "])"
    if(gclk_io_signals[gclknumber] == True):
        gclkSym_GENCTRL_SRC[gclknumber].addKey(gclk_in, "2", gclk_in_desc)
    # A generator may cascade from GCLK1, except GCLK1 itself.
    if gclknumber != 1:
        gclkSym_GENCTRL_SRC[gclknumber].addKey(
            "GCLK1", "3", "GCLK Generator 1")
    # BUGFIX: the key for SRC value 4 was a scrubbed "<KEY>" placeholder;
    # on this family SRC = 4 selects OSCULP32K (the 32 KHz ultra-low-power
    # internal oscillator), which is the key name the rest of the clock
    # scripts look up.
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "OSCULP32K", "4", "32 KHz Ultra Low-Power Internal Oscillator")
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "XOSC32K", "5", "32.768 KHz External Crystal Oscillator")
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "DFLL", "6", "DFLL Oscillator Output")
    # Typo fix in display captions: "Franctional" -> "Fractional".
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "FDPLL0", "7", "Fractional DPLL0 Output")
    gclkSym_GENCTRL_SRC[gclknumber].addKey(
        "FDPLL1", "8", "Fractional DPLL1 Output")
    gclkSym_GENCTRL_SRC[gclknumber].setDefaultValue(6)
    gclkSym_GENCTRL_SRC[gclknumber].setOutputMode("Value")
    gclkSym_GENCTRL_SRC[gclknumber].setDisplayMode("Key")
    # GCLK Generator Output Enable (pin output of the generated clock)
    if(gclk_io_signals[gclknumber] == True):
        gclkSym_GENCTRL_OE.append(gclknumber)
        gclkSym_GENCTRL_OE[gclknumber] = coreComponent.createBooleanSymbol(
            "GCLK_" + str(gclknumber) + "_OUTPUTENABLE", gclkSym_num[gclknumber])
        gclkSym_GENCTRL_OE[gclknumber].setLabel(
            "Output GCLK clock signal on IO pin?")
    # GCLK Generator Output Off Value (pin level while output is disabled)
    if(gclk_io_signals[gclknumber] == True):
        gclkSym_GENCTRL_OOV.append(gclknumber)
        gclkSym_GENCTRL_OOV[gclknumber] = coreComponent.createKeyValueSetSymbol(
            "GCLK_" + str(gclknumber) + "_OUTPUTOFFVALUE", gclkSym_GENCTRL_OE[gclknumber])
        gclkSym_GENCTRL_OOV[gclknumber].setLabel("Output Off Value")
        gclkSym_GENCTRL_OOV[gclknumber].addKey("LOW", "0", "Logic Level 0")
        gclkSym_GENCTRL_OOV[gclknumber].addKey("HIGH", "1", "Logic Level 1")
        gclkSym_GENCTRL_OOV[gclknumber].setDefaultValue(0)
        gclkSym_GENCTRL_OOV[gclknumber].setOutputMode("Key")
        gclkSym_GENCTRL_OOV[gclknumber].setDisplayMode("Description")
    # NOTE(review): a *Boolean* symbol holding a frequency looks wrong, but
    # other symbols depend on the "GCLK_IN_n_FREQ" id, so it is kept as-is.
    gclkInFreq = coreComponent.createBooleanSymbol(
        "GCLK_IN_" + str(gclknumber) + "_FREQ", gclkSym_num[gclknumber])
    gclkInFreq.setLabel("Gclk Input Frequency")
    gclkInFreq.setDefaultValue(0)
    # GCLK Generator Division Selection (GENCTRL.DIVSEL) — keys from the ATDF
    # value-group so they always match the silicon.
    gclkSym_GENCTRL_DIVSEL.append(gclknumber)
    gclkSym_GENCTRL_DIVSEL[gclknumber] = coreComponent.createKeyValueSetSymbol(
        "GCLK_" + str(gclknumber) + "_DIVSEL", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_DIVSEL[gclknumber].setLabel("Divide Selection")
    gclkSymGenDivSelNode = ATDF.getNode(
        "/avr-tools-device-file/modules/module@[name=\"GCLK\"]/value-group@[name=\"GCLK_GENCTRL__DIVSEL\"]")
    gclkSymGenDivSelNodeValues = gclkSymGenDivSelNode.getChildren()
    gclkSymGenDivSelDefaultValue = 0
    for index in range(0, len(gclkSymGenDivSelNodeValues)):
        gclkSymGenDivSelKeyName = gclkSymGenDivSelNodeValues[index].getAttribute(
            "name")
        # Default to the DIV1 (direct division) entry.
        if (gclkSymGenDivSelKeyName == "DIV1"):
            gclkSymGenDivSelDefaultValue = index
        gclkSymGenDivSelKeyDescription = gclkSymGenDivSelNodeValues[index].getAttribute(
            "caption")
        gclkSymGenDivSelKeyValue = gclkSymGenDivSelNodeValues[index].getAttribute(
            "value")
        gclkSym_GENCTRL_DIVSEL[gclknumber].addKey(
            gclkSymGenDivSelKeyName, gclkSymGenDivSelKeyValue, gclkSymGenDivSelKeyDescription)
    gclkSym_GENCTRL_DIVSEL[gclknumber].setDefaultValue(
        gclkSymGenDivSelDefaultValue)
    gclkSym_GENCTRL_DIVSEL[gclknumber].setOutputMode("Key")
    gclkSym_GENCTRL_DIVSEL[gclknumber].setDisplayMode("Description")
    # GCLK Generator Division Factor (GENCTRL.DIV)
    gclkSym_GENCTRL_DIV.append(gclknumber)
    gclkSym_GENCTRL_DIV[gclknumber] = coreComponent.createIntegerSymbol(
        "GCLK_" + str(gclknumber) + "_DIV", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_DIV[gclknumber].setLabel("Division Factor")
    # Generator 1 has a 16-bit DIV field; all others are 8-bit.
    if (gclknumber == 1):
        gclkSym_GENCTRL_DIV[gclknumber].setMax(0xFFFF)
    else:
        gclkSym_GENCTRL_DIV[gclknumber].setMax(0xFF)
    gclkSym_GENCTRL_DIV[gclknumber].setMin(0)
    gclkSym_GENCTRL_DIV[gclknumber].setDefaultValue(1)
    # Hidden symbol holding the effective divider shown in the UI.
    gclkSym_GENCTRL_DIVIDER_VALUE.append(gclknumber)
    gclkSym_GENCTRL_DIVIDER_VALUE[gclknumber] = coreComponent.createIntegerSymbol(
        "GCLK_" + str(gclknumber) + "_DIVIDER_VALUE", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_DIVIDER_VALUE[gclknumber].setVisible(False)
    gclkSym_GENCTRL_DIVIDER_VALUE[gclknumber].setDefaultValue(1)
    gclkSym_GENCTRL_DIVIDER_VALUE[gclknumber].setDependencies(calcGclkDivider, [
        "GCLK_" + str(gclknumber) + "_DIV", "GCLK_" + str(gclknumber) + "_DIVSEL"])
    # GCLK Generator Improve Duty Cycle (GENCTRL.IDC)
    gclkSym_GENCTRL_IDC.append(gclknumber)
    gclkSym_GENCTRL_IDC[gclknumber] = coreComponent.createBooleanSymbol(
        "GCLK_" + str(gclknumber) + "_IMPROVE_DUTYCYCLE", gclkSym_num[gclknumber])
    gclkSym_GENCTRL_IDC[gclknumber].setLabel("Enable 50/50 Duty Cycle")
    # Read-only computed output frequency of this generator.
    gclkSym_Freq.append(gclknumber)
    gclkSym_Freq[gclknumber] = coreComponent.createIntegerSymbol(
        "GCLK_" + str(gclknumber) + "_FREQ", gclkSym_num[gclknumber])
    gclkSym_Freq[gclknumber].setLabel(
        "GCLK" + str(gclknumber) + " Clock Frequency")
    gclkSym_Freq[gclknumber].setReadOnly(True)
    # Cleanup: original had an if/else on gclknumber with identical branches.
    gclkSym_Freq[gclknumber].setDefaultValue(0)
    # Every input that can change this generator's output frequency.
    depList = ["GCLK_" + str(gclknumber) + "_DIVSEL",
               "GCLK_" + str(gclknumber) + "_DIV",
               "GCLK_" + str(gclknumber) + "_SRC",
               "GCLK_INST_NUM" + str(gclknumber),
               "XOSC0_FREQ",
               "XOSC1_FREQ",
               "OSC48M_CLOCK_FREQ",
               "DPLL0_CLOCK_FREQ",
               "DPLL1_CLOCK_FREQ",
               "DFLL_CLOCK_FREQ",
               "XOSC32K_FREQ",
               "GCLK_IN_0_FREQ",
               "GCLK_IN_1_FREQ",
               "GCLK_IN_2_FREQ",
               "GCLK_IN_3_FREQ",
               "GCLK_IN_4_FREQ",
               "GCLK_IN_5_FREQ",
               "GCLK_IN_6_FREQ",
               "GCLK_IN_7_FREQ",
               "GCLK_IN_8_FREQ",
               "GCLK_IN_9_FREQ",
               "GCLK_IN_10_FREQ",
               "GCLK_IN_11_FREQ"
               ]
    # Generators other than 1 may cascade from GCLK1, so depend on its freq.
    if gclknumber != 1:
        depList.append("GCLK_1_FREQ")
    gclkSym_Freq[gclknumber].setDependencies(setGClockFreq, depList)
    codeGenerationDep.append("GCLK_" + str(gclknumber) + "_SRC")
    codeGenerationDep.append("GCLK_INST_NUM" + str(gclknumber))
# NOTE(review): maxGCLKId is not used in the visible portion of this script;
# presumably consumed further down — confirm before removing.
maxGCLKId = 0
# Hidden count of GCLK_IO pads available on this package (filled in above).
gclkIOpads = coreComponent.createIntegerSymbol("GCLK_NUM_PADS", None)
gclkIOpads.setVisible(False)
gclkIOpads.setDefaultValue(numPads)
# Hidden flag set by the topological sort when the clock tree has a cycle.
cycleFormed = coreComponent.createBooleanSymbol("GCLK_CYCLE_FORMED", clkMenu)
cycleFormed.setDefaultValue(False)
cycleFormed.setVisible(False)
atdfFilePath = | |
declared as
``global`` or ``nonlocal`` in a given lexical scope shadow names from the
``let`` environment *for the entirety of that lexical scope*. (This is
modeled after Python's standard scoping rules.)
**CAUTION**: assignment to the let environment is ``name << value``;
the regular syntax ``name = value`` creates a local variable in the
lexical scope of the ``def``.
"""
with dyn.let(gen_sym=gen_sym):
return _destructure_and_apply_let(tree, args, _dlet)
@macros.decorator
def dletseq(tree, args, *, gen_sym, **kw):
    """[syntax, decorator] Decorator version of letseq, for 'letseq over def'.

    Expands to nested function definitions, each with one ``dlet`` decorator.

    Example::

        @dletseq((x, 1),
                 (x, x+1),
                 (x, x+2))
        def g(a):
            return a + x
        assert g(10) == 14
    """
    # Expose gen_sym in the dynamic extent so the expansion helpers can
    # generate collision-free names for the hidden let environment.
    with dyn.let(gen_sym=gen_sym):
        return _destructure_and_apply_let(tree, args, _dletseq)
@macros.decorator
def dletrec(tree, args, *, gen_sym, **kw):
    """[syntax, decorator] Decorator version of letrec, for 'letrec over def'.

    Example::

        @dletrec((evenp, lambda x: (x == 0) or oddp(x - 1)),
                 (oddp,  lambda x: (x != 0) and evenp(x - 1)))
        def f(x):
            return evenp(x)
        assert f(42) is True
        assert f(23) is False

    Same cautions apply as to ``dlet``.
    """
    # Bind gen_sym dynamically for the expansion helpers (hygienic names).
    with dyn.let(gen_sym=gen_sym):
        return _destructure_and_apply_let(tree, args, _dletrec)
@macros.decorator
def blet(tree, args, *, gen_sym, **kw):
    """[syntax, decorator] def --> let block.

    Example::

        @blet((x, 21))
        def result():
            return 2*x
        assert result == 42
    """
    # Bind gen_sym dynamically for the expansion helpers (hygienic names).
    with dyn.let(gen_sym=gen_sym):
        return _destructure_and_apply_let(tree, args, _blet)
@macros.decorator
def bletseq(tree, args, *, gen_sym, **kw):
    """[syntax, decorator] def --> letseq block.

    Example::

        @bletseq((x, 1),
                 (x, x+1),
                 (x, x+2))
        def result():
            return x
        assert result == 4
    """
    # Bind gen_sym dynamically for the expansion helpers (hygienic names).
    with dyn.let(gen_sym=gen_sym):
        return _destructure_and_apply_let(tree, args, _bletseq)
@macros.decorator
def bletrec(tree, args, *, gen_sym, **kw):
    """[syntax, decorator] def --> letrec block.

    Example::

        @bletrec((evenp, lambda x: (x == 0) or oddp(x - 1)),
                 (oddp,  lambda x: (x != 0) and evenp(x - 1)))
        def result():
            return evenp(42)
        assert result is True

    Because names inside a ``def`` have mutually recursive scope,
    an almost equivalent pure Python solution (no macros) is::

        from unpythonic.misc import call
        @call
        def result():
            evenp = lambda x: (x == 0) or oddp(x - 1)
            oddp = lambda x: (x != 0) and evenp(x - 1)
            return evenp(42)
        assert result is True
    """
    # Bind gen_sym dynamically for the expansion helpers (hygienic names).
    with dyn.let(gen_sym=gen_sym):
        return _destructure_and_apply_let(tree, args, _bletrec)
# -----------------------------------------------------------------------------
# Imperative code in expression position.
@macros.expr
def do(tree, *, gen_sym, **kw):
    """[syntax, expr] Stuff imperative code into an expression position.

    Return value is the value of the last expression inside the ``do``.
    See also ``do0``.

    Usage::

        do[body0, ...]

    Example::

        do[local[x << 42],
           print(x),
           x << 23,
           x]

    This is sugar on top of ``unpythonic.seq.do``, but with some extra features.

    - To declare and initialize a local name, use ``local[name << value]``.
      The operator ``local`` is syntax, not really a function, and it
      only exists inside a ``do``.

    - By design, there is no way to create an uninitialized variable;
      a value must be given at declaration time. Just use ``None``
      as an explicit "no value" if needed.

    - Names declared within the same ``do`` must be unique. Re-declaring
      the same name is an expansion-time error.

    - To assign to an already declared local name, use ``name << value``.

    **local name declarations**

    A ``local`` declaration comes into effect in the expression following
    the one where it appears. Thus::

        result = []
        let((lst, []))[do[result.append(lst),       # the let "lst"
                          local[lst << lst + [1]],  # LHS: do "lst", RHS: let "lst"
                          result.append(lst)]]      # the do "lst"
        assert result == [[], [1]]

    **Syntactic ambiguity**

    These two cases cannot be syntactically distinguished:

    - Just one body expression, which is a literal tuple or list,
    - Multiple body expressions, represented as a literal tuple or list.

    ``do`` always uses the latter interpretation.

    Whenever there are multiple expressions in the body, the ambiguity does not
    arise, because then the distinction between the sequence of expressions itself
    and its items is clear.

    Examples::

        do[1, 2, 3]       # --> tuple, 3
        do[(1, 2, 3)]     # --> tuple, 3 (since in Python, the comma creates tuples;
                          #     parentheses are only used for disambiguation)
        do[[1, 2, 3]]     # --> list, 3
        do[[[1, 2, 3]]]   # --> list containing a list, [1, 2, 3]
        do[([1, 2, 3],)]  # --> tuple containing a list, [1, 2, 3]
        do[[1, 2, 3],]    # --> tuple containing a list, [1, 2, 3]
        do[[(1, 2, 3)]]   # --> list containing a tuple, (1, 2, 3)
        do[((1, 2, 3),)]  # --> tuple containing a tuple, (1, 2, 3)
        do[(1, 2, 3),]    # --> tuple containing a tuple, (1, 2, 3)

    It is possible to use ``unpythonic.misc.pack`` to create a tuple from
    given elements: ``do[pack(1, 2, 3)]`` is interpreted as a single-item body
    that creates a tuple (by calling a function).

    Note the outermost brackets belong to the ``do``; they don't yet create a list.

    In the *use brackets to denote a multi-expr body* syntax (e.g. ``multilambda``,
    ``let`` constructs), the extra brackets already create a list, so in those
    uses, the ambiguity does not arise. The transformation inserts not only the
    word ``do``, but also the outermost brackets. For example::

        let((x, 1),
            (y, 2))[[
              [x, y]]]

    transforms to::

        let((x, 1),
            (y, 2))[do[[  # "do[" is inserted between the two opening brackets
              [x, y]]]]   # and its closing "]" is inserted here

    which already gets rid of the ambiguity.

    **Notes**

    Macros are expanded in an inside-out order, so a nested ``let`` shadows
    names, if the same names appear in the ``do``::

        do[local[x << 17],
           let((x, 23))[
             print(x)],  # 23, the "x" of the "let"
           print(x)]     # 17, the "x" of the "do"

    The reason we require local names to be declared is to allow write access
    to lexically outer environments from inside a ``do``::

        let((x, 17))[
              do[x << 23,         # no "local[...]"; update the "x" of the "let"
                 local[y << 42],  # "y" is local to the "do"
                 print(x, y)]]

    With the extra bracket syntax, the latter example can be written as::

        let((x, 17))[[
              x << 23,
              local[y << 42],
              print(x, y)]]

    It's subtly different in that the first version has the do-items in a tuple,
    whereas this one has them in a list, but the behavior is exactly the same.

    Python does it the other way around, requiring a ``nonlocal`` statement
    to re-bind a name owned by an outer scope.

    The ``let`` constructs solve this problem by having the local bindings
    declared in a separate block, which plays the role of ``local``.
    """
    # The do-expansion needs gen_sym to name its hidden environment object.
    with dyn.let(gen_sym=gen_sym):
        return _do(tree)
@macros.expr
def do0(tree, *, gen_sym, **kw):
    """[syntax, expr] Like do, but return the value of the first expression."""
    # The do-expansion needs gen_sym to name its hidden environment object.
    with dyn.let(gen_sym=gen_sym):
        return _do0(tree)
# -----------------------------------------------------------------------------
@macros.expr
def let_syntax(tree, args, *, gen_sym, **kw):
    # No docstring here on purpose: the block-macro variant defined below
    # shadows this name at run time and carries the user-facing docstring.
    with dyn.let(gen_sym=gen_sym):  # gen_sym is only needed by the implicit do.
        return _destructure_and_apply_let(tree, args, let_syntax_expr, allow_call_in_name_position=True)
# Python has no function overloading, but expr and block macros go into
# different parts of MacroPy's macro registry.
#
# Normal run-time code sees only the dynamically latest definition,
# so the docstring goes here.
@macros.block
def let_syntax(tree, **kw):
"""[syntax] Introduce local **syntactic** bindings.
**Expression variant**::
let_syntax((lhs, rhs), ...)[body]
let_syntax((lhs, rhs), ...)[[body0, ...]]
Alternative haskelly syntax::
let_syntax[((lhs, rhs), ...) in body]
let_syntax[((lhs, rhs), ...) in [body0, ...]]
let_syntax[body, where((lhs, rhs), ...)]
let_syntax[[body0, ...], where((lhs, rhs), ...)]
**Block variant**::
with let_syntax:
with block as xs: # capture a block of statements - bare name
...
with block(a, ...) as xs: # capture a block of statements - template
...
with expr as x: # capture a single expression - bare name
...
with expr(a, ...) as x: # capture a single expression - template
...
body0
...
A single expression can be a ``do[]`` if multiple expressions are needed.
The bindings are applied **at macro expansion time**, substituting
the expression on the RHS for each instance of the corresponding LHS.
Each substitution gets a fresh copy.
This is useful to e.g. locally abbreviate long function names at macro
expansion time (with zero run-time overhead), or to splice in several
(possibly parametric) instances of a common pattern.
In the expression variant, ``lhs`` may be:
- A bare name (e.g. ``x``), or
| |
slice as a TILE operation.
return _SliceOf.function(self, key)
@staticmethod
def for_op(shape, operation, output, name=None):
"""Builds an operation output Value.
Args:
shape (Shape): The symbolic shape of the operation output.
operation (Operation): The operation producing the output.
output (str): The name of the operation output.
name (str): A mnemonic name for the `Value`, or None.
Returns:
Value: The operation output.
"""
return Value(shape, None, Source(operation, output), name)
@staticmethod
def from_ndims(ndims, dtype=plaidml.DType.FLOAT32, name=None):
"""Builds an N-dimensional placeholder Value.
The resulting `Value`'s shape will contain `Value` instances that will
be computed at binding time from the actual dimensions of the bound
tensor.
Args:
ndims (int): The number of dimensions.
dtype (plaidml.DType): The element datatype.
name (str): A mnemonic name for the `Value`, or None.
Returns:
Value: The placeholder value.
"""
return Value.from_var(plaidml.Placeholder(ndims), [None] * ndims, dtype, name)
@staticmethod
def from_dimensions(dimensions, dtype=plaidml.DType.FLOAT32, name=None):
"""Builds an N-dimensional placeholder Value from a list of dimension sizes.
`None` elements in the dimension list will be replaced by `Value` instances
that will be computed at binding time from the actual dimensions of the bound
tensor.
Args:
dimensions (tuple or list): The size of each dimension.
dtype (plaidml.DType): The element datatype.
name (str): A mnemonic name for the `Value`, or None.
Returns:
Value: The placeholder value.
"""
return Value.from_var(plaidml.Placeholder(len(dimensions)), dimensions, dtype, name)
@staticmethod
def from_var(var, dimensions, dtype=plaidml.DType.FLOAT32, name=None):
"""Builds a Value from a PlaidML variable.
`None` elements in the dimension list will be replaced by `Value` instances
that will be computed at binding time from the actual dimensions of the bound
tensor.
Args:
var (plaidml.Var): The variable to be wrapped by the Value.
dimensions (tuple or list): The size of each dimension.
dtype (plaidml.DType): The element datatype.
name (str): A mnemonic name for the `Value`, or None.
Returns:
Value: The wrapped value.
"""
ndims = len(dimensions)
# Create the value with a temporary zero-dimensional shape, so that it can
# be supplied to Operation instances that calculate its dimensions.
val = Value(Shape(dtype, tuple()), var, None, name)
# Create the dimensions list.
dims = [val._filldim(ndims, idx, dim) for idx, dim in enumerate(dimensions)]
# Update the Value to have the new shape.
val.shape = Shape(dtype, tuple(dims))
return val
@staticmethod
def from_python_value(py_val, dtype=None, name=None, ctx=None, dev=None):
"""Builds a Value from a Python value.
Note: if the context and device are present, the returned value will always be a concrete
`Value` (wrapping a PlaidML variable, not an `Operation` output). Otherwise, the returned
`Value` may be an `Operation` output.
Args:
var: A value of a standard Python type.
dtype (plaidml.DType): The element datatype, or None.
name (str): A mnemonic name for the `Value`, or None.
ctx (plaidml.context.Context): The context to use for the variable, or None.
dev (plaidml.Device): The device to use for the variable, or None.
Returns:
Value: The wrapped value.
"""
if isinstance(py_val, Value):
return py_val
elif isinstance(py_val, plaidml.Var):
return py_val
elif isinstance(py_val, six.integer_types):
if dtype is None:
dtype = plaidml.DType.INT32
return Value.from_var(plaidml.Integer(py_val), tuple(), dtype, name=name)
elif isinstance(py_val, float):
if dtype is None:
dtype = plaidml.DType.FLOAT32
return Value.from_var(plaidml.Real(py_val), tuple(), dtype, name=name)
elif hasattr(py_val, 'shape') and hasattr(py_val, 'dtype'):
# Assume it's an ndarray.
if len(py_val.shape) == 0:
# Handle 0-dimensional numpy arrays as scalars
return Value.from_python_value(py_val.item())
if ctx and dev:
# We have the device; we can return a value immediately.
tensor = plaidml.Tensor(
dev,
plaidml.Shape(ctx, convert_np_dtype_to_pml(py_val.dtype.name), *py_val.shape))
with tensor.mmap_discard(ctx) as view:
view.copy_from_ndarray(py_val)
view.writeback()
return Value.from_var(tensor,
py_val.shape,
convert_np_dtype_to_pml(py_val.dtype.name),
name='NDArray')
# Otherwise, defer the value creation.
return _NDArray(py_val).sole_output()
else:
raise NotImplementedError('Unable to build a Value from a \'{}\' instance'.format(
py_val.__class__.__name__))
def _filldim(self, ndims, idx, dim):
if dim is not None:
return dim
return self._dim(ndims, idx)
def _dim(self, ndims, idx):
"""The symbolic size a dimension of the supplied variable.
Args:
ndims (int): The total number of dimensions.
idx (int): The 0-based index of the dimension to get.
Returns:
Value: The size of dimension `idx` of `var`.
"""
code = 'function (I[{dims}]) -> (O) {{ O = D{idx}; }}'.format(dims=','.join(
['D{}'.format(i) for i in range(ndims)]),
idx=str(idx))
shape = Shape(plaidml.DType.UINT64, tuple())
operation = Operation(code, [('I', self)], [('O', shape)], name='SymbolicDim')
return operation.outputs['O']
# Python numeric type methods. These allow Value objects to be used in
# ordinary expressions, returning derived Values.
# Logical operations
#
# N.B. We neither define __eq__ nor __ne__, because Value objects are compared for
# equality and inequality in a number of contexts, such as "value in some_list".
# So we use standard Python object definitions for equality/inequality; callers
# that want TILE operations for these should use the operation library's
# `equal()` and `not_equal()` functions.
    def __ge__(self, other):
        """Elementwise 'L >= R' TILE op with BOOLEAN result dtype."""
        return binary_op(self, other, 'L >= R', dtype=plaidml.DType.BOOLEAN, name='Ge')
    def __gt__(self, other):
        """Elementwise 'L > R' TILE op with BOOLEAN result dtype."""
        return binary_op(self, other, 'L > R', dtype=plaidml.DType.BOOLEAN, name='Gt')
    def __le__(self, other):
        """Elementwise 'L <= R' TILE op with BOOLEAN result dtype."""
        return binary_op(self, other, 'L <= R', dtype=plaidml.DType.BOOLEAN, name='Le')
    def __lt__(self, other):
        """Elementwise 'L < R' TILE op with BOOLEAN result dtype."""
        return binary_op(self, other, 'L < R', dtype=plaidml.DType.BOOLEAN, name='Lt')
# Arithmetic operations
    def __abs__(self):
        """Elementwise absolute value via the TILE expression 'abs(I)'."""
        return unary_op(self, 'abs(I)', 'Abs')
def __add__(self, other):
if isinstance(other, six.integer_types) and other == 0:
return self
if isinstance(other, float) and other == 0.0:
return self
return binary_op(self, other, 'L + R', name='Add')
def __radd__(self, other):
if isinstance(other, six.integer_types) and other == 0:
return self
if isinstance(other, float) and other == 0.0:
return self
return binary_op(other, self, 'L + R', name='RevAdd')
    def __and__(self, other):
        """Elementwise bitwise AND ('L & R')."""
        return binary_op(self, other, 'L & R', name='And')
    def __rand__(self, other):
        """Reflected elementwise bitwise AND ('L & R')."""
        return binary_op(other, self, 'L & R', name='RevAnd')
def __div__(self, other):
if isinstance(other, six.integer_types) and other == 1:
return self
if isinstance(other, float) and other == 1.0:
return self
return binary_op(self, other, 'L / R', name='Div')
    def __rdiv__(self, other):
        """Reflected Python 2 division ('L / R')."""
        return binary_op(other, self, 'L / R', name='RevDiv')
def __floordiv__(self, other):
if isinstance(other, six.integer_types) and other == 1:
return self
if isinstance(other, float) and other == 1.0:
return self
return binary_op(self, other, 'floor(L / R)', name='FloorDiv')
    def __rfloordiv__(self, other):
        """Reflected elementwise floor division ('floor(L / R)')."""
        return binary_op(other, self, 'floor(L / R)', name='RevFloorDiv')
    def __invert__(self):
        """Elementwise bitwise NOT via the TILE expression '~I'."""
        return unary_op(self, '~I', 'Invert')
    def __lshift__(self, other):
        """Elementwise left shift; shifting by integer zero is a no-op."""
        if isinstance(other, six.integer_types) and other == 0:
            return self
        return binary_op(self, other, 'L << R', name='LShift')
    def __rlshift__(self, other):
        """Reflected elementwise left shift ('L << R')."""
        return binary_op(other, self, 'L << R', name='RevLShift')
def __mul__(self, other):
if isinstance(other, six.integer_types) and other == 1:
return self
if isinstance(other, float) and other == 1.0:
return self
return binary_op(self, other, 'L * R', name='Mul')
def __rmul__(self, other):
if isinstance(other, six.integer_types) and other == 1:
return self
if isinstance(other, float) and other == 1.0:
return self
return binary_op(other, self, 'L * R', name='RevMul')
    def __neg__(self):
        """Elementwise negation via the TILE expression '-I'."""
        return unary_op(self, '-I', 'Negate')
    def __or__(self, other):
        """Elementwise bitwise OR ('L | R')."""
        return binary_op(self, other, 'L | R', name='Or')
    def __ror__(self, other):
        """Reflected elementwise bitwise OR ('L | R')."""
        return binary_op(other, self, 'L | R', name='RevOr')
    def __pos__(self):
        """Unary plus: an identity TILE op ('I')."""
        return unary_op(self, 'I', 'Identity')
    def __rshift__(self, other):
        """Elementwise right shift; shifting by integer zero is a no-op."""
        if isinstance(other, six.integer_types) and other == 0:
            return self
        return binary_op(self, other, 'L >> R', name='RShift')
    def __rrshift__(self, other):
        """Reflected elementwise right shift ('L >> R')."""
        return binary_op(other, self, 'L >> R', name='RevRShift')
def __sub__(self, other):
if isinstance(other, six.integer_types) and other == 0:
return self
if isinstance(other, float) and other == 0.0:
return self
return binary_op(self, other, 'L - R', name='Sub')
def __rsub__(self, other):
if isinstance(other, six.integer_types) and other == 0:
return self
if isinstance(other, float) and other == 0.0:
return self
return binary_op(other, self, 'L - R', name='RevSub')
def __truediv__(self, other):
if isinstance(other, six.integer_types) and other == 1:
return self
if isinstance(other, float) and other == 1.0:
return self
return binary_op(self, other, 'L / R', name='TrueDiv')
    def __rtruediv__(self, other):
        """Reflected elementwise true division ('L / R')."""
        return binary_op(other, self, 'L / R', name='RevTrueDiv')
    def __xor__(self, other):
        """Elementwise bitwise XOR ('L ^ R')."""
        return binary_op(self, other, 'L ^ R', name='Xor')
    def __rxor__(self, other):
        """Reflected elementwise bitwise XOR ('L ^ R')."""
        return binary_op(other, self, 'L ^ R', name='RevXor')
def compose(ctx, dev, inputs, outputs, updates=None, name='unnamed_function'):
"""Builds a TILE Function that computes the indicated values.
Args:
ctx (plaidml.Context): The context to use for building the function.
dev (plaidml.Device): The device used to build the function (where constants will live)
inputs ([(name, Value)]): A list of named input placeholders.
outputs ([(name, Value)]): A list of named output values.
updates ([(original, updated)]): A list of updates to perform (side-effects).
| |
from panda3d.core import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.reputation import ReputationGlobals
from direct.showbase import DirectObject
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from direct.gui import DirectGuiGlobals as DGG
from direct.task import Task
from direct.gui.DirectGui import *
from pirates.uberdog.UberDogGlobals import InventoryType
from pirates.world.LocationConstants import *
from otp.otpbase import OTPGlobals
import random
# Reference window resolution the loading-screen layout was authored against.
IDEALX = 1280
IDEALY = 1024
# Screenshot art shown during the tutorial phase of loading.
tutorialShots = [
    'models/gui/loadingScreen_12', 'models/gui/loadingScreen_16', 'models/gui/loadingScreen_33', 'models/gui/loadingScreen_34', 'models/gui/loadingScreen_35', 'models/gui/loadingScreen_36', 'models/gui/loadingScreen_37']
# Subset shown for the movement/aiming tutorial step.
tutorialShots_MoveAim = [
    'models/gui/loadingScreen_33', 'models/gui/loadingScreen_36']
# General pool of loading screenshots used when no specific context applies.
screenShots = [
    'models/gui/loadingScreen_01', 'models/gui/loadingScreen_02', 'models/gui/loadingScreen_05', 'models/gui/loadingScreen_06', 'models/gui/loadingScreen_07', 'models/gui/loadingScreen_08', 'models/gui/loadingScreen_09', 'models/gui/loadingScreen_10', 'models/gui/loadingScreen_11', 'models/gui/loadingScreen_12', 'models/gui/loadingScreen_13', 'models/gui/loadingScreen_14', 'models/gui/loadingScreen_15', 'models/gui/loadingScreen_16', 'models/gui/loadingScreen_17', 'models/gui/loadingScreen_18', 'models/gui/loadingScreen_19', 'models/gui/loadingScreen_20', 'models/gui/loadingScreen_21', 'models/gui/loadingScreen_22', 'models/gui/loadingScreen_24', 'models/gui/loadingScreen_25', 'models/gui/loadingScreen_26', 'models/gui/loadingScreen_27', 'models/gui/loadingScreen_28', 'models/gui/loadingScreen_29', 'models/gui/loadingScreen_30', 'models/gui/loadingScreen_31', 'models/gui/loadingScreen_32', 'models/gui/loadingScreen_34', 'models/gui/loadingScreen_46']
# Environment-specific screenshot pools.
screenShots_Jungles = [
    'models/gui/loadingScreen_13']
screenShots_Swamps = [
    'models/gui/loadingScreen_18']
screenShots_Caves = [
    'models/gui/loadingScreen_32', 'models/gui/loadingScreen_30', 'models/gui/loadingScreen_31', 'models/gui/loadingScreen_26', 'models/gui/loadingScreen_27', 'models/gui/loadingScreen_29', 'models/gui/loadingScreen_28', 'models/gui/loadingScreen_22']
screenShots_WinterHoliday = [
    'models/gui/loadingScreen_38', 'models/gui/loadingScreen_39', 'models/gui/loadingScreen_40']
# Area-id -> index maps for environment typing.
# NOTE(review): keys appear to be world area ids of the form
# '<timestamp><author>' — confirm against the world data files.
areaType_Jungles = {'1161798288.34sdnaik': 0,'1164141722.61sdnaik': 1,'1169592956.59sdnaik': 2,'1165004570.58sdnaik': 3,'1165009873.53sdnaik': 4,'1165009856.72sdnaik': 5,'1167857698.16sdnaik': 6,'1172209955.25sdnaik': 7}
areaType_Swamps = {'1169179552.88sdnaik': 0,'1161732578.06sdnaik': 1}
areaType_Caves = {'1164952144.06sdnaik': 0,'1165001772.05sdnaik': 1,'1158121765.09sdnaik': 2,'1167862588.52sdnaik': 3,'1168057131.73sdnaik': 4,'1164929110.98sdnaik': 5,'1172208344.92sdnaik': 6,'1245949184.0akelts': 7,'1235605888.0akelts': 8,'1228348366.44akelts': 9,'1245948731.45akelts': 10,'1245948708.12akelts': 11,'1245946851.97akelts': 12,'1245946794.3akelts': 13}
# Single-purpose screenshots for specific loading situations.
screenShot_Dinghy = 'models/gui/loadingScreen_08'
screenShot_Jail = 'models/gui/loadingScreen_12'
screenShot_Weapon = 'models/gui/loadingScreen_35'
screenShot_Cutlass = 'models/gui/loadingScreen_37'
screenShot_EnterGame = 'models/gui/loadingScreen_enter'
screenShot_ExitGame = 'models/gui/loadingScreen_exit'
# Location-specific loading screenshots, keyed by location id.
# BUG FIX: MADRE_DEL_FUEGO_ISLAND pointed at 'models/gui/loacingScreen_49'
# ("loacing"), which does not match the 'loadingScreen_NN' naming used by
# every other entry and would fail to load; corrected to 'loadingScreen_49'.
screenShots_Locations = {
    LocationIds.ANVIL_ISLAND: ['models/gui/loadingScreen_01'],
    LocationIds.ISLA_CANGREJOS: ['models/gui/loadingScreen_02', 'models/gui/loadingScreen_10'],
    LocationIds.CUBA_ISLAND: ['models/gui/loadingScreen_05'],
    LocationIds.CUTTHROAT_ISLAND: ['models/gui/loadingScreen_06'],
    LocationIds.DEL_FUEGO_ISLAND: ['models/gui/loadingScreen_07'],
    LocationIds.DRIFTWOOD_ISLAND: ['models/gui/loadingScreen_09'],
    LocationIds.ISLA_PERDIDA: ['models/gui/loadingScreen_11'],
    LocationIds.KINGSHEAD_ISLAND: ['models/gui/loadingScreen_14'],
    LocationIds.OUTCAST_ISLE: ['models/gui/loadingScreen_19'],
    LocationIds.PORT_ROYAL_ISLAND: ['models/gui/loadingScreen_16'],
    LocationIds.RUMRUNNER_ISLE: ['models/gui/loadingScreen_17'],
    LocationIds.ISLA_TORMENTA: ['models/gui/loadingScreen_15'],
    LocationIds.TORTUGA_ISLAND: ['models/gui/loadingScreen_20'],
    LocationIds.ANVIL_CAVE_BARBOSA: ['models/gui/loadingScreen_22'],
    LocationIds.ISLA_AVARICIA: ['models/gui/loadingScreen_24'],
    LocationIds.ISLA_DE_PORC: ['models/gui/loadingScreen_25'],
    LocationIds.PORT_ROYAL_CAVE_A: ['models/gui/loadingScreen_32'],
    LocationIds.PORT_ROYAL_CAVE_B: ['models/gui/loadingScreen_30'],
    LocationIds.TORTUGA_CAVE: ['models/gui/loadingScreen_31'],
    LocationIds.DEL_FUEGO_CAVE_C: ['models/gui/loadingScreen_29'],
    LocationIds.DEL_FUEGO_CAVE_D: ['models/gui/loadingScreen_26'],
    LocationIds.DEL_FUEGO_CAVE_E: ['models/gui/loadingScreen_27'],
    LocationIds.TORMENTA_CAVE_B: ['models/gui/loadingScreen_28'],
    LocationIds.NASSAU_ISLAND: ['models/gui/loadingScreen_47'],
    LocationIds.ANTIGUA_ISLAND: ['models/gui/loadingScreen_48'],
    LocationIds.MADRE_DEL_FUEGO_ISLAND: ['models/gui/loadingScreen_49']
}
# Winter-holiday screenshot overrides for specific locations.
screenShots_WinterHolidayLocations = {LocationIds.DEL_FUEGO_ISLAND: ['models/gui/loadingScreen_38'],LocationIds.PORT_ROYAL_ISLAND: ['models/gui/loadingScreen_39'],LocationIds.TORTUGA_ISLAND: ['models/gui/loadingScreen_40']}
# Screenshots for specific activities/minigames.
screenShot_Potions = 'models/gui/loadingScreen_41'
screenShot_BenchRepair = 'models/gui/loadingScreen_42'
screenShot_ShipRepair = 'models/gui/loadingScreen_43'
screenShot_CannonDefense = 'models/gui/loadingScreen_44'
screenShot_Fishing = 'models/gui/loadingScreen_45'
def getOceanHint():
    """Return a formatted loading-screen hint for a random ocean area.

    Falls back to the general hint pool when the chosen ocean has no
    localized hints.
    """
    oceans = [
        'Windward_Passage', 'Brigand_Bay', 'Bloody_Bayou', 'Scurvy_Shallows',
        'Blackheart_Strait', 'Salty_Flats', 'Mar_de_Plata', 'Smugglers_Run',
        'The_Hinterseas', 'Dead_Mans_Trough', 'Leeward_Passage', 'Boiling_Bay',
        'Mariners_Reef']
    chosenOcean = random.choice(oceans)
    oceanHints = PLocalizer.HintMap_Locations.get(chosenOcean)
    if oceanHints:
        hintText = random.choice(oceanHints)
    else:
        hintText = random.choice(PLocalizer.Hints_General)
    return '%s: %s' % (PLocalizer.LoadingScreen_Hint, hintText)
def getGeneralHint():
    """Return a random general hint.

    Paid (non-velvet-rope) accounts have a 50% chance of drawing from the
    velvet-rope hint pool instead.
    """
    pick = random.choice([0, 1])
    # Preserve original evaluation order: isPaid() is queried first.
    if base.cr.isPaid() == OTPGlobals.AccessVelvetRope and pick == 1:
        return random.choice(PLocalizer.Hints_VelvetRope)
    return random.choice(PLocalizer.Hints_General)
def getPrivateeringHint():
    """Return a formatted hint drawn from the privateering hint pool."""
    chosen = random.choice(PLocalizer.Hints_Privateering)
    return '%s: %s' % (PLocalizer.LoadingScreen_Hint, chosen)
def _hintFromMap(hintMap, key):
    """Pick a random hint for *key* from *hintMap*.

    Falls back to getGeneralHint() when the key is missing or its hint
    list is empty (matches the original None/empty-list handling).
    """
    hints = hintMap.get(key)
    if hints:
        return random.choice(hints)
    return getGeneralHint()


def getHint(destId=None, level=None):
    """Return a formatted loading-screen hint.

    Randomly chooses between a destination-specific hint (keyed by
    *destId*), a level-specific hint (keyed by *level*) and a general
    hint, depending on which arguments are provided.

    The original implementation repeated the same
    "look up -> fall back to general" pattern six times; it is factored
    into _hintFromMap, and the local no longer shadows builtin `type`.
    The number and order of random draws is unchanged.
    """
    if destId and level:
        pick = random.choice([0, 1, 2])
        if pick == 0:
            hint = _hintFromMap(PLocalizer.HintMap_Locations, destId)
        elif pick == 1:
            hint = _hintFromMap(PLocalizer.HintMap_Levels, level)
        else:
            hint = getGeneralHint()
    elif destId:
        if random.choice([0, 1]) == 0:
            hint = _hintFromMap(PLocalizer.HintMap_Locations, destId)
        else:
            hint = getGeneralHint()
    elif level:
        if random.choice([0, 1]) == 0:
            hint = _hintFromMap(PLocalizer.HintMap_Levels, level)
        else:
            hint = getGeneralHint()
    else:
        hint = getGeneralHint()
    return '%s: %s' % (PLocalizer.LoadingScreen_Hint, hint)
class FancyLoadingScreen(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('LoadingScreen')
    def __init__(self, parent):
        """Build the loading screen under aspect2dp.

        With ``loading-screen 2`` in the config a debug/profiling overlay
        is built instead of the normal art-based backdrop.
        """
        DirectObject.DirectObject.__init__(self)
        # 'loading-screen' == 2 enables the step-profiling debug layout.
        self.debugMode = config.GetInt('loading-screen') == 2
        self.parent = parent
        # True while the loading screen is being shown.
        self.state = False
        self.currScreenshot = None
        self.snapshot = None
        self.snapshotFrame = None
        self.snapshotFrameBasic = None
        self.currentTime = 0
        self.analyzeMode = False
        self.loadScale = 1.0
        # Debug bookkeeping: tick timestamps seen outside any named step.
        self.unmappedTicks = []
        self.stepInfo = {}
        self.accept(base.win.getWindowEvent(), self.adjustSize)
        self.accept('tick', self.tick)
        # Progress state; 'unmapped' means no named loading step is active.
        self.currStage = 'unmapped'
        self.stagePercent = 0
        self.numObjects = 0
        self.currPercent = 0.0
        self.line = LineSegs()
        self.line.setColor((0, 0, 0, 1))
        self.line.setThickness(1)
        self.stageLabel = None
        self.currNum = 0
        self.overallPercent = 0
        self.lastPercent = 0
        # Everything lives under a stashed root so show()/hide can toggle it.
        self.topLock = aspect2dp.attachNewNode('topShift')
        self.root = self.topLock.attachNewNode('loadingScreenRoot')
        self.root.setZ(-1)
        self.root.stash()
        self.model = loader.loadModel('models/gui/pir_m_gui_gen_loadScreen.bam')
        self.model.setP(90)
        self.model.reparentTo(self.root)
        cm = CardMaker('backdrop')
        cm.setFrame(-10, 10, -10, 10)
        if self.debugMode:
            # Flat grey backdrop plus plain bars/labels for step profiling.
            self.backdrop = self.root.attachNewNode(cm.generate())
            self.backdrop.setX(-1.5)
            self.backdrop.setZ(-1)
            self.backdrop.setScale(4)
            self.backdrop.setColor(0.5, 0.5, 0.5, 1)
            cm = CardMaker('loadingBarBase')
            cm.setFrame(-0.9, 0.9, 0.1, 0.5)
            self.loadingBarBacking = self.root.attachNewNode(cm.generate())
            self.loadingBarRoot = self.root.attachNewNode('loadingBarRoot')
            cm.setName('analysisBarBase')
            cm.setFrame(-0.9, 0.9, -0.5, -0.1)
            self.analysisBar = self.root.attachNewNode(cm.generate())
            self.analysisBarRoot = self.root.attachNewNode('analysisBarRoot')
            self.analysisBar.hide()
            self.analysisButtons = []
            self.enterToContinue = DirectLabel(parent=self.root, text='Press Shift To Continue', relief=None, text_scale=0.1, pos=(0, 0, -0.9), text_align=TextNode.ACenter)
            self.enterToContinue.hide()
            self.stageLabel = DirectLabel(parent=self.root, text='', relief=None, text_scale=0.1, pos=(-1.25, 0, 0.75), text_align=TextNode.ALeft, textMayChange=1)
            self.tickLabel = DirectLabel(parent=self.root, text='', relief=None, text_scale=0.1, pos=(0.75, 0, 0.75), textMayChange=1)
            self.overallLabel = DirectLabel(parent=self.root, text='', relief=None, text_scale=0.1, pos=(0, 0, -0.75), textMayChange=1)
        else:
            # Normal mode: load the art backdrop and wire up the bar/plank art.
            self.backdrop = loader.loadModel('models/gui/pir_m_gui_gen_loadScreen')
            self.backdrop.reparentTo(self.root)
            bg = self.backdrop.find('**/expandable_bg')
            bg.setScale(1000, 1, 1000)
            bg.flattenStrong()
            self.backdrop.find('**/loadbar_grey').setColorScale(0.15, 0.15, 0.15, 0.1)
            self.loadingBar = self.backdrop.find('**/loadbar')
            self.loadingBar.setColorScale(0.2, 0.6, 0.5, 1)
            # Collection of all nodes belonging to the loading-bar plank so
            # they can be shown/hidden together.
            self.loadingPlank = NodePathCollection()
            self.loadingPlank.addPath(self.backdrop.find('**/plank_loading_bar'))
            self.loadingPlank.addPath(self.backdrop.find('**/loadbar'))
            self.loadingPlank.addPath(self.backdrop.find('**/loadbar_frame'))
            self.loadingPlank.addPath(self.backdrop.find('**/loadbar_grey'))
            self.titlePlank = self.backdrop.find('**/plank_title')
            self.percentLabel = DirectLabel(text='0%', parent=self.root, relief=None, text_font=PiratesGlobals.getPirateFont(), text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_scale=0.031, pos=(0, 0, -0.4445), textMayChange=1)
            self.loadingPlank.addPath(self.percentLabel)
            # Rebuild the screenshot card's geometry with explicit texture
            # coordinates borrowed from the loadbar's vertex format.
            self.screenshot = self.backdrop.find('**/screenshot')
            copyGeom = self.loadingBar.find('**/+GeomNode').node().getGeom(0)
            format = copyGeom.getVertexData().getFormat()
            primitive = copyGeom.getPrimitive(0)
            data = GeomVertexData(self.screenshot.node().getGeom(0).getVertexData())
            data.setFormat(format)
            writer = GeomVertexWriter(data, 'texcoord')
            writer.setData2f(0, 0)
            writer.setData2f(1, 0)
            writer.setData2f(1, 1)
            writer.setData2f(0, 1)
            geom = Geom(data)
            geom.addPrimitive(primitive)
            self.screenshot.node().removeGeom(0)
            self.screenshot.node().addGeom(geom)
            self.titlePlankMiddle = self.backdrop.find('**/plank_title_middle_box')
            self.titlePlankLeft = self.backdrop.find('**/plank_title_left')
            self.titlePlankRight = self.backdrop.find('**/plank_title_right')
        # Distinct colors for each profiled step (used by the debug overlay).
        # NOTE: written for Python 2 integer division (i % 10 / 10.0 etc.).
        self.loadingBarColors = [ ((i % 10 / 10.0 + 0.5) / 2.0, (i % 100 / 10 / 10.0 + 0.5) / 2.0, (i / 100 / 10.0 + 0.5) / 2.0, 1) for i in range(1000) ]
        random.shuffle(self.loadingBarColors)
        self.lastUpdateTime = globalClock.getRealTime()
        self.locationLabel = DirectLabel(parent=self.root, relief=None, text='', text_font=PiratesGlobals.getPirateOutlineFont(), text_fg=PiratesGuiGlobals.TextFG1, text_shadow=PiratesGuiGlobals.TextShadow, text_scale=PiratesGuiGlobals.TextScaleTitleJumbo * 0.7, text_align=TextNode.ACenter, pos=(0.0, 0.0, 0.515), textMayChange=1)
        self.locationText = None
        self.hintLabel = DirectLabel(parent=self.root, relief=None, text='', text_font=PiratesGlobals.getPirateOutlineFont(), text_fg=PiratesGuiGlobals.TextFG1, text_shadow=PiratesGuiGlobals.TextShadow, text_scale=PiratesGuiGlobals.TextScaleTitleJumbo * 0.5, text_align=TextNode.ACenter, pos=(0.0, 0.0, -0.62), text_wordwrap=30, textMayChange=1)
        self.hintText = None
        self.adImage = None
        self.allowLiveFlatten = ConfigVariableBool('allow-live-flatten')
        self.title_art = []
        self.tempVolume = []
        self.adjustSize(base.win)
        # Pre-upload the scene to the GPU so the first frame doesn't hitch.
        gsg = base.win.getGsg()
        if gsg:
            self.root.prepareScene(gsg)
        return
def startLoading(self, expectedLoadScale):
if not self.debugMode:
self.loadingBar.setSx(0)
self.loadScale = float(expectedLoadScale)
self.currStage = 'unmapped'
self.stagePercent = 0
self.numObjects = 0
self.currPercent = 0.0
self.loadingStart = globalClock.getRealTime()
self.currNum = 0
self.overallPercent = 0
self.lastPercent = 0
self.stepNum = 0
if self.debugMode:
self.overallLabel['text'] = '0.0'
self.stageLabel['text'] = self.currStage
self.update()
    def beginStep(self, stageName, amt=0, percent=0.001):
        """Begin a named loading step.

        amt is the expected number of tick() calls during the step;
        percent is the share of the overall bar this step consumes.
        """
        if not self.state:
            return
        if self.currStage != 'unmapped' and stageName != self.currStage:
            # A previous step never ended: fatal in dev/debug, warn otherwise.
            if __dev__ and self.debugMode:
                self.notify.error('step %s not finished when step %s was started!' % (self.currStage, stageName))
            else:
                self.notify.warning('step %s not finished when step %s was started!' % (self.currStage, stageName))
                return
        self.stepNum += 1
        if self.debugMode:
            # stepInfo record: [startTime, duration, color, tickTimes,
            # startPercent, stepPercent, expectedTicks]
            stageColor = self.loadingBarColors[self.stepNum]
            self.stepInfo[stageName] = [
                globalClock.getRealTime() - self.loadingStart, 0.0, stageColor, [], self.lastPercent + self.stagePercent, percent, amt]
            self.stepCard = CardMaker('step-%s' % stageName)
            self.stepCard.setColor(stageColor)
            self.currPoly = NodePath('empty')
            self.stageLabel['text'] = stageName
            self.tickLabel['text'] = '0.0'
        self.currPercent = 0.0
        # Commit the previous step's share before starting the new one;
        # the order of these two assignments matters.
        self.overallPercent = min(100.0 * self.loadScale, self.lastPercent + self.stagePercent)
        self.lastPercent = self.overallPercent
        self.currStage = stageName
        self.stagePercent = percent
        self.numObjects = amt
        self.currNum = 0
        # Render two frames so the new stage is actually visible on screen.
        base.graphicsEngine.renderFrame()
        base.graphicsEngine.renderFrame()
    def endStep(self, stageName):
        """End the loading step previously started with beginStep(stageName)."""
        if self.currStage == 'unmapped':
            self.notify.warning('step %s was started before loading screen was enabled' % stageName)
            return
        if stageName != self.currStage:
            # Mismatched step name: fatal in dev/debug, silently ignored otherwise.
            if __dev__ and self.debugMode:
                self.notify.error('step %s was active while step %s was trying to end!' % (self.currStage, stageName))
            else:
                return
        self.tick()
        if self.debugMode:
            # Close out the profiling record and draw this step's bar segment.
            stageInfo = self.stepInfo[self.currStage]
            stageInfo[1] = globalClock.getRealTime() - self.loadingStart - stageInfo[0]
            self.currPoly.detachNode()
            self.stepCard.setFrame(self.lastPercent / self.loadScale * 0.018 - 0.9, (self.lastPercent + self.stagePercent) / self.loadScale * 0.018 - 0.9, 0.1, 0.5)
            self.loadingBarRoot.attachNewNode(self.stepCard.generate())
            self.stageLabel['text'] = 'unmapped'
        self.currStage = 'unmapped'
        self.currPercent = 0.0
def tick(self):
if self.state == False or self.analyzeMode:
return
if self.debugMode:
if self.currStage == 'unmapped':
self.unmappedTicks.append(globalClock.getRealTime() - self.loadingStart)
else:
self.stepInfo[self.currStage][3].append(globalClock.getRealTime() - self.loadingStart)
self.currNum += 1
self.currPercent = min(1.0, self.currNum / float(self.numObjects + 1))
self.overallPercent = min(100.0 * self.loadScale, self.lastPercent + self.currPercent * self.stagePercent)
self.update()
def destroy(self):
taskMgr.remove('updateLoadingScreen')
for part in (self.model, self.snapshot):
if part is not None:
tex = part.findTexture('*')
if tex:
tex.releaseAll()
part.removeNode()
self.model = None
self.snapshot = None
if self.snapshotFrame:
self.snapshotFrame.destroy()
if self.snapshotFrameBasic:
self.snapshotFrameBasic.destroy()
if self.locationLabel:
self.locationLabel.destroy()
if self.hintLabel:
self.hintLabel.destroy()
if self.debugMode:
self.stageLabel.destroy()
self.tickLabel.destroy()
self.overallLabel.destroy()
self.enterToContinue.destroy()
self.stageLabel = None
self.tickLabel = None
self.overallLabel = None
self.enterToContinue = None
self.ignoreAll()
return
def showTitleFrame(self):
if base.config.GetBool('no-loading-screen', 0):
return
for part in self.title_art:
part.show()
def hideTitleFrame(self):
for part in self.title_art:
part.hide()
    def show(self, waitForLocation=False, disableSfx=True, expectedLoadScale=1.0):
        """Display the loading screen and start tracking progress.

        waitForLocation delays choosing screenshot art until the location
        is known; disableSfx mutes sound effects while loading.
        """
        if self.state or base.config.GetBool('no-loading-screen', 0) or not self.locationLabel:
            return
        # Hide the live scene layers; only this screen's root stays visible.
        render.hide()
        render2d.hide()
        render2dp.hide()
        if not self.debugMode:
            self.loadingPlank.hide()
        self.root.unstash()
        self.root.showThrough()
        self.state = True
        gsg = base.win.getGsg()
        if gsg:
            # Force complete renders so async loads don't leave holes on screen.
            gsg.setIncompleteRender(False)
        base.setTaskChainNetNonthreaded()
        self.allowLiveFlatten.setValue(1)
        self.startLoading(expectedLoadScale)
        # Two frames to guarantee the screen is actually presented.
        base.graphicsEngine.renderFrame()
        base.graphicsEngine.renderFrame()
        base.refreshAds()
        taskMgr.add(self.update, 'updateLoadingScreen', priority=-100)
        # Mute all sfx managers while loading (restored when loading ends).
        if base.sfxManagerList and disableSfx:
            index = 0
            while index < len(base.sfxManagerList):
                sfx_manager = base.sfxManagerList[index]
                sfx_manager.setVolume(0.0)
                index += 1
        if base.appRunner:
            base.appRunner.notifyRequest('onLoadingMessagesStart')
        self.__setLocationText(self.locationText)
        self.__setHintText(self.hintText)
        if not waitForLocation:
            screenshot = random.choice(tutorialShots_MoveAim)
            self.__setLoadingArt(screenshot)
def showHint(self, destId=None, ocean=False):
if base.config.GetBool('no-loading-screen', 0) or not self.locationLabel:
return
if ocean:
hint = getOceanHint()
else:
if hasattr(base, 'localAvatar'):
totalReputation = 0
level = base.localAvatar.getLevel()
| |
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
# Attach arithmetic operators and reduction methods to Variable.
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
    """Wrapper for accommodating a pandas.Index in an xarray.Variable.

    IndexVariable preserve loaded values in the form of a pandas.Index instead
    of a NumPy array. Hence, their values are immutable and must always be one-
    dimensional.

    They also have a name property, which is the name of their sole dimension
    unless another name is given.
    """

    __slots__ = ()

    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        super().__init__(dims, data, attrs, encoding, fastpath)
        if self.ndim != 1:
            raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)

        # Unlike in Variable, always eagerly load values into memory
        if not isinstance(self._data, PandasIndexAdapter):
            self._data = PandasIndexAdapter(self._data)

    def __dask_tokenize__(self):
        from dask.base import normalize_token

        # Don't waste time converting pd.Index to np.ndarray
        return normalize_token((type(self), self._dims, self._data.array, self._attrs))

    def load(self):
        # data is already loaded into memory for IndexVariable
        return self

    # https://github.com/python/mypy/issues/1465
    @Variable.data.setter  # type: ignore
    def data(self, data):
        raise ValueError(
            f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
            f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
        )

    @Variable.values.setter  # type: ignore
    def values(self, values):
        raise ValueError(
            f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
            f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
        )

    def chunk(self, chunks=None, name=None, lock=False):
        # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
        # NOTE: `chunks` previously defaulted to a mutable `{}` (a shared
        # mutable-default hazard); it is never used here, so a None default
        # is safe and backward-compatible.
        return self.copy(deep=False)

    def _as_sparse(self, sparse_format=_default, fill_value=_default):
        # Dummy
        return self.copy(deep=False)

    def _to_dense(self):
        # Dummy
        return self.copy(deep=False)

    def _finalize_indexing_result(self, dims, data):
        if getattr(data, "ndim", 0) != 1:
            # returns Variable rather than IndexVariable if multi-dimensional
            return Variable(dims, data, self._attrs, self._encoding)
        else:
            return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)

    def __setitem__(self, key, value):
        # IndexVariable values are backed by an immutable pandas.Index.
        raise TypeError("%s values cannot be modified" % type(self).__name__)

    @classmethod
    def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
        """Specialized version of Variable.concat for IndexVariable objects.

        This exists because we want to avoid converting Index objects to NumPy
        arrays, if possible.
        """
        if not isinstance(dim, str):
            (dim,) = dim.dims

        variables = list(variables)
        first_var = variables[0]

        if any(not isinstance(v, cls) for v in variables):
            raise TypeError(
                "IndexVariable.concat requires that all input "
                "variables be IndexVariable objects"
            )

        indexes = [v._data.array for v in variables]

        if not indexes:
            data = []
        else:
            data = indexes[0].append(indexes[1:])

            if positions is not None:
                indices = nputils.inverse_permutation(np.concatenate(positions))
                data = data.take(indices)

        attrs = dict(first_var.attrs)
        if not shortcut:
            for var in variables:
                if var.dims != first_var.dims:
                    raise ValueError("inconsistent dimensions")
                utils.remove_incompatible_items(attrs, var.attrs)

        return cls(first_var.dims, data, attrs)

    def copy(self, deep=True, data=None):
        """Returns a copy of this object.

        `deep` is ignored since data is stored in the form of
        pandas.Index, which is already immutable. Dimensions, attributes
        and encodings are always copied.

        Use `data` to create a new object with the same structure as
        original but entirely new data.

        Parameters
        ----------
        deep : bool, optional
            Deep is ignored when data is given. Whether the data array is
            loaded into memory and copied onto the new object. Default is True.
        data : array_like, optional
            Data to use in the new object. Must have same shape as original.

        Returns
        -------
        object : Variable
            New object with dimensions, attributes, encodings, and optionally
            data copied from original.
        """
        if data is None:
            data = self._data.copy(deep=deep)
        else:
            data = as_compatible_data(data)
            if self.shape != data.shape:
                raise ValueError(
                    "Data shape {} must match shape of object {}".format(
                        data.shape, self.shape
                    )
                )
        return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)

    def equals(self, other, equiv=None):
        # if equiv is specified, super up
        if equiv is not None:
            return super().equals(other, equiv)

        # otherwise use the native index equals, rather than looking at _data
        other = getattr(other, "variable", other)
        try:
            return self.dims == other.dims and self._data_equals(other)
        except (TypeError, AttributeError):
            return False

    def _data_equals(self, other):
        return self.to_index().equals(other.to_index())

    def to_index_variable(self):
        """Return this variable as an xarray.IndexVariable"""
        return self

    to_coord = utils.alias(to_index_variable, "to_coord")

    def to_index(self):
        """Convert this variable to a pandas.Index"""
        # n.b. creating a new pandas.Index from an old pandas.Index is
        # basically free as pandas.Index objects are immutable
        assert self.ndim == 1
        index = self._data.array
        if isinstance(index, pd.MultiIndex):
            # set default names for multi-index unnamed levels so that
            # we can safely rename dimension / coordinate later
            valid_level_names = [
                name or "{}_level_{}".format(self.dims[0], i)
                for i, name in enumerate(index.names)
            ]
            index = index.set_names(valid_level_names)
        else:
            index = index.set_names(self.name)
        return index

    @property
    def level_names(self):
        """Return MultiIndex level names or None if this IndexVariable has no
        MultiIndex.
        """
        index = self.to_index()
        if isinstance(index, pd.MultiIndex):
            return index.names
        else:
            return None

    def get_level_variable(self, level):
        """Return a new IndexVariable from a given MultiIndex level."""
        if self.level_names is None:
            raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
        index = self.to_index()
        return type(self)(self.dims, index.get_level_values(level))

    @property
    def name(self):
        # An IndexVariable's name is the name of its sole dimension.
        return self.dims[0]

    @name.setter
    def name(self, value):
        raise AttributeError("cannot modify name of IndexVariable in-place")
# Deprecated alias kept for backwards compatibility with older xarray code.
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
    """Merge the dimensions of *variables* into a single dim -> size mapping.

    Raises ValueError if any variable repeats a dimension name, or if two
    variables disagree on the size of a shared dimension.
    """
    all_dims = {}
    for var in variables:
        var_dims = var.dims
        if len(set(var_dims)) < len(var_dims):
            raise ValueError(
                "broadcasting cannot handle duplicate "
                "dimensions: %r" % list(var_dims)
            )
        for dim, size in zip(var_dims, var.shape):
            # setdefault records a new dim or returns the known size.
            known_size = all_dims.setdefault(dim, size)
            if known_size != size:
                raise ValueError(
                    "operands cannot be broadcast together "
                    "with mismatched lengths for dimension %r: %s"
                    % (dim, (known_size, size))
                )
    return all_dims
def _broadcast_compat_variables(*variables):
    """Create broadcast compatible variables, with the same dimensions.

    Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
    """
    target_dims = tuple(_unified_dims(variables))
    return tuple(
        var if var.dims == target_dims else var.set_dims(target_dims)
        for var in variables
    )
def broadcast_variables(*variables):
    """Given any number of variables, return variables with matching dimensions
    and broadcast data.

    The data on the returned variables will be a view of the data on the
    corresponding original arrays, but dimensions will be reordered and
    inserted so that both broadcast arrays have the same dimensions. The new
    dimensions are sorted in order of appearance in the first variable's
    dimensions followed by the second variable's dimensions.
    """
    dims_map = _unified_dims(variables)
    dims_tuple = tuple(dims_map)
    broadcast = []
    for var in variables:
        # Variables already carrying the full dimension tuple are reused as-is.
        broadcast.append(var if var.dims == dims_tuple else var.set_dims(dims_map))
    return tuple(broadcast)
def _broadcast_compat_data(self, other):
    """Return aligned (self_data, other_data, dims) for a binary operation."""
    variable_api = ("dims", "data", "shape", "encoding")
    if all(hasattr(other, attr) for attr in variable_api):
        # `other` satisfies the necessary Variable API for broadcast_variables
        new_self, new_other = _broadcast_compat_variables(self, other)
        return new_self.data, new_other.data, new_self.dims
    # otherwise rely on numpy broadcasting rules
    return self.data, other, self.dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
| |
<filename>tensorflow/contrib/tensor_forest/python/tensor_forest.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
# Sentinel stored in tree[i][0]: if tree[i][0] equals this value, then node i
# is a leaf node.
LEAF_NODE = -1
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
  """A base class for holding hyperparameters and calculating good defaults.

  Fixes in this revision: comment typos ("N-dimenensional", "doesn't effect",
  "as much as space") and the misspelled local variable
  `num_split_initializiations_per_input`. No behavioral change.
  """

  def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
               max_depth=0, num_splits_to_consider=0,
               feature_bagging_fraction=1.0,
               max_fertile_nodes=0, split_after_samples=250,
               valid_leaf_threshold=1, **kwargs):
    """Stores the given hyperparameters; extra kwargs become attributes."""
    self.num_trees = num_trees
    self.max_nodes = max_nodes
    self.bagging_fraction = bagging_fraction
    self.feature_bagging_fraction = feature_bagging_fraction
    self.max_depth = max_depth
    self.num_splits_to_consider = num_splits_to_consider
    self.max_fertile_nodes = max_fertile_nodes
    self.split_after_samples = split_after_samples
    self.valid_leaf_threshold = valid_leaf_threshold

    for name, value in kwargs.items():
      setattr(self, name, value)

  def values(self):
    """Returns all hyperparameters as a dict (the instance's __dict__)."""
    return self.__dict__

  def fill(self):
    """Intelligently sets any non-specific parameters."""
    # Fail fast if num_classes or num_features isn't set.
    _ = getattr(self, 'num_classes')
    _ = getattr(self, 'num_features')

    self.training_library_base_dir = getattr(
        self, 'training_library_base_dir', '')
    self.inference_library_base_dir = getattr(
        self, 'inference_library_base_dir', '')

    self.bagged_num_features = int(self.feature_bagging_fraction *
                                   self.num_features)
    self.bagged_features = None
    if self.feature_bagging_fraction < 1.0:
      # One random feature subset per tree.
      self.bagged_features = [random.sample(
          range(self.num_features),
          self.bagged_num_features) for _ in range(self.num_trees)]

    self.regression = getattr(self, 'regression', False)

    # Num_outputs is the actual number of outputs (a single prediction for
    # classification, a N-dimensional point for regression).
    self.num_outputs = self.num_classes if self.regression else 1

    # Add an extra column to classes for storing counts, which is needed for
    # regression and avoids having to recompute sums for classification.
    self.num_output_columns = self.num_classes + 1

    # Allow each tree to be unbalanced by up to a factor of 2.
    self.max_depth = (self.max_depth or
                      int(2 * math.ceil(math.log(self.max_nodes, 2))))

    # The Random Forest literature recommends sqrt(# features) for
    # classification problems, and p/3 for regression problems.
    # TODO(thomaswc): Consider capping this for large number of features.
    self.num_splits_to_consider = (
        self.num_splits_to_consider or
        max(10, int(math.ceil(math.sqrt(self.num_features)))))

    # max_fertile_nodes doesn't affect performance, only training speed.
    # We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node.  We want the fertile nodes to in
    # total only take up as much space as the non-fertile nodes, so
    num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulate slots.
    num_fertile = max(num_fertile, 1000)
    self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
    # But it also never needs to be larger than the number of leaves,
    # which is max_nodes / 2.
    self.max_fertile_nodes = min(self.max_fertile_nodes,
                                 int(math.ceil(self.max_nodes / 2.0)))

    # We have num_splits_to_consider slots to fill, and we want to spend
    # approximately split_after_samples samples initializing them.
    num_split_initializations_per_input = max(1, int(math.floor(
        self.num_splits_to_consider / self.split_after_samples)))
    self.split_initializations_per_input = getattr(
        self, 'split_initializations_per_input',
        num_split_initializations_per_input)

    # If base_random_seed is 0, the current time will be used to seed the
    # random number generators for each tree.  If non-zero, the i-th tree
    # will be seeded with base_random_seed + i.
    self.base_random_seed = getattr(self, 'base_random_seed', 0)

    return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
  """Stores tf.Variables for training a single random tree.
  Uses tf.get_variable to get tree-specific names so that this can be used
  with a tf.learn-style implementation (one that trains a model, saves it,
  then relies on restoring that model to evaluate).
  """

  def __init__(self, params, tree_num, training):
    """Creates all variables for tree `tree_num`.

    Args:
      params: hyper-parameter container; must provide max_nodes,
        max_fertile_nodes, num_splits_to_consider, num_output_columns and
        regression.
      tree_num: integer index of this tree, used only to namespace the
        variable names.
      training: if True, also create the variables needed only during
        training (candidate splits, accumulators, fertility bookkeeping).
    """
    # Per-node bookkeeping rows. [-1, -1] marks the root and [-2, -1] the
    # not-yet-allocated nodes -- presumably sentinel codes understood by the
    # underlying forest ops; TODO confirm their exact meaning.
    self.tree = tf.get_variable(
        name=self.get_tree_name('tree', tree_num), dtype=tf.int32,
        initializer=tf.constant(
            [[-1, -1]] + [[-2, -1]] * (params.max_nodes - 1)))
    # Split threshold for each node (-1.0 = unset).
    self.tree_thresholds = tf.get_variable(
        name=self.get_tree_name('tree_thresholds', tree_num),
        shape=[params.max_nodes],
        initializer=tf.constant_initializer(-1.0))
    # Depth of each node; the root starts at depth 1.
    self.tree_depths = tf.get_variable(
        name=self.get_tree_name('tree_depths', tree_num),
        shape=[params.max_nodes],
        dtype=tf.int32,
        initializer=tf.constant_initializer(1))
    # Index one past the last allocated node (starts just after the root).
    self.end_of_tree = tf.get_variable(
        name=self.get_tree_name('end_of_tree', tree_num),
        dtype=tf.int32,
        initializer=tf.constant([1]))
    if training:
      # Leaves without candidate splits, plus a score used to decide which
      # of them to make fertile next.
      self.non_fertile_leaves = tf.get_variable(
          name=self.get_tree_name('non_fertile_leaves', tree_num),
          dtype=tf.int32,
          initializer=tf.constant([0]))
      self.non_fertile_leaf_scores = tf.get_variable(
          name=self.get_tree_name('non_fertile_leaf_scores', tree_num),
          initializer=tf.constant([1.0]))
      # Maps a node index to its accumulator slot (-1 = not fertile).
      self.node_to_accumulator_map = tf.get_variable(
          name=self.get_tree_name('node_to_accumulator_map', tree_num),
          shape=[params.max_nodes],
          dtype=tf.int32,
          initializer=tf.constant_initializer(-1))
      # Candidate (feature, threshold) pairs per fertile node; a feature of
      # -1 marks an empty candidate slot.
      self.candidate_split_features = tf.get_variable(
          name=self.get_tree_name('candidate_split_features', tree_num),
          shape=[params.max_fertile_nodes, params.num_splits_to_consider],
          dtype=tf.int32,
          initializer=tf.constant_initializer(-1))
      self.candidate_split_thresholds = tf.get_variable(
          name=self.get_tree_name('candidate_split_thresholds', tree_num),
          shape=[params.max_fertile_nodes, params.num_splits_to_consider],
          initializer=tf.constant_initializer(0.0))
    # Statistics shared by classification and regression.
    self.node_sums = tf.get_variable(
        name=self.get_tree_name('node_sums', tree_num),
        shape=[params.max_nodes, params.num_output_columns],
        initializer=tf.constant_initializer(0.0))
    if training:
      self.candidate_split_sums = tf.get_variable(
          name=self.get_tree_name('candidate_split_sums', tree_num),
          shape=[params.max_fertile_nodes, params.num_splits_to_consider,
                 params.num_output_columns],
          initializer=tf.constant_initializer(0.0))
      # Accumulator slots start at -1, presumably meaning "unused" -- TODO
      # confirm against the forest ops.
      self.accumulator_sums = tf.get_variable(
          name=self.get_tree_name('accumulator_sums', tree_num),
          shape=[params.max_fertile_nodes, params.num_output_columns],
          initializer=tf.constant_initializer(-1.0))
      # Regression also tracks second order stats.
      if params.regression:
        self.node_squares = tf.get_variable(
            name=self.get_tree_name('node_squares', tree_num),
            shape=[params.max_nodes, params.num_output_columns],
            initializer=tf.constant_initializer(0.0))
        self.candidate_split_squares = tf.get_variable(
            name=self.get_tree_name('candidate_split_squares', tree_num),
            shape=[params.max_fertile_nodes, params.num_splits_to_consider,
                   params.num_output_columns],
            initializer=tf.constant_initializer(0.0))
        self.accumulator_squares = tf.get_variable(
            name=self.get_tree_name('accumulator_squares', tree_num),
            shape=[params.max_fertile_nodes, params.num_output_columns],
            initializer=tf.constant_initializer(-1.0))
      else:
        # Classification: scalar constants stand in so downstream code can
        # hand the same attributes to the ops without allocating storage.
        self.node_squares = tf.constant(
            0.0, name=self.get_tree_name('node_squares', tree_num))
        self.candidate_split_squares = tf.constant(
            0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
        self.accumulator_squares = tf.constant(
            0.0, name=self.get_tree_name('accumulator_squares', tree_num))

  def get_tree_name(self, name, num):
    """Returns a per-tree variable name, e.g. 'node_sums-3'."""
    return '{0}-{1}'.format(name, num)
class ForestStats(object):
  """A simple container for per-tree statistics about a forest."""

  def __init__(self, tree_stats, params):
    self.tree_stats = tree_stats
    self.params = params

  def get_average(self, thing):
    """Return the mean of attribute `thing` over the first num_trees stats."""
    num_trees = self.params.num_trees
    # Start the sum at 0.0 so the division is always float division.
    total = sum((getattr(self.tree_stats[idx], thing)
                 for idx in range(num_trees)), 0.0)
    return total / num_trees
class TreeStats(object):
  """Record of size statistics (node and leaf counts) for a single tree."""

  def __init__(self, num_nodes, num_leaves):
    self.num_nodes, self.num_leaves = num_nodes, num_leaves
class ForestTrainingVariables(object):
  """Container holding the training variables for every tree in a forest.

  One TreeTrainingVariables instance is created per tree, on the device
  chosen by `device_assigner`. Indexing the container yields the variables
  for a single tree:

    forest_variables = ForestTrainingVariables(params, device_assigner)
    ... forest_variables[0].tree ...
  """

  def __init__(self, params, device_assigner, training=True,
               tree_variable_class=TreeTrainingVariables):
    self.variables = []
    for tree_num in range(params.num_trees):
      device_name = device_assigner.get_device(tree_num)
      # Variables must be created under the tree's assigned device.
      with tf.device(device_name):
        tree_vars = tree_variable_class(params, tree_num, training)
      self.variables.append(tree_vars)

  def __setitem__(self, t, val):
    self.variables[t] = val

  def __getitem__(self, t):
    return self.variables[t]
class RandomForestDeviceAssigner(object):
  """A device assigner that uses the default device.

  Write subclasses that implement get_device for control over how trees
  get assigned to devices. This assumes that whole trees are assigned
  to a device.
  """

  def __init__(self):
    self.cached = None

  def get_device(self, unused_tree_num):
    """Return the default device, resolving (and caching) it lazily."""
    if not self.cached:
      self.cached = tf.constant(0).device
    return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None, variables=None,
tree_graphs=None,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
tf.logging.info('Constructing forest with params = ')
tf.logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(self.params.training_library_base_dir),
i_ops.Load(self.params.inference_library_base_dir), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = tf.split(1, self.params.num_features, input_data)
return tf.concat(1, [split_data[ind]
for ind in self.params.bagged_features[tree_num]])
  def training_graph(self, input_data, input_labels):
    """Constructs a TF graph for training a random forest.
    Args:
      input_data: A tensor or placeholder for input data.
      input_labels: A tensor or placeholder for labels associated with
        input_data.
    Returns:
      The last op in the random forest training graph.
    """
    tree_graphs = []
    for i in range(self.params.num_trees):
      with tf.device(self.device_assigner.get_device(i)):
        # Give each tree a distinct seed (base + tree index); a base seed
        # of 0 leaves seeding to the runtime.
        seed = self.params.base_random_seed
        if seed != 0:
          seed += i
        # If using bagging, randomly select some of the input.
        tree_data = input_data
        tree_labels = input_labels
        if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacment. Consider
          # also allowing sampling with replacement as an option.
          batch_size = tf.slice(tf.shape(input_data), [0], [1])
          # Keep each row independently with probability bagging_fraction.
          r = tf.random_uniform(batch_size, seed=seed)
          mask = tf.less(r, tf.ones_like(r) * self.params.bagging_fraction)
          gather_indices = tf.squeeze(tf.where(mask), squeeze_dims=[1])
          # TODO(thomaswc): Calculate out-of-bag data and labels, and store
          # them for use in calculating statistics later.
          tree_data = tf.gather(input_data, gather_indices)
          tree_labels = tf.gather(input_labels, gather_indices)
        if self.params.bagged_features:
          tree_data = self._bag_features(i, tree_data)
        tree_graphs.append(
            self.trees[i].training_graph(tree_data, tree_labels, seed))
    # A single op that depends on every tree's training op.
    return tf.group(*tree_graphs)
def inference_graph(self, input_data):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or placeholder for input data.
Returns:
The last op in the random forest inference graph.
"""
probabilities = []
for i in range(self.params.num_trees):
with tf.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
| |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Completion evaluation code for JavaScript"""
import logging
import types
import re
from pprint import pformat
from itertools import chain
from codeintel2.common import *
from codeintel2.util import indent
from codeintel2.tree import TreeEvaluator
class CandidatesForTreeEvaluator(TreeEvaluator):
    # Note: the "alt" changes added in change 281350 make some of the
    # functionality on this class *not* appropriate for the shared
    # TreeEvaluator. I.e. _elem_from_scoperef et al should be moved
    # *out* of CandidatesForTreeEvaluator.

    # This is a dict when set, multiple elements that have the same lpath will
    # be set in here, ensuring we get the correct one from an lpath lookup.
    # Fixes the following bug:
    # http://bugs.activestate.com/show_bug.cgi?id=71666
    # Ideally, this would not be needed once elem.names[] can return a tuple,
    # see the following bug for reference:
    # http://bugs.activestate.com/show_bug.cgi?id=71941
    _alt_elem_from_scoperef = None

    def _elem_from_scoperef(self, scoperef):
        """A scoperef is (<blob>, <lpath>). Return the actual elem in
        the <blob> ciElementTree being referred to.
        """
        elem = scoperef[0]
        i = 0
        for lname in scoperef[1]:
            i += 1
            if self._alt_elem_from_scoperef is not None:
                # Prefer an "alt" element registered for this dotted prefix
                # (see the class comment on _alt_elem_from_scoperef above).
                scoperef_names = ".".join(scoperef[1][:i])
                alt_elem = self._alt_elem_from_scoperef.get(scoperef_names)
                if alt_elem is not None:
                    elem = alt_elem
                    continue
            elem = elem.names[lname]
        return elem

    def _tokenize_citdl_expr(self, expr):
        """Generator yielding the tokens of a CITDL expression.

        E.g. 'foo.bar()' yields 'foo', 'bar', '()'. Quoted strings and
        string subscripts ('foo["bar"]') yield their unquoted content.
        """
        # Walk the expression as (char, lookahead) pairs; next_ is None on
        # the final character.
        chars = iter(zip(expr, chain(expr[1:], (None,))))
        buffer = []
        def get_pending_token():
            # Flush the identifier accumulated so far (closes over `buffer`).
            if buffer:
                yield "".join(buffer)
                del buffer[:]
        def get_quoted_string(ch):
            # Consume chars up to the matching close quote; a backslash
            # escapes the following character.
            quote = ch
            local_buffer = []
            for ch, next_ in chars:
                # print "quote: quote=[%s] ch=[%s] next=[%s] token=%r" % (
                # quote, ch, next_, local_buffer)
                if ch == "\\":
                    local_buffer.append(next(chars)[0])
                elif ch == quote:
                    if local_buffer:
                        yield "".join(local_buffer)
                    break
                else:
                    local_buffer.append(ch)
        BLOCK_MAP = {"(": ")", "[": "]"}
        for ch, next_ in chars:
            # print "ch=[%s] next=[%s] token=%r" % (ch, next_, buffer)
            if ch in ('"', "'"):  # quoted string
                for token in get_pending_token():
                    yield token
                for token in get_quoted_string(ch):
                    yield token
            elif ch == ".":
                # A dot terminates the pending token.
                for token in get_pending_token():
                    yield token
                buffer = []
            elif ch in BLOCK_MAP:
                block = [ch, BLOCK_MAP[ch]]
                # Only '[...]' subscript contents are emitted as tokens;
                # '(...)' call contents collapse to a bare '()'.
                emit = ch in ("[",)
                for token in get_pending_token():
                    yield token
                if next_ == block[1]:
                    next(chars)  # consume the close bracket
                    yield block[0] + block[1]
                elif next_ in ('"', "'"):  # quoted string
                    next(chars)  # consume the open quote
                    next_tokens = list(get_quoted_string(next_))
                    ch, next_ = next(chars)
                    if ch == block[1] and emit:
                        for next_token in next_tokens:
                            yield next_token
                    else:
                        yield block[0] + block[1]
                else:
                    buffer.append(ch)
        if buffer:
            yield "".join(buffer)

    def _join_citdl_expr(self, tokens):
        """Re-join tokens into a dotted CITDL expression ('a', '()' -> 'a()')."""
        return '.'.join(tokens).replace('.()', '()')
class JavaScriptTreeEvaluator(CandidatesForTreeEvaluator):
def eval_cplns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
if self.trg.type == "names":
cplns = list(self._completion_names_from_scope(self.expr,
start_scoperef))
else:
hits = self._hits_from_citdl(self.expr, start_scoperef)
cplns = list(self._members_from_hits(hits))
if not cplns:
raise CodeIntelError("No completions found")
# For logging messages every call
# print indent('\n'.join("%s: %s" % (lvl, args and m % (args) or m)
# for lvl,m, args in self.ctlr.log))
# print indent('\n'.join(["Hit: %r" % (cpln, ) for cpln in cplns]))
return cplns
def eval_calltips(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef)
if not hits:
raise CodeIntelError("No calltips found")
return self._calltips_from_hits(hits)
def eval_defns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef, defn_only=True)
if not hits:
raise CodeIntelError("No definitions found")
return [self._defn_from_hit(x) for x in hits]
def parent_scoperef_from_scoperef(self, scoperef,
started_in_builtin_window_scope=False):
"""
For JavaScript-in-the-browser the top-level scope is the
Window object instance. For now we are always presuming we
are running in the browser if the language is JavaScript.
Problem: if we *started* on the Window class then the parent
scope should be -> built-in-blob. This is what
'started_in_builtin_window_scope' is used for.
"""
blob, lpath = scoperef
global_var = self._global_var
if not started_in_builtin_window_scope \
and lpath == [global_var] and blob is self.built_in_blob:
return None
elif lpath:
return (blob, lpath[:-1])
elif blob is self.built_in_blob:
if started_in_builtin_window_scope:
return None
elif global_var is not None:
return (self.built_in_blob, [global_var])
else:
return (self.built_in_blob, [])
@property
def _global_var(self):
"""
The type of the global variable
"""
if self.trg.lang == "Node.js":
return "global"
return "Window"
_langintel = None
@property
def langintel(self):
if self._langintel is None:
self._langintel = self.mgr.langintel_from_lang(self.trg.lang)
return self._langintel
_libs = None
@property
def libs(self):
if self._libs is None:
self._libs = self.langintel.libs_from_buf(self.buf)
return self._libs
@property
def stdlib(self):
# JS stdlib is always the last one.
return self.libs[-1]
_built_in_blob = None
@property
def built_in_blob(self):
if self._built_in_blob is None:
self._built_in_blob = self.stdlib.get_blob("*")
return self._built_in_blob
## Specific element completions ##
    def _hit_from_first_token(self, token, scoperef):
        """Find the token at the given or a parent scope.
        Returns the found elem and the scope at which it was found. If
        not found, this returns (None, None).
        """
        self.log("find '%s' starting at %s", token, scoperef)
        # Because we fake JavaScript classes and put the ctor
        # function inside the class, we need to push start scopes at
        # the class to the ctor. See test
        # javascript/cpln/ctor_scope_cheat for an example of why.
        try:
            elem = self._elem_from_scoperef(scoperef)
        except KeyError as ex:
            self.warn("_hit_from_first_token:: no elem for scoperef: %r",
                      scoperef)
            return (None, None)
        if elem.get("ilk") == "class":
            class_name = elem.get("name")
            try:
                ctor = elem.names[class_name]
            except KeyError:
                pass
            else:
                if "__ctor__" in ctor.get("attributes", ""):
                    scoperef = (scoperef[0], scoperef[1]+[class_name])
                    self.log("push scope to class ctor %s", scoperef)
        # Remember whether the walk began at the implicit global scope so
        # parent_scoperef_from_scoperef() knows where to stop.
        started_in_builtin_window_scope = (scoperef[0] is self.built_in_blob
                                           and scoperef[1] and scoperef[1][0] == self._global_var)
        # Walk up the scope chain until the token resolves or we run out of
        # parent scopes.
        while 1:
            try:
                elem = self._elem_from_scoperef(scoperef)
            except KeyError as ex:
                raise EvalError("could not resolve scoperef %r: %s"
                                % (scoperef, ex))
            try:
                candidate = elem.names[token]
                if "__ctor__" in candidate.get("attributes", ""):
                    # In JavaScript we include the constructor
                    # function for a (faked) class as a method.
                    # We must skip it here or resolution of 'this'
                    # in a JS class methods will always hit the ctor
                    # instead of the class (which is by far the
                    # common case).
                    raise KeyError("skipping JavaScript ctor")
                self.log("is '%s' accessible on %s? yes", token, scoperef)
                return candidate, scoperef
            except KeyError:
                self.log("is '%s' accessible on %s? no", token, scoperef)
                scoperef = self.parent_scoperef_from_scoperef(scoperef,
                                                              started_in_builtin_window_scope)
                if not scoperef:
                    return None, None
def _members_from_hits(self, hits):
members = set()
curr_blob = self.buf.blob_from_lang.get(self.lang, None)
for elem, scope in hits:
# In JavaScript we include the constructor function for a
# (faked) class as a method. Completion on an instance of
# this class shouldn't see the ctor.
skip_js_ctor = (elem.tag == "scope" and elem.get("ilk") == "class")
if elem.get("ilk") == "function":
# Functions have an implicit citdl type of "Function". | |
self.total_ess = self.calculate_ess(self.sinf_logw)
return sigma
def init_weights_cleanup(self, logq_func=None, dlogq_func=None):
"""Finish initializing the first importance weights (including possible local exploration)."""
self.sinf_logw = np.copy(self.log_weight)
self.importance_weights = np.copy(self.weights)
if self.init_local:
self.local_exploration(logq_func=logq_func, dlogq_func=dlogq_func,
log_thresh=np.log(self.local_thresh))
self.weighted_samples = np.append(self.weighted_samples, self.local_samples, axis=0)
self.nf_samples = np.append(self.nf_samples, self.local_samples, axis=0)
self.log_weight = np.append(self.log_weight, self.local_log_weight)
self.weights = np.append(self.weights, self.local_weights)
self.sinf_logw = np.copy(self.log_weight)
self.importance_weights = np.copy(self.weights)
def run_sinf(self, bw_factor, train_samples, val_samples=None, train_weights=None, val_weights=None,
final=False):
"""Fit SINF given a set of samples (and weights)."""
if final:
sinf_alpha = self.final_alpha
sinf_iteration = self.final_iteration
elif not final:
sinf_alpha = self.alpha
sinf_iteration = self.iteration
if (val_samples is not None and train_weights is not None and val_weights is not None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
torch.from_numpy(val_samples.astype(np.float32)),
weight_train=torch.from_numpy(train_weights.astype(np.float32)),
weight_validate=torch.from_numpy(val_weights.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is None and train_weights is not None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
weight_train=torch.from_numpy(train_weights.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is not None and train_weights is None and val_weights is None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
torch.from_numpy(val_samples.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is None and train_weights is None and val_weights is None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
return q
def get_sim_data(self, point):
"""Generate simulated data using the supplied simulator function."""
size = to_tuple(self.sim_size)
params = draw_values([*self.params], point=point, size=1)
forward_sim = self.simulator(*params)
self.sim_data = forward_sim + np.random.multivariate_normal(mu=0, cov=self.sim_data_cov)
self.sim_params = np.array([])
for p in params:
self.sim_params = np.append(self.sim_params, p)
if self.sim_params.size == 1:
self.sim_params = np.array([self.sim_params])
self.sim_params = self.sim_params.squeeze()
def simulation_init(self):
"""Initialize the model using a simulation-based init (generalization of the Ensemble Kalman filter). INCOMPLETE!"""
assert self.model_data is not None
self.data_MAP = self.get_MAP(map_method=self.sim_optim_method, start=self.start)
self.data_map_arr = np.array([])
for v in self.variables:
self.data_map_arr = np.append(self.data_map_arr, self.data_MAP[v.name])
self.data_map_arr = self.data_map_arr.squeeze()
if self.sim_start is None:
# Check this - only really want MAP of the hyper-params. Maybe can't have self.sim_start as None.
self.sim_start = self.data_MAP
self.sim_samples = np.empty((0, len(self.data_map_arr)))
self.sim_logp_diff = 1000
sim_iter = 1
while self.sim_logp_diff > self.sim_tol:
print(f'Running simulation init iteration: {sim_iter}.')
self.get_sim_data(point=self.sim_start)
set_data({self.model_data.keys(): self.sim_data}, model=self.model)
self.sim_MAP = self.get_MAP(map_method=self.sim_optim_method, start=self.sim_start)
self.sim_map_arr = np.array([])
for v in self.variables:
self.sim_map_arr = np.append(self.sim_map_arr, self.sim_MAP[v.name])
self.sim_map_arr = self.sim_map_arr.squeeze()
self.map_diff = self.sim_map_arr - self.sim_params
self.sim_update = self.data_map_arr + self.map_diff
self.sim_samples = np.append(self.sim_samples, self.sim_update)
set_data({self.model_data.keys(): self.sim_data}, model=self.model)
self.old_logp = self.get_posterior_logp(self.sim_params.reshape(-1, self.sim_params.size))
self.new_logp = self.get_posterior_logp(self.sim_update.reshape(-1, self.sim_update.size))
self.sim_logp_diff = abs(self.old_logp - self.new_logp) / max(abs(self.old_logp), abs(self.new_logp), 1)
sim_stage += 1
self.mu_map = 1.0 * self.sim_update
self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
self.regularize_weights()
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weight_pq()
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3 +self.log_evidence_pq) )**2 ))
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
    def optimization_start(self):
        """Setup for optimization starting point.

        Resolves `self.start` to a concrete Point, builds the dict<->array
        bijection over the free variables, and prepares array-based
        logp/dlogp callables for the optimizer.
        """
        # NOTE(review): `disc_vars` is computed but never used -- presumably
        # intended for a warning/assert about discrete variables; confirm.
        disc_vars = list(typefilter(self.variables, discrete_types))
        allinmodel(self.variables, self.model)
        # Deep-copy so later in-place updates don't mutate the caller's dict.
        self.start = copy.deepcopy(self.start)
        if self.start is None:
            self.start = self.model.test_point
        else:
            update_start_vals(self.start, self.model.test_point, self.model)
        check_start_vals(self.start, self.model)
        self.start = Point(self.start, model=self.model)
        # Flatten the start point and wrap the model's logp/dlogp so they
        # accept flat parameter arrays.
        self.bij = DictToArrayBijection(ArrayOrdering(self.variables), self.start)
        self.start = self.bij.map(self.start)
        self.adam_logp = self.bij.mapf(self.model.fastlogp_nojac)
        self.adam_dlogp = self.bij.mapf(self.model.fastdlogp_nojac(self.variables))
def update_adam(self, step, opt_state, opt_update, get_params):
"""Jax implemented ADAM update."""
params = np.asarray(get_params(opt_state)).astype(np.float64)
value = np.float64(self.adam_logp(floatX(params.squeeze())))
grads = -1 * jnp.asarray(np.float64(self.adam_dlogp(floatX(params.squeeze()))))
opt_state = opt_update(step, grads, opt_state)
update_params = np.asarray(get_params(opt_state)).astype(np.float64)
return value, opt_state, update_params
    def adam_map_hess(self):
        """Use ADAM to find the MAP solution.

        Runs Jax ADAM on the model logp, builds a Laplace (inverse-Hessian)
        Gaussian around the MAP, draws the initial sample set from it and
        initializes the importance weights and evidence estimates.
        """
        self.optimization_start()
        opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
                                                               b2=self.adam_b2, eps=self.adam_eps)
        opt_state = opt_init(self.start)
        for i in range(self.adam_steps):
            value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
            # Relative change in logp is the convergence criterion.
            target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) /
                                 max(value, np.float64(self.adam_logp(floatX(update_params)))))
            if target_diff <= self.ftol:
                print(f'ADAM converged at step {i}')
                break
        # Map the flat parameter array back to named model variables.
        vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
        self.map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
        self.mu_map = np.array([])
        for v in self.variables:
            self.mu_map = np.append(self.mu_map, self.map_dict[v.name])
        self.mu_map = self.mu_map.squeeze()
        if self.mu_map.size == 1:
            self.mu_map = np.array([self.mu_map])
        print(f'BIJ rmap = {self.map_dict}')
        print(f'ADAM map solution = {self.mu_map}')
        # Laplace approximation: covariance = inverse Hessian at the MAP.
        if self.mu_map.size == 1:
            self.hess_inv = 1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))
        else:
            self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
        if not posdef.isPD(self.hess_inv):
            print(f'Autodiff Hessian is not positive semi-definite. Building Hessian with L-BFGS run starting from ADAM MAP.')
            self.scipy_opt = minimize(self.optim_target_logp_nojac, x0=self.mu_map, method='L-BFGS-B',
                                      options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
                                      jac=self.optim_target_dlogp)
            print(f'lbfgs Hessian inverse = {self.scipy_opt.hess_inv.todense()}')
            self.hess_inv = self.scipy_opt.hess_inv.todense()
        print(f'Final MAP solution = {self.mu_map}')
        print(f'Inverse Hessian at MAP = {self.hess_inv}')
        # Initial importance sample from the Laplace approximation.
        self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
        self.nf_samples = np.copy(self.weighted_samples)
        self.get_posterior_logp()
        # Importance weights w = p/q and evidence estimate Z = mean(w).
        self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
        self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
        self.evidence = np.exp(self.log_evidence)
        self.log_weight = self.log_weight - self.log_evidence
        #same as in fitnf but prior~q
        self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
        self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
        self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
        self.evidence_pq = np.exp(self.log_evidence_pq)
        #sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
        self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
        self.regularize_weights()
        # NOTE(review): init_weights_cleanup/ESS are computed here and again
        # after shrink_init below; the first pass looks redundant -- confirm.
        self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
                                  jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
        self.q_ess = self.calculate_ess(self.log_weight)
        self.total_ess = self.calculate_ess(self.sinf_logw)
        self.hess_inv = self.shrink_init(self.mu_map, self.hess_inv)
        self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
                                  jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
        self.q_ess = self.calculate_ess(self.log_weight)
        self.total_ess = self.calculate_ess(self.sinf_logw)
        self.all_logq = np.array([])
        self.nf_models = []
    def local_exploration(self, logq_func=None, dlogq_func=None, log_thresh=None):
        """Perform local exploration.

        For every sample whose importance weight exceeds the threshold,
        takes a step of size `delta` along +/- grad log(p/q) and splits the
        original weight between the original point and the two proposals in
        proportion to their posterior densities.
        """
        if log_thresh is None:
            self.high_iw_idx = np.where(self.log_weight >= np.log(self.local_thresh))[0]
        else:
            self.high_iw_idx = np.where(self.log_weight >= log_thresh)[0]
        self.num_local = len(self.high_iw_idx)
        if self.sample_mode == 'function_approx':
            self.high_iw_samples = self.weighted_samples[self.high_iw_idx, ...]
        else:
            self.high_iw_samples = self.nf_samples[self.high_iw_idx, ...]
        self.high_log_weight = self.log_weight[self.high_iw_idx]
        self.high_weights = self.weights[self.high_iw_idx]
        print(f'Number of points we perform additional local exploration around = {self.num_local}')
        self.local_samples = np.empty((0, np.shape(self.high_iw_samples)[1]))
        self.local_log_weight = np.array([])
        self.modified_log_weight = np.array([])
        self.local_weights = np.array([])
        self.modified_weights = np.array([])
        for i, sample in enumerate(self.high_iw_samples):
            sample = sample.reshape(-1, sample.size)
            # grad log(p/q) = grad log p - grad log q, either analytically
            # or via finite differences.
            if self.local_grad:
                if dlogq_func is None:
                    raise Exception('Using gradient-based exploration requires you to supply dlogq_func.')
                self.log_weight_grad = self.target_dlogp(sample.astype(np.float64)) - dlogq_func(sample.astype(np.float64))
            elif not self.local_grad:
                if logq_func is None:
                    raise Exception('Gradient-free approximates gradients with finite difference. Requires you to supply logq_func.')
                self.log_weight_grad = (approx_fprime(sample, self.target_logp, np.finfo(float).eps)
                                        - approx_fprime(sample, logq_func, np.finfo(float).eps))
            self.log_weight_grad = np.asarray(self.log_weight_grad).astype(np.float64)
            # Backtracking line search: halve delta until the proposal's q
            # drops by less than a factor of 2 (and actually drops).
            delta = 1.0 * self.local_step_size
            proposed_step_inc = sample + delta * self.log_weight_grad
            line_search_iter = 0
            while (logq_func(proposed_step_inc) - logq_func(sample) <= -np.log(2) or
                   logq_func(proposed_step_inc) - logq_func(sample) >= 0):
                delta = delta / 2.0
                proposed_step_inc = sample + delta * self.log_weight_grad
                line_search_iter += 1
                if line_search_iter >= self.max_line_search:
                    break
            proposed_step_dec = sample - delta * self.log_weight_grad
            # Split the original weight over {inc, original, dec} using a
            # log-sum-exp normalization of the three posterior densities.
            sample_logp = self.target_logp(sample)
            proposed_logp_inc = self.target_logp(proposed_step_inc)
            proposed_logp_dec = self.target_logp(proposed_step_dec)
            max_logp = max(sample_logp, proposed_logp_inc, proposed_logp_dec)
            local_log_w_inc = (self.high_log_weight[i] + proposed_logp_inc - max_logp -
                               np.log(np.exp(proposed_logp_inc - max_logp) +
                                      np.exp(sample_logp - max_logp) +
                                      np.exp(proposed_logp_dec - max_logp)))
            modif_log_w = (self.high_log_weight[i] + sample_logp - max_logp -
                           np.log(np.exp(proposed_logp_inc - max_logp) +
                                  np.exp(sample_logp - max_logp) +
                                  np.exp(proposed_logp_dec - max_logp)))
            local_log_w_dec = (self.high_log_weight[i] + proposed_logp_dec - max_logp -
                               np.log(np.exp(proposed_logp_dec - max_logp) +
                                      np.exp(sample_logp - max_logp) +
                                      np.exp(proposed_logp_inc - max_logp)))
            self.local_log_weight = np.append(self.local_log_weight, local_log_w_inc)
            self.local_log_weight = np.append(self.local_log_weight, local_log_w_dec)
            self.modified_log_weight = np.append(self.modified_log_weight, modif_log_w)
            self.local_weights = np.append(self.local_weights, np.exp(local_log_w_inc))
            self.local_weights = np.append(self.local_weights, np.exp(local_log_w_dec))
            self.modified_weights = np.append(self.modified_weights, np.exp(modif_log_w))
            self.local_samples = np.append(self.local_samples, proposed_step_inc, axis=0)
            self.local_samples = np.append(self.local_samples, proposed_step_dec, axis=0)
        # Replace the explored points' weights with their reduced share.
        self.log_weight[self.high_iw_idx] = self.modified_log_weight
        self.weights[self.high_iw_idx] = self.modified_weights
def initialize_map_hess(self):
"""Initialize using scipy MAP optimization and Hessian."""
self.map_dict, self.scipy_opt = find_MAP(start=self.start, model=self.model, method=self.scipy_map_method, return_raw=True)
self.mu_map = []
for v in self.variables:
self.mu_map.append(self.map_dict[v.name])
self.mu_map = np.array(self.mu_map).squeeze()
if self.mu_map.size == 1:
self.mu_map = np.array([self.mu_map])
if self.init_method == 'lbfgs':
assert self.scipy_map_method == 'L-BFGS-B'
self.hess_inv = self.scipy_opt.hess_inv.todense()
if self.init_method == 'map+laplace':
if self.mu_map.size == 1:
self.hess_inv = np.array([1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))]).reshape(-1, 1)
else:
self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
print(f'Map+Laplace mean = {self.mu_map}')
print(f'Map+Laplace covariance = {self.hess_inv}')
self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = | |
<reponame>Radiian-Arts-Main/Radiian-Arts-BioSource
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qt filechooser widget.
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>, <NAME>
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
A Qt based filechooser widget.
"""
import os
from PyQt5 import QtCore, QtGui, QtWidgets
import qtgui as gui
import mh
import getpath
import log
from sorter import Sorter
class ThumbnailCache(object):
    """Cache of scaled thumbnail pixmaps, keyed by image file path.

    Entries are invalidated automatically when the file's size or
    modification time changes on disk.
    """
    # Crop-to-fill scaling: expand to cover the target size, then center-crop.
    aspect_mode = QtCore.Qt.KeepAspectRatioByExpanding
    scale_mode = QtCore.Qt.SmoothTransformation

    def __init__(self, size):
        # size: (width, height) in pixels of the thumbnails produced.
        self.cache = {}
        self.size = size

    def __getitem__(self, name):
        """Return the (possibly cached) thumbnail pixmap for image file ``name``."""
        nstat = os.stat(name)
        if name in self.cache:
            stat, pixmap = self.cache[name]
            if stat.st_size == nstat.st_size and stat.st_mtime == nstat.st_mtime:
                return pixmap
            else:
                # File changed on disk; drop the stale entry and reload below.
                del self.cache[name]
        pixmap = self.loadImage(name)
        self.cache[name] = (nstat, pixmap)
        return pixmap

    def loadImage(self, path):
        """Load ``path``, scale it to fill the thumbnail size and center-crop it."""
        pixmap = QtGui.QPixmap(path)
        width, height = self.size
        pixmap = pixmap.scaled(width, height, self.aspect_mode, self.scale_mode)
        pwidth = pixmap.width()
        pheight = pixmap.height()
        if pwidth > width or pheight > height:
            # Bugfix: use integer division. Under Python 3, `/` yields a
            # float, and QPixmap.copy() requires int coordinates (TypeError
            # in PyQt5 otherwise).
            x0 = max(0, (pwidth - width) // 2)
            y0 = max(0, (pheight - height) // 2)
            pixmap = pixmap.copy(x0, y0, width, height)
        return pixmap
class FileChooserRectangle(gui.Button):
    """Clickable widget representing one file in the chooser: a fixed-size
    thumbnail with the file's label underneath. Clicking it selects the file
    and fires 'onFileSelected' on the owning chooser.
    """
    _size = (128, 128)
    _imageCache = ThumbnailCache(_size)

    def __init__(self, owner, file, label, imagePath):
        super(FileChooserRectangle, self).__init__()
        gui.Widget.__init__(self)
        self.owner = owner
        self.file = file
        self.layout = QtWidgets.QGridLayout(self)
        self.layout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        image = self._imageCache[imagePath]
        self.preview = QtWidgets.QLabel()
        # Bugfix: the cache yields a QPixmap, which QLabel.setPixmap expects
        # directly; it was wrapped in getpath.pathToUnicode(), a helper meant
        # for filesystem path strings.
        self.preview.setPixmap(image)
        self.layout.addWidget(self.preview, 0, 0)
        self.layout.setRowStretch(0, 1)
        # Reserve the full thumbnail cell even for smaller images.
        self.layout.setColumnMinimumWidth(0, self._size[0])
        self.layout.setRowMinimumHeight(0, self._size[1])
        self.label = QtWidgets.QLabel()
        self.label.setText(label)
        self.label.setMinimumWidth(1)
        self.layout.addWidget(self.label, 1, 0)
        self.layout.setRowStretch(1, 0)

    def onClicked(self, event):
        # Record the selection on the owner and notify its listeners.
        self.owner.selection = self.file
        self.owner.callEvent('onFileSelected', self.file)
class FlowLayout(QtWidgets.QLayout):
    """Qt layout that places items left-to-right, wrapping to a new row
    whenever the next item would overflow the available width."""

    def __init__(self, parent = None):
        super(FlowLayout, self).__init__(parent)
        self._children = []

    def addItem(self, item):
        self._children.append(item)

    def count(self):
        return len(self._children)

    def itemAt(self, index):
        if 0 <= index < self.count():
            return self._children[index]
        return None

    def takeAt(self, index):
        item = self.itemAt(index)
        if item is None:
            return None
        del self._children[index]
        return item

    def hasHeightForWidth(self):
        # Height depends on width because of row wrapping.
        return True

    def _doLayout(self, width, real=False):
        """Walk the items, wrapping rows at ``width``; return total height.

        When ``real`` is True the items' geometries are actually applied.
        """
        x = y = rowHeight = 0
        for item in self._children:
            hint = item.sizeHint()
            w, h = hint.width(), hint.height()
            if x + w > width:
                # Item would overflow: start a new row.
                x = 0
                y += rowHeight
                rowHeight = 0
            rowHeight = max(rowHeight, h)
            if real:
                item.setGeometry(QtCore.QRect(x, y, w, h))
            x += w
        return y + rowHeight

    def heightForWidth(self, width):
        # Dry-run layout pass to measure the required height.
        return self._doLayout(width, False)

    def sizeHint(self):
        # Preferred size: all items on one row.
        totalWidth = 0
        maxHeight = 0
        for item in self._children:
            hint = item.sizeHint()
            totalWidth += hint.width()
            maxHeight = max(maxHeight, hint.height())
        return QtCore.QSize(totalWidth, maxHeight)

    def setGeometry(self, rect):
        self._doLayout(rect.width(), True)

    def expandingDirections(self):
        # Rows wrap downward, so the layout grows vertically.
        return QtCore.Qt.Vertical

    def minimumSize(self):
        if self._children:
            return self._children[0].sizeHint()
        return QtCore.QSize(0, 0)
class FileSort(Sorter):
    """
    The default file sorting class. Can sort files on name,
    creation and modification date and size.
    It provides an interface for sorting files by
    reading metadata from them.
    """
    def __init__(self):
        super(FileSort, self).__init__()
        # Sort methods derived from file metadata (see the metaFields property).
        self._metaMethods = self.Methods()
        # Per-file metadata cache: filename -> dict of metadata values.
        self._meta = {}
        # Built-in sort fields, each paired with its key function.
        # NOTE(review): this is assigned as a list of (field, func) tuples, but
        # getMethod() below does `field in self.methods` and `self.methods[field]`,
        # which only works if the Sorter base class exposes `methods` as a
        # mapping-like property -- verify against Sorter.
        self.methods = [
            ("name", lambda path: os.path.basename(path).lower()),
            ("created", os.path.getctime),
            ("modified", os.path.getmtime),
            ("size", os.path.getsize)]
    def getMetaFields(self):
        # Field names of the metadata-based sort methods.
        return self._metaMethods.fields()
    def setMetaFields(self, mf):
        # NOTE(review): this replaces the Methods() container with a plain list,
        # yet getMetaFields() calls .fields() on it -- confirm whether Sorter
        # normalizes this assignment somewhere.
        # NOTE(review): `field` is captured late by these lambdas (comprehension
        # scope), so every lambda sees the *final* value of `field` -- presumably
        # unintended; TODO confirm and bind as a default argument if so.
        self._metaMethods = [(field,
            lambda filename: self._meta[filename][field]) for field in mf]
    metaFields = property(getMetaFields, setMetaFields)
    def fields(self):
        """
        Override Sorter.getFields to append the MetaFields at the result.
        """
        return list(super(FileSort, self).fields()) + self.metaFields
    def getMethod(self, field):
        """
        Override Sorter.getMethod so that it also returns the ordering
        methods generated by the metadata.
        """
        if field in self.methods:
            return self.methods[field]
        else:
            return self._metaMethods[field]
    def sort(self, by, filenames):
        # Refresh cached metadata before delegating to Sorter.sort.
        self.updateMeta(filenames)
        return super(FileSort, self).sort(by, filenames)
    def insert(self, by, filename, filenames):
        # Refresh metadata (including the new file) before delegating.
        self.updateMeta(filenames + [filename])
        return super(FileSort, self).insert(by, filename, filenames)
    def updateMeta(self, filenames):
        """Refresh the metadata cache for any file whose mtime is newer
        than the cached entry; unreadable files are logged and skipped."""
        for filename in filenames:
            try:
                if filename in self._meta and \
                   self._meta[filename]['modified'] >= os.path.getmtime(filename):
                    # Cached entry is still current.
                    continue
                self._meta[filename] = self.getMeta(filename)
                self._meta[filename]['modified'] = os.path.getmtime(filename)
            except IOError:
                log.warning("Filechooser could not update metadata of file %s (IO error)" % filename)
    def getMeta(self, filename):
        """
        Reads and returns a dictionary with metadata associated with the
        given file. To be overriden by classes using metadata.
        """
        return {}
class FileSortRadioButton(gui.RadioButton):
    """Radio button that, when clicked, re-sorts its file chooser by the
    sort field this button represents."""

    def __init__(self, chooser, group, selected, field):
        gui.RadioButton.__init__(self, group, "By %s" % field, selected)
        self.chooser = chooser
        self.field = field

    def onClicked(self, event):
        # Switch the chooser's sort field and rebuild its contents.
        self.chooser.sortBy = self.field
        self.chooser.refresh()
class TagFilter(gui.GroupBox):
    """Group box of per-tag checkboxes used to filter file chooser items.

    The filter mode ('OR', 'AND', 'NOR' or 'NAND') is read from the
    'tagFilterMode' user setting each time filtering is performed; the box
    title reflects it and is refreshed on show and via labelSwitch().
    """
    def __init__(self):
        super(TagFilter, self).__init__('Tag Filter [Mode : ' + self.convertModes(mh.getSetting('tagFilterMode')) + ']')
        self.tags = {}  # lowercase tag name -> gui.CheckBox toggle
        self.selectedTags = set()  # lowercase tags currently checked

    def setTags(self, tags):
        """Replace all known tags with the given iterable of tag names."""
        self.clearAll()
        for tag in tags:
            self.addTag(tag)

    def addTag(self, tag):
        """Register a tag (case-insensitive); already-known tags are ignored."""
        tag = tag.lower()
        if tag in self.tags:
            return
        toggle = gui.CheckBox(tag.title())
        toggle.tag = tag
        self.tags[tag] = toggle

        @toggle.mhEvent
        def onClicked(event):
            # Clicking the checkbox toggles this tag in the selection.
            self.setTagState(toggle.tag, toggle.selected)

    def onShow(self, event):
        super(TagFilter, self).onShow(event)
        # Refresh the title in case the filter-mode setting changed.
        self.setTitle('Tag Filter [Mode : ' + self.convertModes(mh.getSetting('tagFilterMode')) + ']')

    def showTags(self, selection=None, stickyTags=None):
        """Add the tag checkboxes to the box, sticky tags first.

        selection: tags to restore as checked.
        stickyTags: a tag name or list of tag names pinned at the top,
        before the alphabetically sorted remainder.
        """
        if isinstance(stickyTags, str):
            stickyTags = [stickyTags]
        if isinstance(stickyTags, list):
            stickyTags = [s.lower() for s in stickyTags if isinstance(s, str)]
            for tag in stickyTags:
                toggle = self.tags.get(tag)
                if toggle:
                    self.addWidget(toggle)
                    if selection and tag in selection:
                        toggle.setChecked(True)
                        self.selectedTags.add(tag)
        if stickyTags is None:
            stickyTags = set()
        # Remaining tags in alphabetical order.
        for tag in sorted(set(self.tags.keys()).difference(stickyTags)):
            self.addWidget(self.tags.get(tag))
            if selection and tag in selection:
                self.tags.get(tag).setChecked(True)
                self.selectedTags.add(tag)

    def removeTags(self):
        """Detach all tag checkboxes from the box (they remain registered)."""
        for toggle in self.tags.values():
            self.removeWidget(toggle)

    def addTags(self, tags):
        for tag in tags:
            self.addTag(tag)

    def setTagState(self, tag, enabled):
        """Mark a known tag as (de)selected and notify 'onTagsChanged' listeners."""
        tag = tag.lower()
        if tag not in self.tags:
            return
        if enabled:
            self.selectedTags.add(tag)
        else:
            # Bugfix: discard() instead of remove() -- tolerates a toggle-off
            # for a tag that was never recorded as selected (avoids KeyError).
            self.selectedTags.discard(tag)
        self.callEvent('onTagsChanged', self.selectedTags)

    def clearAll(self):
        """Destroy every checkbox and reset all tag state."""
        for tggl in self.tags.values():
            tggl.hide()
            tggl.destroy()
        self.selectedTags.clear()
        self.tags.clear()

    def getSelectedTags(self):
        return self.selectedTags

    def getTags(self):
        return set(self.tags.keys())

    def filterActive(self):
        # The filter only applies when at least one tag is selected.
        return len(self.getSelectedTags()) > 0

    def filter(self, items):
        """Show or hide each item according to the selected tags and mode."""
        mode = mh.getSetting('tagFilterMode')
        if not self.filterActive():
            # No tags selected: everything is visible.
            for item in items:
                item.setHidden(False)
            return
        for item in items:
            matches = len(self.selectedTags.intersection(item.tags))
            if mode == 'OR':
                # Visible when any selected tag matches.
                item.setHidden(matches == 0)
            elif mode == 'AND':
                # Visible only when all selected tags match.
                item.setHidden(matches != len(self.selectedTags))
            elif mode == 'NOR':
                # Hidden when any selected tag matches.
                item.setHidden(matches > 0)
            elif mode == 'NAND':
                # Hidden only when all selected tags match.
                item.setHidden(matches == len(self.selectedTags))

    def convertModes(self, mode):
        """Map internal mode names to their display names."""
        convmodes = {'NOR': 'NOT OR',
                     'NAND': 'NOT AND'}
        return convmodes.get(mode, mode)

    def labelSwitch(self):
        # Refresh the title after the filter-mode setting was changed.
        self.setTitle('Tag Filter [Mode : ' + self.convertModes(mh.getSetting('tagFilterMode')) + ']')
class FileHandler(object):
    """Default file chooser item handler: builds list items for files and
    resolves their preview images."""

    def __init__(self):
        # Set later via setFileChooser().
        self.fileChooser = None

    def refresh(self, files):
        """(Re)populate the file chooser with the given file paths."""
        for file in files:
            label = getpath.pathToUnicode( os.path.basename(file) )
            if len(self.fileChooser.extensions) > 0:
                label = os.path.splitext(label)[0].replace('_', ' ').capitalize()
            label = label[0].capitalize() + label[1:]
            self.fileChooser.addItem(file, label, self.getPreview(file))

    def getSelection(self, item):
        """Return the value representing the selected item (its file path)."""
        return item.file

    def matchesItem(self, listItem, item):
        """True when the list item refers to the given file path."""
        # Bugfix: `abspath` was referenced unqualified, but only the os
        # module is imported in this file -- qualify it to avoid NameError.
        return os.path.abspath(listItem.file) == os.path.abspath(item)

    def matchesItems(self, listItem, items):
        """True when the list item refers to any of the given file paths."""
        return os.path.abspath(listItem.file) in [os.path.abspath(i) for i in items]

    def setFileChooser(self, fileChooser):
        self.fileChooser = fileChooser

    def getPreview(self, filename):
        """Return the path of a preview image for ``filename``.

        Tries each extension in fileChooser.previewExtensions in turn,
        falling back to the chooser's notFoundImage.
        """
        fc = self.fileChooser
        if not filename:
            return fc.notFoundImage
        preview = filename
        if preview and fc.previewExtensions:
            preview = os.path.splitext(filename)[0] + '.' + fc.previewExtensions[0]
            i = 1
            while not os.path.exists(preview) and i < len(fc.previewExtensions):
                preview = os.path.splitext(filename)[0] + '.' + fc.previewExtensions[i]
                i = i + 1
        if not os.path.isfile(preview) and fc.notFoundImage:
            # TL: full filepath needed, so we don't look into user dir.
            preview = fc.notFoundImage
        return preview
class TaggedFileLoader(FileHandler):
    """
    Load files with tags, allowing to filter them with a tag filter.
    Requires a pointer to the library that handles items in the filechooser.
    This library object needs to implement a getTags(filename) method.
    """

    def __init__(self, library, useNameTags=False):
        super(TaggedFileLoader, self).__init__()
        self.library = library
        self.useNameTags = useNameTags

    def refresh(self, files):
        """
        Populate the chooser, attaching the tags read from each file, and
        restore the previously selected tag filter.
        """
        previousSelection = self.fileChooser.getSelectedTags().copy()
        self.fileChooser.removeTags()
        for path in files:
            tags = self.library.getTags(filename = path)
            label = self.library.getName(filename = path) if self.useNameTags else ''
            if not label:
                # Fall back to a label derived from the file name.
                label = getpath.pathToUnicode(os.path.basename(path))
                if len(self.fileChooser.extensions) > 0:
                    label = os.path.splitext(label)[0].replace('_', ' ')
            label = label[0].capitalize() + label[1:]
            self.fileChooser.addItem(path, label, self.getPreview(path), tags)
        self.fileChooser.showTags(previousSelection)

    def setNameTagsUsage(self, useNameTags=False):
        self.useNameTags = useNameTags
class MhmatFileLoader(FileHandler):
def __init__(self):
super(MhmatFileLoader, self).__init__()
def getPreview(self, filename):
# TODO this makes filechooser loading quite slow for materials without a thumbnail, but it does provide a preview
thumb = | |
df_alloy['star'][row]
alloy_mass = df_alloy['alloy mass'][row]
if star_name == star_name2:
feo_moles = feo_in / feo_molwt
na2o_moles = na2o_in / na2o_molwt
mgo_moles = mgo_in / mgo_molwt
al2o3_moles = al2o3_in / al2o3_molwt
sio2_moles = sio2_in / sio2_molwt
cao_moles = cao_in / cao_molwt
nio_moles = nio_in / nio_molwt
tio2_moles = tio2_in / tio2_molwt
cr2o3_moles = cr2o3_in / cr2o3_molwt
in2_header = "2,feo,na2o,mgo,al2o3,sio2,cao,nio,tio2,cr2o3"
in2 = ",{},{},{},{},{},{},{},{},{}".format(feo_moles, na2o_moles, mgo_moles, al2o3_moles, sio2_moles, cao_moles, nio_moles, tio2_moles, cr2o3_moles)
bsp_debug.write("{}\n{}\n".format(in2_header, in2))
fe_moles = feo_moles * num_feo_cations
na_moles = na2o_moles * num_na2o_cations
mg_moles = mgo_moles * num_mgo_cations
al_moles = al2o3_moles * num_al2o3_cations
si_moles = sio2_moles * num_sio2_cations
ca_moles = cao_moles * num_cao_cations
ni_moles = nio_moles * num_nio_cations
ti_moles = tio2_moles * num_tio2_cations
cr_moles = cr2o3_moles * num_cr2o3_cations
in3_header = "3,fe,na,mg,al,si,ca,ni,ti,cr"
in3 = ",{},{},{},{},{},{},{},{},{}".format(fe_moles, na_moles, mg_moles, al_moles,
si_moles, ca_moles, ni_moles, ti_moles, cr_moles)
bsp_debug.write("{}\n{}\n".format(in3_header, in3))
fe_mass = fe_moles * fe_atwt
na_mass = na_moles * na_atwt
mg_mass = mg_moles * mg_atwt
al_mass = al_moles * al_atwt
si_mass = si_moles * si_atwt
ca_mass = ca_moles * ca_atwt
ni_mass = ni_moles * ni_atwt
ti_mass = ti_moles * ti_atwt
cr_mass = cr_moles * cr_atwt
in4_header = "4,fe,na,mg,al,si,ca,ni,ti,cr"
in4 = ",{},{},{},{},{},{},{},{},{}".format(fe_mass, na_mass, mg_mass, al_mass,
si_mass, ca_mass, ni_mass, ti_mass, cr_mass)
bsp_debug.write("{}\n{}\n".format(in4_header, in4))
alloy_subt_ni_mass = alloy_mass - ni_mass
if alloy_subt_ni_mass < 0:
print("Ni MASS ERROR!")
sys.exit()
else:
pass
new_mass_fe = fe_mass - alloy_subt_ni_mass
if new_mass_fe < 0:
print("Fe MASS ERROR!")
sys.exit()
remaining_moles_fe = new_mass_fe / fe_atwt
remaining_moles_feo = remaining_moles_fe * num_feo_cations
remaining_mass_feo = remaining_moles_feo * feo_molwt
in5_header = "5,alloy_but_ni_mass,new_mass_fe,remaining_moles_fe,remaining_moles_feo,remaining_mass_feo"
in5 = ",{},{},{},{},{}".format(alloy_subt_ni_mass, new_mass_fe, remaining_moles_fe, remaining_moles_feo,
remaining_mass_feo)
bsp_debug.write("{}\n{}\n".format(in5_header, in5))
unnormalized_sum = (remaining_mass_feo + na2o_in + mgo_in + al2o3_in + sio2_in + cao_in +
tio2_in + cr2o3_in)
norm_feo = remaining_mass_feo / unnormalized_sum * 100.0
norm_na2o = na2o_in / unnormalized_sum * 100.0
norm_mgo = mgo_in / unnormalized_sum * 100.0
norm_al2o3 = al2o3_in / unnormalized_sum * 100.0
norm_sio2 = sio2_in / unnormalized_sum * 100.0
norm_cao = cao_in / unnormalized_sum * 100.0
norm_tio2 = tio2_in / unnormalized_sum * 100.0
norm_cr2o3 = cr2o3_in / unnormalized_sum * 100.0
norm_sum = norm_feo + norm_na2o + norm_mgo + norm_al2o3 + norm_sio2 + norm_cao + norm_tio2 + norm_cr2o3
in6_header = "6,feo,na2o,mgo,al2o3,sio2,cao,tio2,cr2o3,unnorm_sum,norm_sum"
in6 = ",{},{},{},{},{},{},{},{},{},{}".format(norm_feo, norm_na2o, norm_mgo, norm_al2o3,
norm_sio2, norm_cao, norm_tio2, norm_cr2o3, unnormalized_sum, norm_sum)
bsp_debug.write("{}\n{}\n".format(in6_header, in6))
bsp_comp = "{},{},{},{},{},{},{},{},{}".format(star_name, norm_feo, norm_na2o, norm_mgo, norm_al2o3,
norm_sio2, norm_cao, norm_tio2, norm_cr2o3)
bsp_chemfile.write("{}\n".format(bsp_comp))
# print(norm_feo)
# print(norm_sum)
#
# if norm_sum != 100.0:
# print("ERROR! NORMALIZED SUM IS NOT 100.0!")
# sys.exit()
title = "Title: {}".format(star_name)
bsp_feo = "Initial Composition: FeO {}".format(norm_feo)
bsp_na2o = "Initial Composition: Na2O {}".format(norm_na2o)
bsp_mgo = "Initial Composition: MgO {}".format(norm_mgo)
bsp_al2o3 = "Initial Composition: Al2O3 {}".format(norm_al2o3)
bsp_sio2 = "Initial Composition: SiO2 {}".format(norm_sio2)
bsp_cao = "Initial Composition: CaO {}".format(norm_cao)
bsp_tio2 = "Initial Composition: TiO2 {}".format(norm_tio2)
bsp_cr2o3 = "Initial Composition: Cr2O3 {}".format(norm_cr2o3)
init_temp = 'Initial Temperature: 2000'
final_temp = "Final Temperature: 800"
inc_temp = "Increment Temperature: -5"
init_press = "Initial Pressure: 10000"
final_press = "Final Pressure: 10000"
dpdt = "dp/dt: 0"
mode = "Mode: Fractionate Solids"
mode2 = "Mode: Isobaric"
melts_morb_input_file_vars = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
title,
bsp_feo, bsp_na2o, bsp_mgo, bsp_al2o3, bsp_sio2, bsp_cao, bsp_tio2, bsp_cr2o3,
init_temp, init_temp, final_temp, inc_temp, init_press, final_press, dpdt, mode, mode2)
morb_outfile = open("{}_MELTS_{}_INFILE.txt".format(star_name, "MORB"), 'w')
morb_outfile.write(melts_morb_input_file_vars)
morb_outfile.close()
fdir = os.getcwd() + "/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
tdir = home_dir[0] + "/MELTS_MORB_Input_Files/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
shutil.move(fdir, tdir)
except:
pass
bsp_debug.close()
bsp_chemfile.close()
hefestofilewriter_bsp(bulkfile=(home_dir[0] + "/{}_BSP_Composition.csv".format(infilename[:-4])), infilename=infilename)
runmelts_morb(infile_directory=(home_dir[0] + "/MELTS_MORB_Input_Files"), inputfilename=infilename[:-4])
def runmelts_morb(infile_directory, inputfilename):
    """Run alphaMELTS MORB calculations for every input file in
    ``infile_directory``, convert each output table to CSV inside a fresh
    '<inputfilename>_Completed_MORB_MELTS_Files' folder, then hand the
    results to scrapemorb().
    """
    completed_dir = "{}_Completed_MORB_MELTS_Files".format(inputfilename)
    # Start from a clean output folder.
    if completed_dir in os.listdir(os.getcwd()):
        shutil.rmtree(completed_dir)
    os.mkdir(completed_dir)
    for i in os.listdir(infile_directory):
        os.chdir(home_dir[0])
        # Remove any stale output table from a previous iteration.
        if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
            os.remove("alphaMELTS_tbl.txt")
        shutil.copy((infile_directory + "/" + i), (home_dir[0] + "/" + i))
        print("[~] Running MORB calculations for: {}".format(i[:-20]))
        p = subprocess.Popen(["run_alphamelts.command", "-f", "MORB_Env_File"], stdin=subprocess.PIPE)
        # Kill the solver if it hangs for more than 300 seconds.
        t = Timer(300, p.kill)
        t.start()
        print("\nTimeout timer started. 300 seconds until the loop continues...\n")
        # Scripted menu answers fed to alphaMELTS' interactive prompt.
        # Bugfix: `i` is a str and must be encoded before joining with bytes
        # (mixing str into a bytes join raises TypeError under Python 3).
        p.communicate(input=b"\n".join(
            [b"1", i.encode(), b"8", b"alloy-liquid", b"0", b"x", b"5", b"3", b"+0.4", b"2", b"1400", b"10000", b"10", b"1",
             b"3", b"1", b"liquid", b"1", b"0.05", b"0", b"10", b"0", b"4", b"0"]))
        t.cancel()
        if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
            oldname = "alphaMELTS_tbl.txt"
            newname = i[:-20] + "_MORB_OUTPUT"
            os.rename(oldname, newname)
            shutil.move(newname, home_dir[0] + "/" + completed_dir)
            os.remove(i)
            os.chdir(home_dir[0] + "/" + completed_dir)
            csv_file_name = newname + ".csv"
            # alphaMELTS tables are space-delimited text; rewrite them as
            # comma-separated CSV. Bugfix: the csv module requires text-mode
            # files with newline='' under Python 3 (they were opened 'rb'/'wb',
            # which raises TypeError).
            with open(newname, 'r') as infile, open(csv_file_name, 'w', newline='') as outfile:
                in_txt = csv.reader(infile, delimiter=" ")
                out_csv = csv.writer(outfile)
                out_csv.writerows(in_txt)
            os.remove(newname)
            print("[~] {} MORB calculation processed!".format(i[:-17]))
        else:
            print("[X] {} MORB calculation FAILED!".format(i[:-20]))
        # Clean up the copied input file if it is still in the home dir
        # (on success it was already removed above). Bugfix: the original
        # tested `i in home_dir[0]`, a substring check on the path string
        # that was effectively always False.
        if i in os.listdir(home_dir[0]):
            os.remove(home_dir[0] + "/{}".format(i))
    scrapemorb(infiledirectory=(home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename)), infilename=inputfilename)
def scrapebsp2(infiledirectory, inputfilename):
    """Scrape alloy-solid abundances from MELTS BSP output files in
    ``infiledirectory`` and write one summed 'alloy mass' per star to
    alloy_mass.csv in the home directory.

    NOTE(review): the condition ``enumerate(i, 1) >= 100`` below compares an
    enumerate object with an int, which raises TypeError under Python 3; the
    bare ``except`` swallows it, so the per-file body may never run. The
    intended check (file-name length? file size? line count?) cannot be
    recovered from this code -- confirm against the original pipeline before
    fixing.
    """
    # Start a fresh alloy_mass.csv with a header row.
    if "alloy_mass.csv" in os.listdir(home_dir[0]):
        os.remove(home_dir[0] + "/alloy_mass.csv")
    else:
        pass
    alloy_mass_outfile = open(home_dir[0] + "/alloy_mass.csv", 'a')
    alloy_mass_outfile.write("{},{}\n".format("star", "alloy mass"))
    os.chdir(infiledirectory)
    for i in os.listdir(os.getcwd()):
        try:
            os.chdir(infiledirectory)
            # NOTE(review): TypeError under Python 3 -- see docstring.
            if enumerate(i, 1) >= 100:
                alloy_abundance = []
                with open(i, 'r') as infile:
                    reader = csv.reader(infile)
                    # First row carries the star name in column 1.
                    row1 = next(reader)
                    star_name = row1[1]
                    alloy_abundance.append(star_name)
                    for num, line in enumerate(reader, 1):
                        if "Phase" in line:
                            # Remaining rows form the phase table; pull the
                            # alloy-solid_0 column until the first blank row.
                            csv_list = list(reader)
                            alloy_index = csv_list[0].index("alloy-solid_0")
                            for row in csv_list[1:]:
                                if not row == []:
                                    a = row[alloy_index]
                                    x = str(float(a))
                                    alloy_abundance.append(x)
                                else:
                                    break
                        else:
                            pass
                os.chdir(home_dir[0])
                # print(alloy_abundance[1:])
                # Sum the per-step abundances (element 0 is the star name).
                alloy_abundance_nums = []
                for z in alloy_abundance[1:]:
                    alloy_abundance_nums.append(float(z))
                alloy_abundance_sum = sum(alloy_abundance_nums)
                print("Alloy abundance for {}: {}".format(alloy_abundance[0], alloy_abundance_sum))
                alloy_mass_outfile.write("{},{}\n".format(alloy_abundance[0], alloy_abundance_sum))
        # NOTE(review): bare except silently drops every failure, including
        # the TypeError flagged above -- narrow once the intent is confirmed.
        except:
            pass
    else:
        pass
def hefestofilewriter_bsp(bulkfile, infilename):
    """Write one HeFESTo input file per star from the BSP bulk-composition
    CSV ``bulkfile`` and collect them in '<infilename>_BSP_HeFESTo_Input_Files'.
    """
    os.chdir(home_dir[0])
    infilename = infilename[:-4]  # strip the '.csv' extension
    # Start from a clean output folder.
    if os.path.exists("{}_BSP_HeFESTo_Input_Files".format(infilename)):
        shutil.rmtree("{}_BSP_HeFESTo_Input_Files".format(infilename))
    os.mkdir("{}_BSP_HeFESTo_Input_Files".format(infilename))
    bulkfile_df = pd.read_csv(bulkfile)
    for row in bulkfile_df.index:
        try:
            star = bulkfile_df["Star"][row]
            si = bulkfile_df["SiO2"][row]
            mg = bulkfile_df["MgO"][row]
            fe = bulkfile_df["FeO"][row]
            ca = bulkfile_df["CaO"][row]
            al = bulkfile_df["Al2O3"][row]
            na = bulkfile_df["Na2O"][row]
            # HeFESTo control file template with the per-star oxide amounts.
            format_of_file = "0,20,80,1600,0,-2,0\n6,2,4,2\noxides\nSi {} 5.39386 0\nMg {} 2.71075 0\n" \
                             "Fe {} .79840 0\nCa {} .31431 0\nAl {} .96680 0\n" \
                             "Na {} .40654 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\nhc\n" \
                             "phase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\njd\n" \
                             "phase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\nmgwa\nfewa\n" \
                             "phase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\nphase ppv\n0\nmppv\n" \
                             "fppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n1\nqtz\nphase coes\n0\ncoes\n" \
                             "phase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n0\nneph".format(si, mg, fe, ca, al, na)
            # Bugfix: write mode + context manager. The file is created in the
            # home dir (not the fresh folder), so append mode could concatenate
            # onto leftovers of an interrupted earlier run.
            with open("{}_BSP_HeFESTo_Infile.txt".format(star), 'w') as hefesto_bsp_file:
                hefesto_bsp_file.write(format_of_file)
            fdir = home_dir[0] + "/{}".format("{}_BSP_HeFESTo_Infile.txt".format(star))
            tdir = home_dir[0] + "/{}/{}".format("{}_BSP_HeFESTo_Input_Files".format(infilename),
                                                 "{}_BSP_HeFESTo_Infile.txt".format(star))
            shutil.move(fdir, tdir)
        except Exception:
            # Skip malformed rows (original used a bare `except:`, which also
            # swallowed SystemExit/KeyboardInterrupt).
            pass
    print("\n[~] BSP HeFESTo input files available in '{}'".format("{}_BSP_HeFESTo_Input_Files".format(infilename)))
def hefestofilewriter_morb(bulkfile, infilename):
    """Write one HeFESTo input file per star from the MORB bulk-composition
    CSV ``bulkfile``, collect them in '<infilename>_MORB_HeFESTo_Input_Files',
    then consolidate the BSP and MORB folders.
    """
    os.chdir(home_dir[0])
    # Start from a clean output folder.
    if os.path.exists("{}_MORB_HeFESTo_Input_Files".format(infilename)):
        shutil.rmtree("{}_MORB_HeFESTo_Input_Files".format(infilename))
    os.mkdir("{}_MORB_HeFESTo_Input_Files".format(infilename))
    bulkfile_df = pd.read_csv(bulkfile)
    for row in bulkfile_df.index:
        try:
            star = bulkfile_df["Star"][row]
            si = bulkfile_df["SiO2"][row]
            mg = bulkfile_df["MgO"][row]
            fe = bulkfile_df["FeO"][row]
            ca = bulkfile_df["CaO"][row]
            al = bulkfile_df["Al2O3"][row]
            na = bulkfile_df["Na2O"][row]
            # HeFESTo control file template with the per-star oxide amounts.
            format_of_file = "0,20,80,1200,0,-2,0\n6,2,4,2\noxides\nSi {} 5.33159 0\n" \
                             "Mg {} 1.37685 0\nFe {} .55527 0\n" \
                             "Ca {} 1.33440 0\nAl {} 1.82602 0\n" \
                             "Na {} 0.71860 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\n" \
                             "hc\nphase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\n" \
                             "jd\nphase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\n" \
                             "mgwa\nfewa\nphase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\n" \
                             "phase ppv\n0\nmppv\nfppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n" \
                             "1\nqtz\nphase coes\n0\ncoes\nphase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n" \
                             "0\nneph".format(si, mg, fe, ca, al, na)
            # Bugfix: write mode + context manager (append mode could
            # concatenate onto leftovers of an interrupted earlier run).
            with open("{}_MORB_HeFESTo_Infile.txt".format(star), 'w') as hefesto_morb_file:
                hefesto_morb_file.write(format_of_file)
            fdir = home_dir[0] + "/{}".format("{}_MORB_HeFESTo_Infile.txt".format(star))
            tdir = home_dir[0] + "/{}/{}".format("{}_MORB_HeFESTo_Input_Files".format(infilename),
                                                 "{}_MORB_HeFESTo_Infile.txt".format(star))
            shutil.move(fdir, tdir)
        except Exception:
            # Skip malformed rows (original used a bare `except:`, which also
            # swallowed SystemExit/KeyboardInterrupt).
            pass
    print("\n[~] Crust HeFESTo input files available in '{}'".format("{}_MORB_HeFESTo_Input_Files".format(infilename)))
    consol_hefestofolders(infilename=infilename)
def consol_hefestofolders(infilename):
    """Move the BSP and MORB HeFESTo input folders into a single
    '<infilename>_HeFESTo_Input_Files' directory, then return to the menu."""
    print('\n[~] Consolidating HeFESTo input file folders...')
    bsp_folder = "/{}_BSP_HeFESTo_Input_Files".format(infilename)
    morb_folder = "/{}_MORB_HeFESTo_Input_Files".format(infilename)
    print("[~] Got HeFESTo BSP folder '{}'".format(bsp_folder))
    print("[~] Got HeFESTo Crust folder '{}'".format(morb_folder))
    # Start from a clean consolidated folder.
    if "{}_HeFESTo_Input_Files".format(infilename) in os.listdir(os.getcwd()):
        shutil.rmtree("{}_HeFESTo_Input_Files".format(infilename))
    consol_folder = (home_dir[0] + "/{}_HeFESTo_Input_Files".format(infilename))
    # Bugfix: actually create the folder announced below; the original relied
    # on shutil.move()'s copytree fallback to create the destination tree.
    os.makedirs(consol_folder)
    print("\n[~] Created consolidated HeFESTo input file folder: {}".format(consol_folder))
    fdir_bsp = (home_dir[0] + bsp_folder)
    fdir_morb = (home_dir[0] + morb_folder)
    tdir_bsp = consol_folder + bsp_folder
    tdir_morb = consol_folder + morb_folder
    shutil.move(fdir_bsp, tdir_bsp)
    shutil.move(fdir_morb, tdir_morb)
    print("\n[~] HeFESTo Input files are now available in {} for transfer to a HeFESTo VM".format(consol_folder))
    print("\n[~] Please move this script and folder '{}' to a working HeFESTo directory!".format(consol_folder))
    print("[~] Exiting the Exoplanet Pocketknife's active processes...")
    time.sleep(6)
    initialization()
def runhefesto(infiledir, actual_run, runname):
os.chdir(home_dir[0])
if actual_run is True:
# try:
if 'main' not in os.listdir(os.getcwd()):
print("[X] ERROR! HeFESTo's 'main' not detected in the working directory!\n")
time.sleep(4)
initialization()
else:
print("[~] HeFESTo detected in the working directory!\n")
pass
# os.chdir(home_dir[0])
# print("\nPlease enter the name of your BSP HeFESTo input .csv sheet:")
# hefesto_input_bsp = input(">>> ")
# if hefesto_input_bsp in os.listdir(os.getcwd()):
# print("[~] {} has been found in the working directory!".format(hefesto_input_bsp))
# else:
# print("[X] {} has NOT been found in the working directory!".format(hefesto_input_bsp))
# time.sleep(4)
# initialization()
# print("\nPlease enter the name of your crust HeFESTo input .csv sheet:")
# hefesto_input_morb = input(">>> ")
# if hefesto_input_morb in os.listdir(os.getcwd()):
# print("[~] {} | |
<gh_stars>1-10
# Modifications, Copyright 2021 KitchenShift
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dm_control import mujoco
from dm_control import mjcf
from dm_control.mujoco import engine
from dm_control.utils import inverse_kinematics
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import tempfile
from .adept_envs.simulation.renderer import DMRenderer
from .adept_envs.simulation.sim_robot import _patch_mjlib_accessors
from .mujoco.robot import Robot
from .mujoco.mocap_utils import reset_mocap2body_xpos, reset_mocap_welds
from .mujoco.rotations import euler2quat, mat2euler, quat2euler, mat2quat, quat_mul
from .mujoco.obs_utils import get_obs_ee, get_obs_forces
from .constants import CAMERAS, OBS_ELEMENT_INDICES, OBS_ELEMENT_GOALS, FRANKA_INIT_QPOS, BONUS_THRESH
from .utils import make_rng
class Kitchen_v1(gym.Env):
"""Kitchen manipulation environment in Mujoco. Ported from relay-policy-learning/adept_envs."""
TASK_ELEMENTS = ['microwave', 'kettle', 'bottomknob', 'switch']
REMOVE_TASKS_WHEN_COMPLETE = True
TERMINATE_ON_TASK_COMPLETE = True
def __init__(
self,
ctrl_mode='absvel',
compensate_gravity=True,
noslip_off=False,
frame_skip=40,
camera_ids=[0, 1],
height=128,
width=128,
with_obs_ee=True,
with_obs_forces=True,
robot='franka2',
rot_use_euler=False,
render_size=None,
noise_ratio=0.1,
robot_cache_noise_ratio=0.0,
object_pos_noise_amp=0.1,
object_vel_noise_amp=0.1,
robot_obs_extra_noise_amp=0.1,
init_random_steps_set=None,
init_perturb_robot_ratio=None,
init_perturb_object_ratio=None,
rng_type='legacy',
):
self.tasks_to_complete = set(self.TASK_ELEMENTS)
self.goal = self._get_task_goal()
self.ctrl_mode = ctrl_mode
self.frame_skip = frame_skip
# see https://github.com/ARISE-Initiative/robosuite/blob/e0982ca9000fd373bc60781ec9acd1ef29de5beb/robosuite/models/grippers/gripper_tester.py#L195
# https://github.com/deepmind/dm_control/blob/87e046bfeab1d6c1ffb40f9ee2a7459a38778c74/dm_control/locomotion/soccer/boxhead.py#L36
# http://www.mujoco.org/forum/index.php?threads/gravitational-matrix-calculation.3404/
# https://github.com/openai/mujoco-py/blob/4830435a169c1f3e3b5f9b58a7c3d9c39bdf4acb/mujoco_py/mjpid.pyx#L243
self.compensate_gravity = compensate_gravity
self.noslip_off = noslip_off
self.with_obs_ee = with_obs_ee
self.with_obs_forces = with_obs_forces
self.rot_use_euler = rot_use_euler # affects format of with_obs_ee
self.robot_name = robot
self.noise_ratio = noise_ratio # global noise multiplier, if < 1 then reduces noise
# be careful when using robot_cache_noise_ratio, since this will affect noise
# of obs used by the robot controller
self.robot_cache_noise_ratio = robot_cache_noise_ratio
self.object_pos_noise_amp = object_pos_noise_amp
self.object_vel_noise_amp = object_vel_noise_amp
self.robot_obs_extra_noise_amp = robot_obs_extra_noise_amp
self.init_random_steps_set = init_random_steps_set
self.init_perturb_robot_ratio = init_perturb_robot_ratio
self.init_perturb_object_ratio = init_perturb_object_ratio
self.rng_type = rng_type
self.model_dir = os.path.join(os.path.dirname(__file__), 'assets/')
self.model_path = os.path.join(self.model_dir, 'kitchen.xml')
self.model_xml = open(self.model_path, 'r').read()
self.render_size = render_size
self.camera_ids = camera_ids
self.height = height;
self.width = width;
# if render_size is not None:
# # swap in render buffer size
# height = self.render_size[0]
# width = self.render_size[1]
# self.model_xml = self.model_xml.replace(
# '<global offwidth="640" offheight="480" />',
# f'<global offwidth="{height}" offheight="{width}" />',
# )
# # NOTE: if using larger render sizes, probably want to scale up shadow quality as well
if self.noslip_off:
self.model_xml = self.model_xml.replace(
'<option timestep="0.002" cone="elliptic" impratio="2" noslip_iterations="20"/>',
'<option timestep="0.002"/>',
)
if self.robot_name == 'franka':
pass
elif self.robot_name == 'franka2':
self.model_xml = self.model_xml.replace(
'<include file="franka/actuator0.xml"/>',
'<include file="franka2/actuator0.xml"/>',
)
self.model_xml = self.model_xml.replace(
'<include file="franka/franka_panda.xml"/>',
'<include file="franka2/franka_panda.xml"/>',
)
elif self.robot_name == 'xarm7':
raise NotImplementedError
else:
raise ValueError(f"Unsupported robot: {self.robot_name}")
# mujoco.Physics.from_xml_string messes up asset paths
# mjcf.from_xml_string doesn't seem to support the same xml parsing as the actual mjlib
# to circumvent these issues, in order to dynamically change the env and reload the xml,
# we write the xml string to a temporary xml file located in self.model_dir
#
# self.sim = mujoco.Physics.from_xml_string(self.model_xml)
# self.sim = mjcf.from_xml_string(model_xml, model_dir=self.model_dir)
# self.sim = mujoco.Physics.from_xml_path(self.model_path)
# _patch_mjlib_accessors(self.model, self.sim.data, True)
# print(self.model_xml)
self.load_sim(self.model_xml)
self.seed()
# self.init_qpos = self.data.qpos.ravel().copy()
self.init_qvel = self.data.qvel.ravel().copy()
self.set_init_qpos(FRANKA_INIT_QPOS.copy())
# self.init_qvel = self.sim.model.key_qvel[0].copy() # this should be np.zeros(29)
if self.ctrl_mode == 'absvel':
action_dim = self.N_DOF_ROBOT
self.act_mid = np.zeros(action_dim)
self.act_amp = 2.0 * np.ones(action_dim)
elif self.ctrl_mode == 'abspos':
action_dim = self.N_DOF_ROBOT
self.act_mid = np.zeros(action_dim)
self.act_amp = 3.0 * np.ones(action_dim)
elif self.ctrl_mode == 'relmocapik':
# with mocapik, we follow robogym and robosuite by spawning a separate simulator
self.mocapid = None # set later since sim is not yet initialized
self.initial_mocap_quat = np.array([-0.65269804, 0.65364932, 0.27044485, 0.27127002])
self.fix_gripper_quat = False
self.binary_gripper = False
pos_action_dim = 3
rot_action_dim = 3 if self.rot_use_euler else 4
gripper_action_dim = 1 if self.binary_gripper else 2
action_dim = pos_action_dim + rot_action_dim + gripper_action_dim
self.pos_range = 0.075
self.rot_range = 0.075
elif self.ctrl_mode == 'absmocapik':
self.mocapid = None # set later since sim is not yet initialized
action_dim = self.N_DOF_ROBOT # xyz (3) + quat (4) + gripper (2) == 9
self.act_mid = np.zeros(action_dim)
self.act_amp = 3.0 * np.ones(action_dim)
elif self.ctrl_mode == 'mixmocapik':
self.mocapid = None # set later since sim is not yet initialized
action_dim = self.N_DOF_ROBOT # xyz (3) + quat (4) + gripper (2) == 9
self.act_mid = np.zeros(action_dim)
self.act_amp = 2.0 * np.ones(action_dim)
self.pos_range = 0.075
else:
raise ValueError(f"Unsupported ctrl_mode: {self.ctrl_mode}")
self.action_space = gym.spaces.Box(-1.0, 1.0, shape=(action_dim,))
obs_space = {}
obs_space['robot_qp'] = spaces.Box(low=-np.inf, high=np.inf, shape=(9,), dtype=np.float32)
obs_space['ee_qp'] = spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float32)
obs_space['ee_forces'] = spaces.Box(low=-np.inf, high=np.inf, shape=(12,), dtype=np.float32)
for key in ['camera_{}'.format(idx) for idx in camera_ids] + ['camera_gripper']:
obs_space[key + "_rgb"] = gym.spaces.Box(
low=0,
high=255,
shape=(height, width, 3),
dtype=np.uint8,
)
obs_space[key + "_depth"] = gym.spaces.Box(
low=0,
high=255,
shape=(height, width),
dtype=np.uint8,
)
self.observation_space = spaces.Dict(obs_space)
def _create_sim(self, xml_string):
with tempfile.NamedTemporaryFile(mode='w+', dir=self.model_dir) as f:
f.write(xml_string)
f.flush()
sim = mujoco.Physics.from_xml_path(f.name)
return sim
    def load_sim(self, xml_string):
        """(Re)load the main simulator from an XML string and rebuild dependents.

        Creates the mujoco Physics, patches the mjlib accessors, derives the
        robot/object DoF split from the actuator count, rebuilds the Robot
        wrapper and, for mocap control modes, the separate IK solver sim,
        then recreates the renderer and cameras.
        """
        self.sim = self._create_sim(xml_string)
        _patch_mjlib_accessors(self.model, self.sim.data, True)
        # nu (number of actuators) gives the robot DoF; the remaining
        # generalized coordinates belong to the scene objects
        self.N_DOF_ROBOT = self.sim.data.model.nu
        self.N_DOF_OBJECT = self.sim.data.model.nq - self.N_DOF_ROBOT
        self.robot = Robot(self.N_DOF_ROBOT, actuator_specs=self.sim.data.model.actuator_user)
        if 'mocap' in self.ctrl_mode:
            # mocap-ik control modes use a second, teleop-actuated sim for IK
            self._create_solver_sim(xml_string)
        self.create_renderer()
        self.add_cameras()
    def _create_solver_sim(self, xml_string):
        """Build the secondary "solver" sim used by the mocap-IK control modes.

        Rewrites the model XML to use the teleop actuator set, adds a weld
        equality constraint and a mocap body ("vive_controller") attached to
        the robot's last link, then instantiates a separate Physics from the
        rewritten XML as self.solver_sim.
        """
        from lxml import etree as ET
        # returns Element rather than ElementTree like ET.parse, so don't need to getroot()
        parser = ET.XMLParser(remove_blank_text=True, remove_comments=True)
        domain_model_xml_tree = ET.fromstring(xml_string, parser=parser)
        worldbody = domain_model_xml_tree.find('worldbody')
        if self.robot_name == 'franka2':
            # swap the position/velocity actuators for the teleop actuator set
            fn = f'franka2/actuator0.xml'
            n = domain_model_xml_tree.find(f'include[@file="{fn}"]')
            n.attrib['file'] = 'franka2/teleop_actuator.xml'
            equality = """
            <equality>
            <!-- original constraints -->
            <!-- <weld body1="vive_controller" body2="world" solref="0.02 1" solimp=".7 .95 0.050"/> -->
            <!-- <weld body1="vive_controller" body2="panda0_link7" solref="0.02 1" solimp="0.7 0.95 0.050"/> -->
            <!-- Set the impedance to constant 0.9, with width 0, seems to reduce penetration (ie. gripper finger w/ microwave handle) -->
            <weld body1="vive_controller" body2="panda0_link7" solref="0.02 1" solimp="0.7 0.9 0"/>
            <!-- from franka_panda_teleop.xml-->
            <!-- <weld body1="vive_controller" body2="panda0_link7" solref="0.01 1" solimp=".25 .25 0.001"/> -->
            <!-- from Abhishek's code -->
            <!-- <weld body1="vive_controller" body2="panda0_link7" solref="0.02 1" solimp=".4 .85 .1"/> -->
            </equality>
            """
            equality = ET.fromstring(equality, parser=parser)
            # NOTE(review): inserts the equality element one position *before*
            # worldbody (i - 1) — presumably intentional to keep it ahead of
            # the body definitions; confirm against the MJCF schema.
            i = domain_model_xml_tree.getchildren().index(worldbody)
            domain_model_xml_tree.insert(i - 1, equality)
            controller = """
            <!-- Mocap -->
            <!-- <body name="vive_controller" mocap="true" pos="0 0 2.89" euler="-1.57 0 -.785"> -->
            <body name="vive_controller" mocap="true" pos="-0.440 -0.092 2.026" euler="-1.57 0 -.785">
            <geom type="box" group="2" pos='0 0 .142' size="0.02 0.10 0.03" contype="0" conaffinity="0" rgba=".9 .7 .95 0" euler="0 0 -.785"/>
            </body>
            """
            controller = ET.fromstring(controller, parser=parser)
            worldbody.insert(0, controller)
            # for efficiency, delete some of the unneeded things
            # texplane, MatPlane, light, floor, xaxis, yaxis, cylinder
        else:
            raise NotImplementedError
        domain_model_xml = ET.tostring(
            domain_model_xml_tree,
            encoding='utf8',
            method='xml',
            pretty_print=True,
        ).decode('utf8')
        self.solver_sim = self._create_sim(domain_model_xml)
        _patch_mjlib_accessors(self.solver_sim.model, self.solver_sim.data, True)
def add_cameras(self, camera_id = None):
if camera_id is not None:
self.camera_ids.append(camera_id)
self.cameras = dict()
for camera_id in self.camera_ids:
camera = engine.MovableCamera(self.sim, height=self.height, width=self.width)
camera.set_pose(**CAMERAS[camera_id])
self.cameras['camera_{}'.format(camera_id)] = camera
self.cameras['camera_gripper'] = engine.Camera(self.sim, height=self.height,
width=self.width, camera_id='gripper_camera_rgb')
def create_renderer(self):
self.renderer = DMRenderer(self.sim,
camera_settings=dict(distance=2.9,
lookat=[-0.05, 0.5, 2.0],
azimuth=90,
elevation=-50))
if hasattr(self, 'solver_sim'):
self.solver_sim_renderer = DMRenderer(self.solver_sim,
camera_settings=dict(distance=2.9,
lookat=[-0.05, 0.5, 2.0],
azimuth=90,
elevation=-50))
    def set_init_qpos(self, qpos):
        """Set the joint configuration used as the reset state."""
        self.init_qpos = qpos
    def set_noise_ratio(self, noise_ratio, robot_cache_noise_ratio=None):
        """Update the global observation-noise multipliers.

        NOTE(review): robot_cache_noise_ratio is assigned unconditionally, so
        calling this with only noise_ratio resets the cache ratio to None —
        confirm whether that is intended or whether None should mean "keep".
        """
        self.noise_ratio = noise_ratio
        self.robot_cache_noise_ratio = robot_cache_noise_ratio
    def set_init_noise_params(
        self,
        init_random_steps_set,
        init_perturb_robot_ratio,
        init_perturb_object_ratio,
        rng_type,
    ):
        """Update initial-state randomization parameters; reseed on rng change."""
        self.init_random_steps_set = init_random_steps_set
        self.init_perturb_robot_ratio = init_perturb_robot_ratio
        self.init_perturb_object_ratio = init_perturb_object_ratio
        if rng_type != self.rng_type:
            # switching rng implementations invalidates the current stream,
            # so re-seed from the stored base seed
            self.rng_type = rng_type
            self.seed(seed=self._base_seed)
    @property
    def data(self):
        """mjData of the main simulator."""
        return self.sim.data
    @property
    def model(self):
        """mjModel of the main simulator."""
        return self.sim.model
    @property
    def physics(self):
        """The underlying dm_control Physics instance."""
        return self.sim
def _get_obs_dict(self, noise_ratio='default', robot_cache_obs=False):
if noise_ratio == 'default':
noise_ratio = self.noise_ratio
# noise_ratio = None;
# Gather simulated observation
robot_qp = self.sim.data.qpos[: self.N_DOF_ROBOT].copy()
robot_qv = self.sim.data.qvel[: self.N_DOF_ROBOT].copy()
obj_qp = self.sim.data.qpos[-self.N_DOF_OBJECT :].copy()
obj_qv = self.sim.data.qvel[-self.N_DOF_OBJECT :].copy()
t = self.sim.data.time
# Simulate observation noise
if noise_ratio is not None:
# currently, robot noise is specified per actuator
# while object noise is constant across different objects
robot_qp += (
noise_ratio
* self.robot.pos_noise_amp[: self.N_DOF_ROBOT]
* self.np_random.uniform(low=-1.0, high=1.0, size=self.N_DOF_ROBOT)
)
robot_qv += (
noise_ratio
* self.robot.vel_noise_amp[: self.N_DOF_ROBOT]
* self.np_random.uniform(low=-1.0, high=1.0, size=self.N_DOF_ROBOT)
)
obj_qp += (
noise_ratio
* self.object_pos_noise_amp
* self.np_random.uniform(low=-1.0, high=1.0, size=self.N_DOF_OBJECT)
)
obj_qv += (
noise_ratio
* self.object_vel_noise_amp
* self.np_random.uniform(low=-1.0, high=1.0, size=self.N_DOF_OBJECT)
)
obs_dict = {
'robot_qp': robot_qp,
'robot_qv': robot_qv,
'obj_qp': obj_qp,
'obj_qv': obj_qv,
}
# | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import hmac
import base64
import hashlib
import datetime
import colony
from . import parser
from . import exceptions
DEFAULT_CHARSET = "utf-8"
""" The default charset """
DEFAULT_EXPIRES_IN = "3600"
""" The default expires in """
DEFAULT_SIGNED_NAMES = (
"op_endpoint",
"return_to",
"response_nonce",
"assoc_handle",
"claimed_id",
"identity"
)
""" The default signed names """
DEFAULT_SIGNED_ITEMS = (
"provider_url",
"return_to",
"response_nonce",
"association_handle",
"claimed_id",
"identity"
)
""" The default signed items """
TRUE_VALUE = "true"
""" The true value """
FALSE_VALUE = "false"
""" The false value """
GET_METHOD_VALUE = "GET"
""" The get method value """
POST_METHOD_VALUE = "POST"
""" The post method value """
CONTENT_TYPE_CHARSET_VALUE = "content_type_charset"
""" The content type charset value """
HTTP_URI_VALUE = "http://"
""" The HTTP URI value """
HTTPS_URI_VALUE = "https://"
""" The HTTPS URI value """
XRI_URI_VALUE = "xri://="
""" The XRI URI value """
XRI_INITIALIZER_VALUE = "="
""" The XRI initializer value """
OPENID_NAMESPACE_VALUE = "http://specs.openid.net/auth/2.0"
""" The OpenID namespace value """
OPENID_SREG_1_1_EXTENSION_NAMESPACE_VALUE = "http://openid.net/extensions/sreg/1.1"
""" The OpenID SREG 1.1 extension namespace value """
OPENID_AX_1_0_EXTENSION_NAMESPACE_VALUE = "http://openid.net/srv/ax/1.0"
""" The OpenID ax 1.0 extension namespace value """
ASSOCIATE_MODE_VALUE = "associate"
""" The associate mode value """
CHECKID_SETUP_VALUE = "checkid_setup"
""" The checkid setup value """
CHECKID_IMMEDIATE_VALUE = "checkid_immediate"
""" The checkid immediate value """
ID_RES_VALUE = "id_res"
""" The id res value """
HMAC_SHA1_VALUE = "HMAC-SHA1"
""" The HMAC SHA1 value """

HMAC_SHA256_VALUE = "HMAC-SHA256"
""" The HMAC SHA256 value """

DH_SHA1_VALUE = "DH-SHA1"
""" The DH SHA1 value """

DH_SHA256_VALUE = "DH-SHA256"
""" The DH SHA256 value """

NO_ENCRYPTION_VALUE = "no-encryption"
""" The no encryption value """

XRDS_LOCATION_VALUE = "X-XRDS-Location"
""" The XRDS location value """

XRDS_LOCATION_LOWER_VALUE = "x-xrds-location"
""" The XRDS location lower value """

DEFAULT_OPENID_ASSOCIATE_TYPE = HMAC_SHA256_VALUE
""" The default OpenID associate type """

DEFAULT_OPENID_SESSION_TYPE = NO_ENCRYPTION_VALUE
""" The default OpenID session type (same value as before,
now expressed through the NO_ENCRYPTION_VALUE constant) """

NONCE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZUNIQUE"
""" The nonce time format """

MAXIMUM_NONCE_VALUES_LIST_SIZE = 1000
""" The maximum nonce values list size """

HMAC_HASH_MODULES_MAP = {
    HMAC_SHA1_VALUE : hashlib.sha1,
    HMAC_SHA256_VALUE : hashlib.sha256,
    DH_SHA1_VALUE : hashlib.sha1,
    # fixed: DH-SHA256 previously mapped to hashlib.sha1 (apparent typo);
    # the OpenID 2.0 DH-SHA256 session type hashes with SHA-256
    DH_SHA256_VALUE : hashlib.sha256
}
""" The map associating the HMAC values with the hashlib
hash function modules """
DIFFIE_HELLMAN_ASSOCIATION_TYPES = (
DH_SHA1_VALUE,
DH_SHA256_VALUE
)
""" The diffie hellman association types """
DEFAULT_PRIME_VALUE = colony.legacy.LONG(155172898181473697471232257763715539915724801966915404479707795314057629378541917580651227423698188993727816152646631438561595825688188889951272158842675419950341258706556549803580104870537681476726513255747040765857479291291572334510643245094715007229621094194349783925984760375594985848253359305585439638443)
""" The default prime value to be used in Diffie Hellman """
DEFAULT_BASE_VALUE = 2
""" The default base value to be used in diffie hellman """
class APIOpenID(colony.System):
    """
    The API OpenID class.

    Entry point for creating OpenID server/client instances and keeper of
    the per-provider nonce bookkeeping used to prevent replay of positive
    assertions.
    """
    nonce_values_map = {}
    """ The map associating the provider URL with the nonce values """
    def __init__(self, plugin):
        colony.System.__init__(self, plugin)
        # re-assign on the instance so nonce state is per-instance rather
        # than shared through the class attribute
        self.nonce_values_map = {}
    def create_server(self, api_attributes, open_server = True):
        """
        Creates a server, with the given API attributes.
        :type api_attributes: Dictionary
        :param api_attributes: The API attributes to be used.
        :type open_server: bool
        :param open_server: If the server should be opened.
        :rtype: OpenIDServer
        :return: The created server.
        """
        # retrieves the Diffie Hellman plugin
        diffie_hellman_plugin = self.plugin.diffie_hellman_plugin
        # retrieves the random plugin
        random_plugin = self.plugin.random_plugin
        # retrieves the OpenID structure (if available) and uses it
        # to create the "new" OpenID server
        openid_structure = api_attributes.get("openid_structure", None)
        openid_server = OpenIDServer(
            self.plugin,
            diffie_hellman_plugin,
            random_plugin,
            self,
            openid_structure
        )
        # in case the server is meant to be open
        # opens the server
        open_server and openid_server.open()
        # returns the OpenID server
        return openid_server
    def create_client(self, api_attributes, open_client = True):
        """
        Creates a client, with the given API attributes.
        :type api_attributes: Dictionary
        :param api_attributes: The API attributes to be used.
        :type open_client: bool
        :param open_client: If the client should be opened.
        :rtype: OpenIDClient
        :return: The created client.
        """
        # retrieves the client HTTP plugin
        client_http_plugin = self.plugin.client_http_plugin
        # retrieves the API Yadis plugin
        api_yadis_plugin = self.plugin.api_yadis_plugin
        # retrieves the OpenID structure (if available)
        openid_structure = api_attributes.get("openid_structure", None)
        # creates a new client with the given options, opens
        # it in case it's required and returns the generated
        # client to the caller method
        openid_client = OpenIDClient(self.plugin, client_http_plugin, api_yadis_plugin, self, openid_structure)
        open_client and openid_client.open()
        return openid_client
    def _verify_nonce(self, nonce_value, provider_url):
        """
        Verifies that the nonce value does not exist in the current
        nonce values database. The validation is made in accordance
        with the OpenID specification.
        :type nonce_value: String
        :param nonce_value: The nonce value to be verified.
        :type provider_url: String
        :param provider_url: The provider URL to be used in
        the verification.
        :rtype: bool
        :return: The result of the verification (True when the nonce
        has not been seen before).
        """
        # in case the provider URL does not exist in the
        # global nonce values map, no nonce was ever recorded
        # for it, so the value is trivially fresh
        if not provider_url in self.nonce_values_map:
            return True
        # retrieves the nonce values map
        # (slot 2 holds the membership map; slot 1 the ordered list)
        nonce_values_map = self.nonce_values_map[provider_url][2]
        # in case the nonce value exists in the
        # nonce values map (collision)
        if nonce_value in nonce_values_map:
            # returns false
            return False
        # returns true
        return True
    def _update_nonce(self, nonce_value, provider_url):
        """
        Updates the nonce database by adding the nonce value
        to it, using the provider URL.
        :type nonce_value: String
        :param nonce_value: The nonce value to be added.
        :type provider_url: String
        :param provider_url: The provider URL to be used in
        the addition.
        """
        # in case the provider URL is not defined
        # in the nonce values map
        if not provider_url in self.nonce_values_map:
            # sets the nonce values map
            self.nonce_values_map[provider_url] = {}
            # sets the nonce values list and map
            # (slot 1: insertion-ordered list, newest first;
            # slot 2: map for O(1) membership tests)
            self.nonce_values_map[provider_url][1] = []
            self.nonce_values_map[provider_url][2] = {}
        # retrieves the nonce values list and map
        nonce_values_list = self.nonce_values_map[provider_url][1]
        nonce_values_map = self.nonce_values_map[provider_url][2]
        # retrieves the nonce values list length
        nonce_values_list_length = len(nonce_values_list)
        # in case the list is full (it's a circular list)
        # the list needs to be kept at the same size (last item is removed)
        if nonce_values_list_length == MAXIMUM_NONCE_VALUES_LIST_SIZE:
            # retrieves the last element from the
            # nonce values list (the oldest)
            last_element = nonce_values_list[-1]
            # removes the last element from the nonce values map
            del nonce_values_map[last_element]
            # pops the last element from the nonce values list
            nonce_values_list.pop()
        # inserts the item at the beginning of the list
        # and sets the item in the map
        nonce_values_list.insert(0, nonce_value)
        nonce_values_map[nonce_value] = True
    def _btwoc(self, long_value):
        """
        Given some kind of integer (generally a long), this function
        returns the big-endian two's complement as a binary string.
        :type long_value: int
        :param long_value: The value to be converted.
        :rtype: String
        :return: The big-endian two's complement as a binary string.
        """
        # encodes the long value into string value
        long_value_encoded = colony.encode_two_complement_string(long_value)
        # converts the long value to a list value
        list_value = list(long_value_encoded)
        # reverses the list (little-endian -> big-endian byte order)
        list_value.reverse()
        # joins the list to retrieve the result
        result = "".join(list_value)
        # returns the result
        return result
    def _mklong(self, btwoc):
        """
        Given a big-endian two's complement string, return the
        long int it represents.
        :type btwoc: String
        :param btwoc: A big-endian two's complement string
        :rtype: int
        :return: The decoded int value.
        """
        # converts the string value into a list of characters
        list_value = list(btwoc)
        # reverses the list (big-endian -> little-endian byte order)
        list_value.reverse()
        # joins the list value to retrieve the string value
        string_value = "".join(list_value)
        # decodes the string value into long
        result = colony.decode_two_complement_string(string_value)
        # returns the result
        return result
class OpenIDServer(object):
"""
The class that represents an OpenID server connection.
"""
api_openid_plugin = None
""" The API OpenID plugin """
diffie_hellman_plugin = None
""" The Diffie Hellman plugin """
random_plugin = None
""" The random plugin """
api_openid = | |
<gh_stars>10-100
# Copyright The IETF Trust 2015-2021, All Rights Reserved
# -*- coding: utf-8 -*-
import os
import time
from textwrap import dedent
from typing import List, Tuple # pyflakes:ignore
import debug # pyflakes:ignore
debug.debug = True
from django.conf import settings
from django.core import checks
from django.utils.module_loading import import_string
from django.utils.encoding import force_str
import ietf.utils.patch as patch
checks_run = [] # type: List[str]
def already_ran():
    """Return True if the calling check function has already run.

    Uses the caller's function name (taken from the call stack) as the key
    into the module-level ``checks_run`` registry: the first call registers
    the name and returns False, subsequent calls return True.
    """
    import inspect
    caller_name = inspect.currentframe().f_back.f_code.co_name
    if caller_name in checks_run:
        return True
    checks_run.append(caller_name)
    return False
@checks.register('directories')
def check_cdn_directory_exists(app_configs, **kwargs):
    """This checks that the path from which the CDN will serve static files for
    this version of the datatracker actually exists. In development and test
    mode STATIC_ROOT will normally be just static/, but in production it will be
    set to a different part of the file system which is served via CDN, and the
    path will contain the datatracker release version.
    """
    if already_ran():
        return []
    # only production serves the static files from STATIC_ROOT via CDN
    if settings.SERVER_MODE != 'production':
        return []
    if os.path.exists(settings.STATIC_ROOT):
        return []
    return [
        checks.Error(
            "The static files directory has not been set up.",
            hint="Please run 'ietf/manage.py collectstatic'.",
            obj=None,
            id='datatracker.E001',
        )
    ]
@checks.register('files')
def check_group_email_aliases_exists(app_configs, **kwargs):
    """Verify the group email aliases file is readable and non-empty."""
    from ietf.group.views import check_group_email_aliases
    if already_ran():
        return []
    errors = []
    try:
        if not check_group_email_aliases():
            errors.append(checks.Error(
                "Found no aliases in the group email aliases file\n'%s'." % settings.GROUP_ALIASES_PATH,
                hint="Please run the generate_group_aliases management command to generate them.",
                obj=None,
                id="datatracker.E0002",
            ))
    except IOError as e:
        errors.append(checks.Error(
            "Could not read group email aliases:\n %s" % e,
            hint="Please run the generate_group_aliases management command to generate them.",
            obj=None,
            id="datatracker.E0003",
        ))
    return errors
@checks.register('files')
def check_doc_email_aliases_exists(app_configs, **kwargs):
    """Verify the document email aliases file is readable and non-empty."""
    from ietf.doc.views_doc import check_doc_email_aliases
    if already_ran():
        return []
    errors = []
    try:
        if not check_doc_email_aliases():
            errors.append(checks.Error(
                "Found no aliases in the document email aliases file\n'%s'." % settings.DRAFT_VIRTUAL_PATH,
                hint="Please run the generate_draft_aliases management command to generate them.",
                obj=None,
                id="datatracker.E0004",
            ))
    except IOError as e:
        errors.append(checks.Error(
            "Could not read document email aliases:\n %s" % e,
            hint="Please run the generate_draft_aliases management command to generate them.",
            obj=None,
            id="datatracker.E0005",
        ))
    return errors
@checks.register('directories')
def check_id_submission_directories(app_configs, **kwargs):
    """Check that the directories used by the ID submission tool exist."""
    if already_ran():
        return []
    errors = []
    setting_names = (
        "IDSUBMIT_STAGING_PATH",
        "IDSUBMIT_REPOSITORY_PATH",
        "INTERNET_DRAFT_ARCHIVE_DIR",
    )
    for name in setting_names:
        path = getattr(settings, name)
        if os.path.exists(path):
            continue
        errors.append(checks.Critical(
            "A directory used by the ID submission tool does not\n"
            "exist at the path given in the settings file. The setting is:\n"
            " %s = %s" % (name, path),
            hint = ("Please either update the local settings to point at the correct\n"
                    "\tdirectory, or if the setting is correct, create the indicated directory.\n"),
            id = "datatracker.E0006",
        ))
    return errors
@checks.register('files')
def check_id_submission_files(app_configs, **kwargs):
    """Check that the files used by the ID submission tool exist."""
    if already_ran():
        return []
    errors = []
    for name in ("IDSUBMIT_IDNITS_BINARY", ):
        path = getattr(settings, name)
        if os.path.exists(path):
            continue
        errors.append(checks.Critical(
            "A file used by the ID submission tool does not exist\n"
            "at the path given in the settings file. The setting is:\n"
            " %s = %s" % (name, path),
            hint = ("Please either update the local settings to point at the correct\n"
                    "\tfile, or if the setting is correct, make sure the file is in place and\n"
                    "\thas the right permissions.\n"),
            id = "datatracker.E0007",
        ))
    return errors
@checks.register('directories')
def check_yang_model_directories(app_configs, **kwargs):
    """Check that the yang model directories used by the validation tools exist."""
    if already_ran():
        return []
    errors = []
    for s in ("SUBMIT_YANG_RFC_MODEL_DIR", "SUBMIT_YANG_DRAFT_MODEL_DIR", "SUBMIT_YANG_IANA_MODEL_DIR", "SUBMIT_YANG_CATALOG_MODEL_DIR",):
        p = getattr(settings, s)
        if not os.path.exists(p):
            errors.append(checks.Critical(
                "A directory used by the yang validation tools does\n"
                # fixed typo in user-facing message: "gvien" -> "given"
                "not exist at the path given in the settings file. The setting is:\n"
                " %s = %s" % (s, p),
                hint = ("Please either update your local settings to point at the correct\n"
                        "\tdirectory, or if the setting is correct, create the indicated directory.\n"),
                id = "datatracker.E0017",
            ))
    return errors
@checks.register('submission-checkers')
def check_id_submission_checkers(app_configs, **kwargs):
    """Check that each configured draft submission checker class can be
    imported and instantiated, and exposes the expected interface
    (a 'name' attribute and at least one check_*/fragment method)."""
    if already_ran():
        return []
    errors = []
    for checker_path in settings.IDSUBMIT_CHECKER_CLASSES:
        try:
            checker_class = import_string(checker_path)
        except Exception as e:
            errors.append(checks.Critical(
                "An exception was raised when trying to import the\n"
                "draft submission checker class '%s':\n %s" % (checker_path, e),
                hint = "Please check that the class exists and can be imported.\n",
                id = "datatracker.E0008",
            ))
            # fixed: without this continue, the instantiation below would
            # either raise NameError (first iteration) or silently reuse the
            # checker_class imported in a *previous* iteration.
            continue
        try:
            checker = checker_class()
        except Exception as e:
            errors.append(checks.Critical(
                "An exception was raised when trying to instantiate\n"
                "the draft submission checker class '%s':\n %s" % (checker_path, e),
                hint = "Please check that the class can be instantiated.\n",
                id = "datatracker.E0009",
            ))
            continue
        for attr in ('name',):
            if not hasattr(checker, attr):
                errors.append(checks.Critical(
                    "The draft submission checker\n '%s'\n"
                    "has no attribute '%s', which is required" % (checker_path, attr),
                    hint = "Please update the class.\n",
                    id = "datatracker.E0010",
                ))
        checker_methods = ("check_file_txt", "check_file_xml", "check_fragment_txt", "check_fragment_xml", )
        for method in checker_methods:
            if hasattr(checker, method):
                break
        else:
            errors.append(checks.Critical(
                "The draft submission checker\n '%s'\n"
                " has no recognised checker method; "
                "should be one or more of %s." % (checker_path, checker_methods),
                hint = "Please update the class.\n",
                id = "datatracker.E0011",
            ))
    return errors
@checks.register('directories')
def check_media_directories(app_configs, **kwargs):
    """Check that the media upload/serve directories exist."""
    if already_ran():
        return []
    errors = []
    for name in ("PHOTOS_DIR", ):
        directory = getattr(settings, name)
        if os.path.exists(directory):
            continue
        errors.append(checks.Critical(
            "A directory used for media uploads and serves does\n"
            "not exist at the path given in the settings file. The setting is:\n"
            " %s = %s" % (name, directory),
            hint = ("Please either update the local settings to point at the correct\n"
                    "\tdirectory, or if the setting is correct, create the indicated directory.\n"),
            id = "datatracker.E0012",
        ))
    return errors
@checks.register('directories')
def check_proceedings_directories(app_configs, **kwargs):
    """Check that the meeting materials directories exist."""
    if already_ran():
        return []
    errors = []
    for name in ("AGENDA_PATH", ):
        directory = getattr(settings, name)
        if os.path.exists(directory):
            continue
        errors.append(checks.Critical(
            "A directory used for meeting materials does not\n"
            "exist at the path given in the settings file. The setting is:\n"
            " %s = %s" % (name, directory),
            hint = ("Please either update the local settings to point at the correct\n"
                    "\tdirectory, or if the setting is correct, create the indicated directory.\n"),
            id = "datatracker.E0013",
        ))
    return errors
@checks.register('cache')
def check_cache(app_configs, **kwargs):
    """Smoke-test the default cache backend (production mode only).

    Performs three round-trips: a basic set/get, an expiry check (the value
    must be gone after its 1-second timeout), and a set using
    SESSION_COOKIE_AGE as the timeout. Failures are reported as warnings,
    not errors, since a broken cache degrades performance but is not fatal.
    Note: the expiry check sleeps for ~2 seconds.
    """
    if already_ran():
        return []
    errors = []
    if settings.SERVER_MODE == 'production':
        from django.core.cache import cache
        def cache_error(msg, errnum):
            # helper: wrap a failure message in a checks.Warning naming the
            # configured default backend
            return checks.Warning(
                ( "A cache test failed with the message:\n '%s'.\n"
                "This indicates that the cache is unavailable or not working as expected.\n"
                "It will impact performance, but isn't fatal. The default cache is:\n"
                " CACHES['default']['BACKEND'] = %s") % (
                    msg,
                    settings.CACHES["default"]["BACKEND"],
                ),
                hint = "Please check that the configured cache backend is available.\n",
                id = "datatracker.%s" % errnum,
            )
        cache_key = "checks:check_cache"
        val = os.urandom(32)
        wait = 1
        # round-trip: a value just set must be readable back
        cache.set(cache_key, val, wait)
        if not cache.get(cache_key) == val:
            errors.append(cache_error("Could not get value from cache", "E0014"))
        time.sleep(wait+1)
        # should have timed out
        if cache.get(cache_key) == val:
            errors.append(cache_error("Cache value didn't time out", "E0015"))
        # a long timeout (session cookie age) must also be accepted
        cache.set(cache_key, val, settings.SESSION_COOKIE_AGE)
        if not cache.get(cache_key) == val:
            errors.append(cache_error("Cache didn't accept session cookie age", "E0016"))
    return errors
@checks.register('files')
def maybe_patch_library(app_configs, **kwargs):
    """Apply the library patches listed in CHECKS_LIBRARY_PATCHES_TO_APPLY.

    Each patch file must contain a unified diff with paths rooted in the
    django dir (e.g. 'django/db/models/fields/__init__.py'). Parse/apply
    failures are reported as warnings; a freshly applied patch outside
    production triggers an error asking for a re-run.
    """
    errors = []
    # Change path to our copy of django (this assumes we're running in a
    # virtualenv, which we should)
    import os, django, sys
    django_path = os.path.dirname(django.__file__)
    library_path = os.path.dirname(django_path)
    top_dir = os.path.dirname(settings.BASE_DIR)
    # All patches in settings.CHECKS_LIBRARY_PATCHES_TO_APPLY must have a
    # relative file path rooted in the django dir, for instance
    # 'django/db/models/fields/__init__.py'
    for patch_file in settings.CHECKS_LIBRARY_PATCHES_TO_APPLY:
        try:
            patch_path = os.path.join(top_dir, patch_file)
            patch_set = patch.fromfile(patch_path)
            if patch_set:
                if not patch_set.apply(root=library_path.encode('utf-8')):
                    errors.append(checks.Warning(
                        "Could not apply patch from file '%s'"%patch_file,
                        hint=("Make sure that the patch file contains a unified diff and has valid file paths\n\n"
                              "\tPatch root: %s\n"
                              "\tTarget files: %s\n") % (library_path, ', '.join(force_str(i.target) for i in patch_set.items)),
                        id="datatracker.W0002",
                    ))
                else:
                    # Patch succeeded or was a no-op
                    # fixed: guard len(sys.argv) — bare 'sys.argv[1]' raised
                    # IndexError (not caught by the IOError handler) when the
                    # process was started without a subcommand argument
                    if (not patch_set.already_patched
                        and settings.SERVER_MODE != 'production'
                        and (len(sys.argv) < 2 or sys.argv[1] != 'check')):
                        errors.append(
                            checks.Error("Found an unpatched file, and applied the patch in %s" % (patch_file),
                                hint="You will need to re-run the command now that the patch in place.",
                                id="datatracker.E0022",
                            )
                        )
            else:
                errors.append(checks.Warning(
                    "Could not parse patch file '%s'"%patch_file,
                    hint="Make sure that the patch file contains a unified diff",
                    id="datatracker.W0001",
                ))
        except IOError as e:
            errors.append(
                checks.Warning("Could not apply patch from %s: %s" % (patch_file, e),
                    hint="Check file permissions and locations",
                    id="datatracker.W0003",
                )
            )
    return errors
@checks.register('security')
def check_api_key_in_local_settings(app_configs, **kwargs):
errors = []
import ietf.settings_local
if settings.SERVER_MODE | |
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Model and Query Classes for Endpoint Standard"""
from cbc_sdk.base import (MutableBaseModel, UnrefreshableModel, CreatableModelMixin, NewBaseModel, FacetQuery,
PaginatedQuery, QueryBuilder, QueryBuilderSupportMixin, IterableQueryMixin)
from cbc_sdk.base import Query as BaseEventQuery
from cbc_sdk.utils import convert_query_params
from cbc_sdk.errors import ApiError, TimeoutError
from cbc_sdk.platform.reputation import ReputationOverride
from copy import deepcopy
from pathlib import Path
import logging
import json
import time
import os
from cbc_sdk.errors import ServerError
log = logging.getLogger(__name__)
"""Endpoint Standard Models"""
class EndpointStandardMutableModel(MutableBaseModel):
    """Represents mutable (create/update-capable) Endpoint Standard objects.

    Subclasses set ``urlobject``, ``primary_key`` and ``info_key``; updates are
    sent as PATCH by default, or as a full-object PUT when a subclass overrides
    ``_change_object_http_method``.
    """
    _change_object_http_method = "PATCH"
    _change_object_key_name = None

    def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False, full_doc=False):
        """Initialize an EndpointStandardMutableModel with model_unique_id and initial_data."""
        super(EndpointStandardMutableModel, self).__init__(cb, model_unique_id=model_unique_id,
                                                           initial_data=initial_data, force_init=force_init,
                                                           full_doc=full_doc)
        if not self._change_object_key_name:
            self._change_object_key_name = self.primary_key

    @classmethod
    def _query_implementation(cls, cb, **kwargs):
        """Return the Query object used to search for instances of this model.

        Fix: decorated as a @classmethod for consistency with Event and Policy;
        without the decorator, calling this through the class would bind the
        ``cb`` argument to the ``cls`` parameter.
        """
        return Query(cls, cb, kwargs.get("query_string", None))

    def _parse(self, obj):
        """Unwrap the ``info_key`` envelope from a raw API response, if present."""
        if isinstance(obj, dict) and self.info_key in obj:
            return obj[self.info_key]

    def _update_object(self):
        """Push local changes to the server using PATCH or full-object PUT."""
        if self._change_object_http_method != "PATCH":
            return self._update_entire_object()
        else:
            return self._patch_object()

    def _update_entire_object(self):
        """Create the object, or send its entire info dict as an update."""
        # No unique ID yet (or the primary key itself changed) means creation.
        if self.__class__.primary_key in self._dirty_attributes.keys() or self._model_unique_id is None:
            new_object_info = deepcopy(self._info)
            try:
                # The server assigns the primary key unless the model demands one.
                if not self._new_object_needs_primary_key:
                    del (new_object_info[self.__class__.primary_key])
            except Exception:
                pass
            log.debug("Creating a new {0:s} object".format(self.__class__.__name__))
            ret = self._cb.api_json_request(self.__class__._new_object_http_method, self.urlobject,
                                            data={self.info_key: new_object_info})
        else:
            log.debug("Updating {0:s} with unique ID {1:s}".format(self.__class__.__name__, str(self._model_unique_id)))
            ret = self._cb.api_json_request(self.__class__._change_object_http_method,
                                            self._build_api_request_uri(), data={self.info_key: self._info})
        return self._refresh_if_needed(ret)

    def _patch_object(self):
        """Create the object, or send only the dirty attributes as a patch."""
        if self.__class__.primary_key in self._dirty_attributes.keys() or self._model_unique_id is None:
            log.debug("Creating a new {0:s} object".format(self.__class__.__name__))
            ret = self._cb.api_json_request(self.__class__._new_object_http_method, self.urlobject,
                                            data=self._info)
        else:
            updates = {}
            for k in self._dirty_attributes.keys():
                updates[k] = self._info[k]
            log.debug("Updating {0:s} with unique ID {1:s}".format(self.__class__.__name__, str(self._model_unique_id)))
            ret = self._cb.api_json_request(self.__class__._change_object_http_method,
                                            self._build_api_request_uri(), data=updates)
        return self._refresh_if_needed(ret)

    def _refresh_if_needed(self, request_ret):
        """Interpret an update/create response; refresh from the server when
        the response did not include the full record.

        Returns:
            The model's unique ID.

        Raises:
            ServerError: on a non-2xx status or a failure message from the API.
        """
        refresh_required = True
        if request_ret.status_code not in range(200, 300):
            try:
                message = json.loads(request_ret.text)[0]
            except Exception:
                message = request_ret.text
            raise ServerError(request_ret.status_code, message,
                              result="Did not update {} record.".format(self.__class__.__name__))
        else:
            message = request_ret.json()
            log.debug("Received response: %s" % message)
            if not isinstance(message, dict):
                raise ServerError(request_ret.status_code, message,
                                  result="Unknown error updating {0:s} record.".format(self.__class__.__name__))
            else:
                if message.get("success", False):
                    if isinstance(message.get(self.info_key, None), dict):
                        # Full record returned; no round trip needed.
                        self._info = message.get(self.info_key)
                        self._full_init = True
                        refresh_required = False
                    else:
                        if self._change_object_key_name in message.keys():
                            # if all we got back was an ID, try refreshing to get the entire record.
                            log.debug("Only received an ID back from the server, forcing a refresh")
                            self._info[self.primary_key] = message[self._change_object_key_name]
                            refresh_required = True
                else:
                    # "success" is False
                    raise ServerError(request_ret.status_code, message.get("message", ""),
                                      result="Did not update {0:s} record.".format(self.__class__.__name__))
        self._dirty_attributes = {}
        if refresh_required:
            self.refresh()
        return self._model_unique_id
class Event(NewBaseModel):
    """Represents an Endpoint Standard Event."""
    urlobject = "/integrationServices/v3/event"
    primary_key = "eventId"
    info_key = "eventInfo"

    def __init__(self, cb, model_unique_id, initial_data=None):
        """Initialize an Event with model_unique_id and initial_data."""
        super(Event, self).__init__(cb, model_unique_id, initial_data)

    def _parse(self, obj):
        """Unwrap the eventInfo envelope from a raw API response, if present."""
        if type(obj) is dict and self.info_key in obj:
            return obj[self.info_key]
        return None

    @classmethod
    def _query_implementation(cls, cb, **kwargs):
        """Return the Query object used to search for Event instances."""
        query_string = kwargs.get("query_string", None)
        return Query(cls, cb, query_string)
class Policy(EndpointStandardMutableModel, CreatableModelMixin):
    """Represents an Endpoint Standard Policy."""
    urlobject = "/integrationServices/v3/policy"
    info_key = "policyInfo"
    swagger_meta_file = "endpoint_standard/models/policyInfo.yaml"
    _change_object_http_method = "PUT"
    _change_object_key_name = "policyId"

    @classmethod
    def _query_implementation(cls, cb, **kwargs):
        """Return the Query object used to search for Policy instances."""
        query_string = kwargs.get("query_string", None)
        return Query(cls, cb, query_string)

    @property
    def rules(self):
        """Returns a dictionary of rules and rule IDs for this Policy."""
        return {rule.get("id"): rule for rule in self.policy.get("rules", [])}

    def add_rule(self, new_rule):
        """Adds a rule to this Policy.
        Arguments:
            new_rule (dict(str,str)): The new rule to add to this Policy.
        Notes:
            - The new rule must conform to this dictionary format:
                  {"action": "ACTION",
                  "application": {"type": "TYPE", "value": "VALUE"},
                  "operation": "OPERATION",
                  "required": "REQUIRED"}
            - The dictionary keys have these possible values:
                  "action": ["IGNORE", "ALLOW", "DENY", "TERMINATE_PROCESS",
                             "TERMINATE_THREAD", "TERMINATE"]
                  "type": ["NAME_PATH", "SIGNED_BY", "REPUTATION"]
                  "value": Any string value to match on
                  "operation": ["BYPASS_ALL", "INVOKE_SCRIPT", "INVOKE_SYSAPP",
                                "POL_INVOKE_NOT_TRUSTED", "INVOKE_CMD_INTERPRETER",
                                "RANSOM", "NETWORK", "PROCESS_ISOLATION", "CODE_INJECTION",
                                "MEMORY_SCRAPE", "RUN_INMEMORY_CODE", "ESCALATE", "RUN"]
                  "required": [True, False]
        """
        rule_url = "{0}/rule".format(self._build_api_request_uri())
        self._cb.post_object(rule_url, {"ruleInfo": new_rule})
        self.refresh()

    def delete_rule(self, rule_id):
        """Deletes a rule from this Policy."""
        rule_url = "{0}/rule/{1}".format(self._build_api_request_uri(), rule_id)
        self._cb.delete_object(rule_url)
        self.refresh()

    def replace_rule(self, rule_id, new_rule):
        """Replaces a rule in this policy."""
        rule_url = "{0}/rule/{1}".format(self._build_api_request_uri(), rule_id)
        self._cb.put_object(rule_url, {"ruleInfo": new_rule})
        self.refresh()
class EnrichedEvent(UnrefreshableModel):
    """Represents an enriched event retrieved by one of the Enterprise EDR endpoints."""
    # Default sort order and document key used by the query implementation.
    default_sort = 'device_timestamp'
    primary_key = "event_id"
    @classmethod
    def _query_implementation(self, cb, **kwargs):
        # This will emulate a synchronous enriched event query, for now.
        # NOTE: decorated @classmethod, so the first parameter receives the
        # class despite being named 'self'.
        return EnrichedEventQuery(self, cb)
    def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False, full_doc=True):
        """
        Initialize the EnrichedEvent object.
        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            model_unique_id (Any): The unique ID for this particular instance of the model object.
            initial_data (dict): The data to use when initializing the model object.
            force_init (bool): True to force object initialization.
            full_doc (bool): True to mark the object as fully initialized.
        """
        self._details_timeout = 0
        self._info = None
        # Given only an ID, run a synchronous lookup to fetch the event data.
        if model_unique_id is not None and initial_data is None:
            enriched_event_future = cb.select(EnrichedEvent).where(event_id=model_unique_id).execute_async()
            result = enriched_event_future.result()
            if len(result) == 1:
                initial_data = result[0]
        super(EnrichedEvent, self).__init__(cb, model_unique_id=model_unique_id, initial_data=initial_data,
                                            force_init=force_init, full_doc=full_doc)
    @property
    def process_sha256(self):
        """Returns a string representation of the SHA256 hash for this process.
        Returns:
            hash (str): SHA256 hash of the process.
        """
        # process_hash may hold multiple hashes; a 64-char entry is the SHA256.
        if "process_hash" in self._info:
            return next((hsh for hsh in self.process_hash if len(hsh) == 64), None)
        else:
            return None
    def get_details(self, timeout=0, async_mode=False):
        """Requests detailed results.
        Args:
            timeout (int): Event details request timeout in milliseconds.
            async_mode (bool): True to request details in an asynchronous manner.
        Note:
            - When using asynchronous mode, this method returns a python future.
                You can call result() on the future object to wait for completion and get the results.
        """
        self._details_timeout = timeout
        if not self.event_id:
            raise ApiError("Trying to get event details on an invalid event_id")
        if async_mode:
            return self._cb._async_submit(lambda arg, kwarg: self._get_detailed_results())
        else:
            return self._get_detailed_results()
    def _get_detailed_results(self):
        """Actual search details implementation"""
        # Submit a detail job for this single event, then poll for completion.
        args = {"event_ids": [self.event_id]}
        url = "/api/investigate/v2/orgs/{}/enriched_events/detail_jobs".format(self._cb.credentials.org_key)
        query_start = self._cb.post_object(url, body=args)
        job_id = query_start.json().get("job_id")
        timed_out = False
        # Timeout bookkeeping is in milliseconds (see get_details).
        submit_time = time.time() * 1000
        while True:
            status_url = "/api/investigate/v2/orgs/{}/enriched_events/detail_jobs/{}".format(
                self._cb.credentials.org_key,
                job_id,
            )
            result = self._cb.get_object(status_url)
            searchers_contacted = result.get("contacted", 0)
            searchers_completed = result.get("completed", 0)
            log.debug("contacted = {}, completed = {}".format(searchers_contacted, searchers_completed))
            # No searchers contacted yet: job not started, keep polling
            # (note: the timeout is not checked on this path).
            if searchers_contacted == 0:
                time.sleep(.5)
                continue
            if searchers_completed < searchers_contacted:
                if self._details_timeout != 0 and (time.time() * 1000) - submit_time > self._details_timeout:
                    timed_out = True
                    break
            else:
                break
            time.sleep(.5)
        if timed_out:
            raise TimeoutError(message="user-specified timeout exceeded while waiting for results")
        log.debug("Pulling detailed results, timed_out={}".format(timed_out))
        still_fetching = True
        result_url = "/api/investigate/v2/orgs/{}/enriched_events/detail_jobs/{}/results".format(
            self._cb.credentials.org_key,
            job_id
        )
        query_parameters = {}
        while still_fetching:
            result = self._cb.get_object(result_url, query_parameters=query_parameters)
            total_results = result.get('num_available', 0)
            found_results = result.get('num_found', 0)
            # if found is 0, then no enriched events
            if found_results == 0:
                return self
            if total_results != 0:
                results = result.get('results', [])
                # NOTE(review): only the first result is kept, and the loop
                # spins without sleeping while num_available is 0 -- confirm
                # this matches the detail-jobs API contract.
                self._info = results[0]
                return self
    def ban_process_sha256(self, description=""):
        """Bans the application by adding the process_sha256 to the BLACK_LIST
        Args:
            description: The justification for why the application was added to the BLACK_LIST
        Returns:
            ReputationOverride (cbc_sdk.platform.ReputationOverride): ReputationOverride object
                created in the Carbon Black Cloud
        """
        return ReputationOverride.create(self._cb, {
            "description": description,
            "override_list": "BLACK_LIST",
            "override_type": "SHA256",
            "sha256_hash": self.process_sha256,
            "filename": Path(self.process_name.replace('\\', os.sep)).name})
    def approve_process_sha256(self, description=""):
        """Approves the application by adding the process_sha256 to the WHITE_LIST
        Args:
            description: The justification for why the application was added to the WHITE_LIST
        Returns:
            ReputationOverride (cbc_sdk.platform.ReputationOverride): ReputationOverride object
                created in the Carbon Black Cloud
        """
        return ReputationOverride.create(self._cb, {
            "description": description,
            "override_list": "WHITE_LIST",
            "override_type": "SHA256",
            "sha256_hash": self.process_sha256,
            "filename": Path(self.process_name.replace('\\', os.sep)).name})
class EnrichedEventFacet(UnrefreshableModel):
"""Represents an enriched event retrieved by one of the Enterprise EDR endpoints."""
primary_key = "job_id"
swagger_meta_file = "endpoint_standard/models/enriched_event_facet.yaml"
submit_url = "/api/investigate/v2/orgs/{}/enriched_events/facet_jobs"
result_url = "/api/investigate/v2/orgs/{}/enriched_events/facet_jobs/{}/results"
class Terms(UnrefreshableModel):
"""Represents the facet fields and values associated with an Enriched Event Facet query."""
def __init__(self, cb, initial_data):
"""Initialize an EnrichedEventFacet Terms object with initial_data."""
super(EnrichedEventFacet.Terms, self).__init__(
cb,
model_unique_id=None,
initial_data=initial_data,
force_init=False,
full_doc=True,
)
self._facets = {}
for facet_term_data in initial_data:
field = facet_term_data["field"]
values = facet_term_data["values"]
self._facets[field] = values
@property
def facets(self):
"""Returns the terms' facets for this result."""
return self._facets
@property
def fields(self):
"""Returns the terms facets' fields for this result."""
return [field for field in self._facets]
class Ranges(UnrefreshableModel):
"""Represents the range (bucketed) facet fields and values associated with an Enriched Event Facet query."""
def __init__(self, cb, initial_data):
"""Initialize an EnrichedEventFacet Ranges object with initial_data."""
super(EnrichedEventFacet.Ranges, self).__init__(
cb,
model_unique_id=None,
initial_data=initial_data,
force_init=False,
full_doc=True,
)
| |
for plotting purposes #
###############################################################
###############################################################
# if qafile is not None:
# outfile=qa.write_qa_ql(qafile,retval)
# log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
fig.plot_bias_overscan(retval,qafig,plotconf=plotconf,hardplots=hardplots)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Get_RMS(MonitoringAlg):
    """QA algorithm computing read-noise (RMS) metrics from the overscan
    (bias) regions of a preprocessed DESI image."""
    def __init__(self,name,config,logger=None):
        """Set up result/status keys, reference metrics and warn/normal
        ranges from the QA configuration, then delegate to MonitoringAlg."""
        if name is None or name.strip() == "":
            name="RMS"
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "NOISE_AMP"
        status=kwargs['statKey'] if 'statKey' in kwargs else "NOISE_AMP_STATUS"
        kwargs["RESULTKEY"]=key
        kwargs["QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "NOISE_WARN_RANGE" in parms and "NOISE_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["NOISE_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["NOISE_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        # NOTE(review): 'im' is not defined in this scope; presumably a
        # module-level name (the expected input type) -- confirm upstream.
        MonitoringAlg.__init__(self,name,im,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input, optionally load the preproc image when running
        this QA standalone ('singleqa'), and dispatch to run_qa."""
        if len(args) == 0 :
            log.critical("No parameter is found for this QA")
            sys.exit("Update the configuration file for the parameters")
        if not self.is_compatible(type(args[0])):
            #raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
            log.critical("Incompatible input!")
            sys.exit("Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
        if kwargs["singleqa"] == 'Get_RMS':
            night = kwargs['night']
            expid = '{:08d}'.format(kwargs['expid'])
            camera = kwargs['camera']
            image = get_image('preproc',night,expid,camera,kwargs["specdir"])
        else: image=args[0]
        inputs=get_inputs(*args,**kwargs)
        return self.run_qa(image,inputs)
    def run_qa(self,image,inputs):
        """Compute overscan noise metrics (per-amp read noise, sigma-percentile
        widths, 5-sigma outlier count, pattern noise) and return them in the
        standard QA result dictionary; optionally produce the QA figure."""
        camera=inputs["camera"]
        paname=inputs["paname"]
        amps=inputs["amps"]
        qafile=inputs["qafile"]
        qafig=inputs["qafig"]
        param=inputs["param"]
        refmetrics=inputs["refmetrics"]
        plotconf=inputs["plotconf"]
        hardplots=inputs["hardplots"]
        retval={}
        retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
        retval["PANAME"] = paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["CAMERA"] = camera
        retval["FLAVOR"] = flavor = image.meta["FLAVOR"]
        kwargs=self.config['kwargs']
        if flavor == 'science':
            fibmap =fits.open(kwargs['FiberMap'])
            retval["PROGRAM"]=fibmap[1].header['PROGRAM']
        retval["NIGHT"] = image.meta["NIGHT"]
        # return rms values in rms/sqrt(exptime)
        #rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta["EXPTIME"])) #- should we add dark current and/or readnoise to this as well?
        #rmsccd = np.mean([image.meta['RDNOISE1'],image.meta['RDNOISE2'],image.meta['RDNOISE3'],image.meta['RDNOISE4']]) #--> "NOISE":rmsccd
        if param is None:
            log.critical("No parameter is given for this QA! ")
            sys.exit("Check the configuration file")
        retval["PARAMS"] = param
        #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # SE: this section is moved from BIAS_FROM_OVERSCAN to header
        data=[]
        row_data_amp1=[]
        row_data_amp2=[]
        row_data_amp3=[]
        row_data_amp4=[]
        bias_patnoise=[]
        #bias_overscan=[]
        #RS: loop through amps based on header info
        loop_amps = get_amp_ids(image.meta)
        # Guard against zero exposure time (e.g. bias frames) before dividing.
        exptime=image.meta["EXPTIME"]
        if exptime == 0.:
            exptime = 1.
        for kk in loop_amps:
            sel=parse_sec_keyword(image.meta['BIASSEC'+kk])
            #- Obtain counts/second in bias region
            # pixdata=image[sel]/header["EXPTIME"]
            pixdata=image.pix[sel]/exptime
            # Amp IDs can be numeric ('1'..'4') or alphabetic ('A'..'D').
            if kk == '1' or kk == 'A':
                for i in range(pixdata.shape[0]):
                    row_amp1=pixdata[i]
                    row_data_amp1.append(row_amp1)
            if kk == '2' or kk == 'B':
                for i in range(pixdata.shape[0]):
                    row_amp2=pixdata[i]
                    row_data_amp2.append(row_amp2)
            if kk == '3' or kk == 'C':
                for i in range(pixdata.shape[0]):
                    row_amp3=pixdata[i]
                    row_data_amp3.append(row_amp3)
            if kk == '4' or kk == 'D':
                for i in range(pixdata.shape[0]):
                    row_amp4=pixdata[i]
                    row_data_amp4.append(row_amp4)
            #- Compute statistics of the bias region that only reject
            # the 0.5% of smallest and largest values. (from sdssproc)
            isort=np.sort(pixdata.ravel())
            nn=isort.shape[0]
            bias=np.mean(isort[int(0.005*nn) : int(0.995*nn)])
            #bias_overscan.append(bias)
            data.append(isort)
        #- Combine data from each row per amp and take average
        # BIAS_ROW = mean_row
        # Pattern noise per amp: std of per-row medians over the amp's read
        # noise from the header (RDNOISEn, falling back to OBSRDNx keywords).
        median_row_amp1=[]
        for i in range(len(row_data_amp1)):
            median=np.median(row_data_amp1[i])
            median_row_amp1.append(median)
        rms_median_row_amp1= np.std(median_row_amp1)
        try:
            noise1 = image.meta['RDNOISE1']
        except:
            noise1 = image.meta['OBSRDNA']
        bias_patnoise.append(rms_median_row_amp1/noise1)
        median_row_amp2=[]
        for i in range(len(row_data_amp2)):
            median=np.median(row_data_amp2[i])
            median_row_amp2.append(median)
        rms_median_row_amp2= np.std(median_row_amp2)
        try:
            noise2 = image.meta['RDNOISE2']
        except:
            noise2 = image.meta['OBSRDNB']
        bias_patnoise.append(rms_median_row_amp2/noise2)
        median_row_amp3=[]
        for i in range(len(row_data_amp3)):
            median=np.median(row_data_amp3[i])
            median_row_amp3.append(median)
        rms_median_row_amp3= np.std(median_row_amp3)
        try:
            noise3 = image.meta['RDNOISE3']
        except:
            noise3 = image.meta['OBSRDNC']
        bias_patnoise.append(rms_median_row_amp3/noise3)
        median_row_amp4=[]
        for i in range(len(row_data_amp4)):
            median=np.median(row_data_amp4[i])
            median_row_amp4.append(median)
        rms_median_row_amp4= np.std(median_row_amp4)
        try:
            noise4 = image.meta['RDNOISE4']
        except:
            noise4 = image.meta['OBSRDND']
        bias_patnoise.append(rms_median_row_amp4/noise4)
        #- Calculate upper and lower bounds of 1, 2, and 3 sigma
        # NOTE(review): assumes exactly 4 amps were looped above -- confirm
        # loop_amps always yields 4 entries.
        full_data=np.concatenate((data[0],data[1],data[2],data[3])).ravel()
        sig1_lo = np.percentile(full_data,50.-(param['PERCENTILES'][0]/2.))
        sig1_hi = np.percentile(full_data,50.+(param['PERCENTILES'][0]/2.))
        sig2_lo = np.percentile(full_data,50.-(param['PERCENTILES'][1]/2.))
        sig2_hi = np.percentile(full_data,50.+(param['PERCENTILES'][1]/2.))
        sig3_lo = np.percentile(full_data,50.-(param['PERCENTILES'][2]/2.))
        sig3_hi = np.percentile(full_data,50.+(param['PERCENTILES'][2]/2.))
        #- Find difference between upper and lower sigma bounds
        # DIFF1SIG: The number of counts separating the 1 sigma percentiles in the noise distribution (from the overscan region)
        diff1sig = sig1_hi - sig1_lo
        # DIFF2SIG: The number of counts separating 2 or 3 sigma in the noise distribution
        diff2sig = sig2_hi - sig2_lo
        diff3sig = sig3_hi - sig3_lo
        #-DATA5SIG: number of pixels more than 5 sigma below the bias level
        sig5_value = np.percentile(full_data,3e-5)
        data5sig = len(np.where(full_data <= sig5_value)[0])
        #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        if amps:
            rms_over_amps = [noise1,noise2,noise3,noise4]
            try:
                rms_amps = [image.meta['OBSRDN1'],image.meta['OBSRDN2'],image.meta['OBSRDN3'],image.meta['OBSRDN4']]
            except:
                rms_amps = [image.meta['OBSRDNA'],image.meta['OBSRDNB'],image.meta['OBSRDNC'],image.meta['OBSRDND']]
            retval["METRICS"]={"NOISE_AMP":np.array(rms_amps),"NOISE_OVERSCAN_AMP":np.array(rms_over_amps),"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DATA5SIG":data5sig,"BIAS_PATNOISE":bias_patnoise}#,"NOISE_ROW":noise_row,"EXPNUM_WARN":expnum,"NOISE_OVER":rmsover
        else:
            retval["METRICS"]={"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DATA5SIG":data5sig, "BIAS_PATNOISE":bias_patnoise} # Dropping "NOISE_OVER":rmsover,"NOISE_ROW":noise_row,"EXPNUM_WARN":expnum
        ###############################################################
        # This section is for adding QA metrics for plotting purposes #
        ###############################################################
        ###############################################################
        #        if qafile is not None:
        #            outfile=qa.write_qa_ql(qafile,retval)
        #            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            fig.plot_RMS(retval,qafig,plotconf=plotconf,hardplots=hardplots)
            log.debug("Output QA fig {}".format(qafig))
        return retval
    def get_default_config(self):
        """Return the default configuration (empty) for this QA algorithm."""
        return {}
class Calc_XWSigma(MonitoringAlg):
    def __init__(self,name,config,logger=None):
        """Set up result/status keys, reference metrics and warn/normal
        ranges from the QA configuration, then delegate to MonitoringAlg."""
        if name is None or name.strip() == "":
            name="XWSIGMA"
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "XWSIGMA"
        status=kwargs['statKey'] if 'statKey' in kwargs else "XWSIGMA_STATUS"
        kwargs["RESULTKEY"]=key
        kwargs["QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "XWSIGMA_WARN_RANGE" in parms and "XWSIGMA_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["XWSIGMA_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["XWSIGMA_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        # NOTE(review): 'im' is not defined in this scope; presumably a
        # module-level name (the expected input type) -- confirm upstream.
        MonitoringAlg.__init__(self,name,im,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input, optionally load the preproc image when running
        this QA standalone ('singleqa'), and dispatch to run_qa."""
        if len(args) == 0 :
            log.critical("No parameter is found for this QA")
            sys.exit("Update the configuration file for the parameters")
        if not self.is_compatible(type(args[0])):
            #raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
            log.critical("Incompatible input!")
            sys.exit("Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
        if kwargs["singleqa"] == 'Calc_XWSigma':
            night = kwargs['night']
            expid = '{:08d}'.format(kwargs['expid'])
            camera = kwargs['camera']
            image = get_image('preproc',night,expid,camera,kwargs["specdir"])
        else: image=args[0]
        inputs=get_inputs(*args,**kwargs)
        return self.run_qa(image,inputs)
    def run_qa(self,image,inputs):
        """Fit Gaussians to sky-line peaks in x (cross-dispersion) and w
        (wavelength) for each fiber, and report median sigma widths overall,
        per fiber, and optionally per amplifier."""
        import desispec.quicklook.qlpsf
        from scipy.optimize import curve_fit
        camera=inputs["camera"]
        paname=inputs["paname"]
        fibermap=inputs["fibermap"]
        psffile=inputs["psf"]
        psf=desispec.quicklook.qlpsf.PSF(psffile)
        amps=inputs["amps"]
        allpeaks=inputs["Peaks"]
        qafile=inputs["qafile"]
        qafig=inputs["qafig"]
        param=inputs["param"]
        refmetrics=inputs["refmetrics"]
        plotconf=inputs["plotconf"]
        hardplots=inputs["hardplots"]
        retval={}
        retval["PANAME"] = paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
        retval["CAMERA"] = camera
        retval["FLAVOR"] = image.meta["FLAVOR"]
        kwargs=self.config['kwargs']
        # NOTE(review): 'program' is only bound on the science-flavor path but
        # is read later when xsigma/wsigma are empty -- a non-science exposure
        # with no successful fits would raise NameError; confirm intended.
        if image.meta["FLAVOR"] == 'science':
            fibmap =fits.open(kwargs['FiberMap'])
            retval["PROGRAM"]=program=fibmap[1].header['PROGRAM']
        retval["NIGHT"] = image.meta["NIGHT"]
        if param is None:
            log.critical("No parameter is given for this QA! ")
            sys.exit("Check the configuration file")
        retval["PARAMS"] = param
        #- Ensure that the QA will run even if 500 spectra aren't present
        if fibermap['FIBER'].shape[0] >= 500:
            fibers = 500
        else:
            fibers = fibermap['FIBER'].shape[0]
        #- Define number of pixels to be fit
        dp=param['PIXEL_RANGE']/2
        #- Get wavelength ranges around peaks
        peaks=allpeaks['{}_PEAKS'.format(camera[0].upper())]
        #- Maximum allowed fit sigma value
        maxsigma=param['MAX_SIGMA']
        xfails=[]
        wfails=[]
        xsigma=[]
        wsigma=[]
        xsigma_amp1=[]
        wsigma_amp1=[]
        xsigma_amp2=[]
        wsigma_amp2=[]
        xsigma_amp3=[]
        wsigma_amp3=[]
        xsigma_amp4=[]
        wsigma_amp4=[]
        for fiber in range(fibers):
            xs = -1 # SE: this prevents crash in "XWSIGMA_AMP" for when xs or ws is empty list -> try b9 of 20200515/00000001
            ws = -1
            xsig=[]
            wsig=[]
            for peak in range(len(peaks)):
                #- Use psf information to convert wavelength to pixel values
                xpixel=desispec.quicklook.qlpsf.PSF.x(psf,ispec=fiber,wavelength=peaks[peak])[0][0]
                ypixel=desispec.quicklook.qlpsf.PSF.y(psf,ispec=fiber,wavelength=peaks[peak])[0][0]
                #- Find x and y pixel values around sky lines
                xpix_peak=np.arange(int(xpixel-dp),int(xpixel+dp),1)
                ypix_peak=np.arange(int(ypixel-dp),int(ypixel+dp),1)
                #- Fit gaussian to counts in pixels around sky line
                #- If any values fail, store x/w, wavelength, and fiber
                try:
                    xpopt,xpcov=curve_fit(qalib.gauss,np.arange(len(xpix_peak)),image.pix[int(ypixel),xpix_peak])
                    xs=np.abs(xpopt[2])
                    if xs <= maxsigma:
                        xsig.append(xs)
                    else:
                        xfail=[fiber,peaks[peak]]
                        xfails.append(xfail)
                except:
                    xfail=[fiber,peaks[peak]]
                    xfails.append(xfail)
                    pass
                try:
                    wpopt,wpcov=curve_fit(qalib.gauss,np.arange(len(ypix_peak)),image.pix[ypix_peak,int(xpixel)])
                    ws=np.abs(wpopt[2])
                    if ws <= maxsigma:
                        wsig.append(ws)
                    else:
                        wfail=[fiber,peaks[peak]]
                        wfails.append(wfail)
                except:
                    wfail=[fiber,peaks[peak]]
                    wfails.append(wfail)
                    pass
            #- Excluding fibers 240-260 in case some fibers overlap amps
            #- Excluding peaks in the center of image in case peak overlaps two amps
            #- This shouldn't cause a significant loss of information
            if amps:
                if fibermap['FIBER'][fiber]<240:
                    if ypixel < 2000.:
                        xsigma_amp1.append(xs)
                        wsigma_amp1.append(ws)
                    if ypixel > 2100.:
                        xsigma_amp3.append(xs)
                        wsigma_amp3.append(ws)
                if fibermap['FIBER'][fiber]>260:
                    if ypixel < 2000.:
                        xsigma_amp2.append(xs)
                        wsigma_amp2.append(ws)
                    if ypixel > 2100.:
                        xsigma_amp4.append(xs)
                        wsigma_amp4.append(ws)
            # Per-fiber sigma is the mean over all successfully fit peaks.
            if len(xsig)!=0:
                xsigma.append(np.mean(xsig))
            if len(wsig)!=0:
                wsigma.append(np.mean(wsig))
        # Fewer than ~260 fibers means amps 2/4 saw no fibers at all.
        if fibermap['FIBER'].shape[0]<260:
            xsigma_amp2=[]
            xsigma_amp4=[]
            wsigma_amp2=[]
            wsigma_amp4=[]
        #- Calculate desired output metrics
        xsigma_med=np.median(np.array(xsigma))
        wsigma_med=np.median(np.array(wsigma))
        xsigma_amp=np.array([np.median(xsigma_amp1),np.median(xsigma_amp2),np.median(xsigma_amp3),np.median(xsigma_amp4)])
        wsigma_amp=np.array([np.median(wsigma_amp1),np.median(wsigma_amp2),np.median(wsigma_amp3),np.median(wsigma_amp4)])
        xwfails=np.array([xfails,wfails])
        # SE: If no fit succeeded, fall back to the per-program reference
        # values so XWSIGMA itself contains no NaN.  This does not guard the
        # per-amp medians above, which can still be NaN for empty amp lists;
        # a non-scipy fitter may allow a cleaner treatment of failed fits.
        if len(xsigma)==0:
            xsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][0]]
        if len(wsigma)==0:
            wsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][1]]
        #- Combine metrics for x and w
        xwsigma_fib=np.array((xsigma,wsigma)) #- (2,nfib)
        xwsigma_med=np.array((xsigma_med,wsigma_med)) #- (2)
        xwsigma_amp=np.array((xsigma_amp,wsigma_amp))
        if amps:
            #if len(xsigma_amp1)==0 :
                #xsigma_amp1 = [param['XWSIGMA_REF'][0]]
            #if len(xsigma_amp2)==0 :
                #xsigma_amp2 = [param['XWSIGMA_REF'][0]]
            #if len(xsigma_amp3)==0 :
                #xsigma_amp3 = [param['XWSIGMA_REF'][0]]
            #if len(xsigma_amp4)==0 :
                #xsigma_amp4 = [param['XWSIGMA_REF'][0]]
            #if len(wsigma_amp1)==0 :
                #wsigma_amp1 = [param['XWSIGMA_REF'][1]]
            #if len(wsigma_amp2)==0 :
                #wsigma_amp2 = [param['XWSIGMA_REF'][1]]
            #if len(wsigma_amp3)==0 :
                #wsigma_amp3 = [param['XWSIGMA_REF'][1]]
            #if len(wsigma_amp4)==0 :
                #wsigma_amp4 = [param['XWSIGMA_REF'][1]]
            retval["METRICS"]={"XWSIGMA":xwsigma_med,"XWSIGMA_FIB":xwsigma_fib,"XWSIGMA_AMP":xwsigma_amp}#,"XWSHIFT":xwshift,"XWSHIFT_AMP":xwshift_amp,"XWSIGMA_SHIFT": xwsigma_shift}
        else:
            retval["METRICS"]={"XWSIGMA":xwsigma_med,"XWSIGMA_FIB":xwsigma_fib}#,"XWSHIFT":xwshift,"XWSIGMA_SHIFT": xwsigma_shift}
        ###############################################################
        # This section is for adding QA metrics for plotting purposes #
        ###############################################################
        ###############################################################
        #        if qafile is not None:
        #            outfile=qa.write_qa_ql(qafile,retval)
        #            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            fig.plot_XWSigma(retval,qafig,plotconf=plotconf,hardplots=hardplots)
            log.debug("Output QA fig {}".format(qafig))
        return retval
def get_default_config(self):
return | |
import re
import sys
import six
# To use the assembler: python assembler.py inputfile.asm
class Assembler(object):
    def __init__(self, address):
        """Create an LC-3 assembler whose location counter starts at *address*.

        Builds the lookup tables used during assembly: opcode encodings
        (pre-shifted into their bit positions), immediate-field widths per
        mnemonic, masks per field width, and register-name-to-number mapping.
        """
        self.address = address
        # Assembler directives recognized alongside real instructions.
        self.pseudo_ops = ['.ORIG', '.END', '.BLKW', '.FILL', '.STRINGZ']
        # Operate-class opcodes, pre-shifted into bits [15:12].
        self.op_instructions = {"ADD": 0b0001 << 12, "AND": 0b0101 << 12, "NOT": 0b1001 << 12}
        # Control-flow opcodes: BRx variants carry their n/z/p condition bits;
        # the TRAP aliases (HALT, GETC, OUT, PUTS, IN, PUTSP) are complete
        # 16-bit machine words.
        self.ctrl_instructions = {"BRN": 0b0000100 << 9, "BRZ": 0b0000010 << 9, "BRP": 0b0000001 << 9, "BRNZ": 0b0000110 << 9,
                                  "BRZP": 0b0000011 << 9, "BRNP": 0b0000101 << 9, "BRNZP": 0b0000111 << 9,
                                  "JSR": 0b01001 << 11, "JSRR": 0b01000 << 11, "RTI": 0b1000000000000000, "JMP": 0b1100 << 12,
                                  "RET": 0b1100000111000000, "TRAP": 0b11110000 << 8, "HALT": 0b1111000000100101,
                                  "GETC": 0b1111000000100000, "OUT": 0b1111000000100001, "PUTS": 0b1111000000100010,
                                  "IN": 0b1111000000100011, "PUTSP": 0b1111000000100100,}
        # Load/store opcodes, pre-shifted into bits [15:12].
        self.data_movement_instructions = {"LD": 0b0010 << 12, "LDR": 0b0110 << 12, "LDI": 0b1010 << 12, "LEA": 0b1110 << 12,
                                           "ST": 0b0011 << 12, "STR": 0b0111 << 12, "STI": 0b1011 << 12}
        # Combined mnemonic -> encoding table used by the dispatch logic.
        self.opcode_table = {**self.data_movement_instructions, **self.op_instructions, **self.ctrl_instructions}
        # Width (in bits) of the immediate/offset field per mnemonic.
        # NOTE(review): 'NOT' is listed as 9, but the NOT branch of
        # get_op_instruction never consults this table -- confirm harmless.
        self.immediate_length = {'ADD': 5, 'AND': 5, 'BRN': 9, 'BRZ': 9, 'BRP': 9, 'BRNZ': 9, 'BRNP': 9, 'BRZP': 9, 'BRNZP': 9,
                                 'GETC': 0, 'HALT': 0, 'IN': 0, 'JMP': 0, 'JMPT': 0, 'JSR': 11, 'JSRR': 0, 'LD': 9, 'LDI': 9,
                                 'LDR': 6, 'LEA': 9, 'NOT': 9, 'OUT': 0, 'PUTS': 0, 'PUTSP': 0, 'RET': 0, 'RTI': 0, 'RTT': 0,
                                 'ST': 9, 'STI': 9, 'STR': 6, 'TRAP': 8, 'UNDEFINED': 0}
        # Field width -> bit mask (2**width - 1) for truncating immediates.
        self.immediate_mask = {5: 31, 9: 511, 0: 0, 11: 2047, 6: 63, 8: 255}
        # Register names R0..R7 -> register numbers 0..7.
        self.regs = {'R%d' % i: i for i in range(0, 8)}
def get_op_instruction(self, word, pc):
# instruction = 0b0 << 16
if word[0] == 'NOT':
if len(word) > 3:
raise SyntaxError(
'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
try:
DR = self.regs[word[1]] << 9
SR1 = self.regs[word[2]] << 6
except KeyError:
raise KeyError(
'Register does not exist.\nLocation:PC='+hex(pc-1))
return (self.op_instructions[word[0]] + DR + SR1) | (0b111111)
elif word[0] == 'ADD' or word[0] == 'AND':
if len(word) > 4:
raise SyntaxError(
'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
try:
DR = self.regs[word[1]] << 9
SR1 = self.regs[word[2]] << 6
except KeyError:
raise KeyError(
'Register does not exist.\nLocation:PC='+hex(pc-1))
if word[3] in self.regs:
SR2 = self.regs[word[3]]
return self.op_instructions[word[0]] + DR + SR1 + SR2
elif '#' in word[3] or 'x' in word[3] or 'X' in word[3]:
if not self.judge_immediate(word[3], high=15, low=-17, pc=pc):
raise ValueError(
'Could not be represented as a signed number in 5 bits.\nLocation:PC='+hex(pc-1))
try:
im = self.get_immediate(word[3], pc, self.immediate_mask[self.immediate_length[word[0]]])
except BaseException:
raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
return self.op_instructions[word[0]] + DR + SR1 +0b100000 + im
else:
raise SyntaxError(
'Unrecognized opcode or syntax error.\nLocation:PC='+hex(pc-1))
else:
raise SyntaxError(
'Unrecognized opcode or syntax error.\nLocation:PC='+hex(pc-1))
def get_data_movement_instruction(self, word, pc):
if word[0] == 'LDR' or word[0] == 'STR':
if len(word) > 4:
raise SyntaxError(
'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
try:
DR_or_SR = self.regs[word[1]] << 9
BaseR = self.regs[word[2]] << 6
except KeyError:
raise KeyError(
'Register does not exist.\nLocation:PC='+hex(pc-1))
if not self.judge_immediate(word[3], high=15, low=-17, pc=pc):
raise ValueError(
'Could not be represented as a signed number in 5 bits.\nLocation:PC='+hex(pc-1))
try:
im = self.get_immediate(
word[3], pc, self.immediate_mask[self.immediate_length[word[0]]])
except BaseException:
raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
return self.data_movement_instructions[word[0]] + DR_or_SR + BaseR + im
elif word[0] == 'LD' or word[0] == 'LDI'or word[0] == 'LEA'or word[0] == 'ST' or word[0] == 'STI':
if len(word) > 3:
raise SyntaxError(
'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
try:
DR_or_SR = self.regs[word[1]] << 9
except KeyError:
raise KeyError(
'Register does not exist.\nLocation:PC='+hex(pc-1))
im_mask = self.immediate_mask[self.immediate_length[word[0]]]
if word[2] in self.symbol_table:
pc_offset = hex(int(self.symbol_table[word[2]], 16) - (pc))
# print(pc_offset)
if pc_offset.startswith('-'):
try:
addr = self.get_immediate(pc_offset[1:], pc, im_mask)
except BaseException:
raise SyntaxError(
'Invalid syntax.\nLocation:PC='+hex(pc-1))
pc_offset = (~int(addr) + 1) & 511
else:
try:
pc_offset = self.get_immediate(
pc_offset, pc, im_mask)
except BaseException:
raise SyntaxError(
'Invalid syntax.\nLocation:PC='+hex(pc-1))
return self.data_movement_instructions[word[0]] + DR_or_SR + pc_offset
else:
raise SyntaxError('Symbol not found.\nLocation:PC='+hex(pc-1))
else:
raise SyntaxError(
'Unrecognized opcode or syntax error.\nLocation:PC='+hex(pc-1))
    def get_ctrl_instruction(self, word, pc):
        """Encode a control-flow instruction into a 16-bit machine word.

        Covers zero-operand instructions (RET/RTI/HALT/OUT/GETC/PUTS/IN/
        PUTSP), register jumps (JMP/JSRR), PC-relative branches (BR* and JSR,
        resolved through ``self.symbol_table``) and TRAP.

        ``word`` is the tokenized instruction; ``pc`` is the program counter
        already advanced past this instruction (used for PC-relative offsets
        and error locations).
        """
        # Zero-operand instructions: the opcode pattern alone is the word.
        if word[0] == 'RET' or word[0] == 'RTI' or word[0] == 'HALT' or word[0] == 'OUT' or word[0] == 'GETC' \
                or word[0] == 'PUTS' or word[0] == 'IN' or word[0] == 'PUTSP':
            if len(word) >= 2:
                raise SyntaxError(
                    'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
            return self.ctrl_instructions[word[0]]
        elif word[0] == 'JMP' or word[0] == 'JSRR':
            # Register-based jumps: BaseR occupies bits [8:6].
            if len(word) > 2:
                raise SyntaxError('The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
            try:
                BaseR = self.regs[word[1]] << 6
            except KeyError:
                raise KeyError('Register does not exist.\nLocation:PC='+hex(pc-1))
            return self.ctrl_instructions[word[0]] + BaseR
        elif 'BR' in word[0] or word[0] == 'JSR':
            # PC-relative: BR* uses a 9-bit offset, JSR an 11-bit offset
            # (mask chosen from the opcode's immediate_length entry).
            if len(word) > 2:
                raise SyntaxError(
                    'The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
            im_mask = self.immediate_mask[self.immediate_length[word[0].upper()]]
            if word[1] in self.symbol_table:
                # Offset = target address - incremented PC.
                pc_offset = hex(int(self.symbol_table[word[1]], 16) - (pc))
                # TODO: A bug is to be fixed — there is no range check on this
                # path, so an out-of-range offset is silently wrapped by the
                # mask instead of raising an error.
                if pc_offset.startswith('-'):
                    # Negative offset: parse the magnitude, then take its
                    # two's complement within the offset field.
                    try:
                        addr = self.get_immediate(pc_offset[1:], pc, im_mask)
                    except BaseException:
                        raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
                    if word[0] == 'JSR':
                        pc_offset = (~int(addr) + 1) & 2047
                    else:
                        pc_offset = (~int(addr) + 1) & 511
                else:
                    try:
                        pc_offset = self.get_immediate(pc_offset, pc, im_mask)
                    except BaseException:
                        raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
                return self.ctrl_instructions[word[0].upper()] + pc_offset
            else:
                raise SyntaxError('Symbol not found.\nLocation:PC='+hex(pc-1))
        elif 'TRAP' == word[0]:
            # TRAP takes an unsigned 8-bit vector.
            if len(word) > 2:
                raise SyntaxError('The number of oprands is more than excepted.\nLocation:PC='+hex(pc-1))
            if not self.judge_immediate(word[1], high=255, low=0, pc=pc):
                raise ValueError('Could not be represented as a unsigned number in 8 bits.\nLocation:PC='+hex(pc-1))
            im_mask = self.immediate_mask[self.immediate_length[word[0]]]
            # '0' is prepended so 'x25' becomes '0x25' for get_immediate.
            # NOTE(review): this breaks the '#' decimal form ('0#37' parses in
            # no branch of get_immediate) — verify whether that is intended.
            try:
                vector = self.get_immediate('0' + word[1], pc, im_mask)
            except BaseException:
                raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
            return self.ctrl_instructions[word[0]] + vector
        else:
            raise SyntaxError('Unrecognized opcode or syntax error.\nLocation:PC='+hex(pc-1))
def judge_immediate(self, num, high, low, pc):
if (num.startswith('0x') or num.startswith('0X')) and all(i in '-0123456789abcdefABCDEF' for i in num[2:]) and '-' not in num[3:]:
try:
n = int(num[2:], base=16)
except:
raise ValueError('Invalid immediate number input.\nLocation:PC='+hex(pc-1))
return n <= high and n >= low
elif (num.startswith('x') or num.startswith('X')) and all(i in '-0123456789abcdefABCDEF' for i in num[1:]) and '-' not in num[2:]:
try:
n = int(num[1:], base=16)
except:
raise ValueError('Invalid immediate number input.\nLocation:PC='+hex(pc-1))
return n <= high and n >= low
elif (num.startswith('#')) and all(i in '-0123456789' for i in num[1:]) and '-' not in num[2:]:
try:
n = int(num[1:], base=10)
except:
raise ValueError('Invalid immediate number input.\nLocation:PC='+hex(pc-1))
return n <= high and n >= low
else:
try:
n = int(num)
return n <= high and n >= low
except ValueError:
raise ValueError('Invalid immediate number input.\nLocation:PC='+hex(pc-1)) from None
def get_immediate(self, word, pc,mask=0xFFFF):
if (word.startswith('0x') or word.startswith('0X')) and all(i in '-0123456789abcdefABCDEF' for i in word[2:]) and '-' not in word[3:]:
return int(word[2:], base=16) & mask
elif ((word.startswith('x') or word.startswith('X')) and all(i in '-0123456789abcdefABCDEF' for i in word[1:]) and '-' not in word[2:]):
return int(word[1:], base=16) & mask
elif word.startswith('#'):
return int(word[1:]) & mask
else:
try:
return int(word)&mask
except BaseException:
raise ValueError('Invalid immedate.\nLocation:PC='+hex(pc-1))
def regulate(self):
with open(self.address, 'r+') as f:
content = f.readlines()
instructions=[]
for instruction in content:
a = instruction.strip().replace('\t', ' ').replace(',', ', ').split(' ')
while '' in a:
a.remove('')
flag = 0
for i in range(0, len(a)):
if ',' in a[i]:
a[i] = a[i].replace(',','')
if ';' in a[i]:
flag = 1
break
if flag:
a = a[:i]
if len(a) != 0:
instructions.append(a)
# print(a)
return instructions
def assemble(self):
binary_code = []
# pass one
content = self.regulate()
if content[0][0] != '.ORIG':
print("Expected .ORIG but not found!\n")
return None
else:
pc = int('0' + content[0][1], base=16)
lc = pc
if content[::-1][0][0] != '.END':
print("Expected .END but not found!\n")
return None
self.symbol_table = {}
# string_pattern = r'^'"."'$'
for instruction in content:
if instruction[0].upper() not in self.opcode_table and instruction[0] not in self.pseudo_ops:
self.symbol_table[instruction[0]] = hex(lc)
instruction.remove(instruction[0])
if len(instruction) >= 2 and instruction[0] == '.BLKW':
lc += int(instruction[1])
elif len(instruction) >= 2 and instruction[0] == '.STRINGZ':
lc += int(len(instruction[1])) - 1
else:
lc += 1
elif instruction[0] != '.ORIG' and instruction[0] != '.END':
lc += 1
# print(content)
# print(self.symbol_table)
# pass two
for instruction in content:
if len(instruction) < 1:
raise SyntaxError('Invalid syntax.\nLocation:PC='+hex(pc-1))
if instruction[0] in self.opcode_table.keys() or instruction[0].upper() in self.opcode_table.keys():
pc += 1
# print(instruction)
if instruction[0] in self.op_instructions or instruction[0].upper() in self.op_instructions:
code = self.get_op_instruction(instruction, pc)
# print('0'*(16-len(bin(code)[2:]))+bin(code)[2:])
elif instruction[0] in | |
from torch.nn.functional import conv1d, conv2d, fold
import torch.nn as nn
import torch
import numpy as np
from time import time
from ..utils import *
from scipy.signal import get_window
class STFTBase(nn.Module):
    """
    Shared base class for the STFT layers: STFT and iSTFT reuse the same
    `inverse_stft` implementation defined here.
    """

    def inverse_stft(
        self, X, kernel_cos, kernel_sin, onesided=True, length=None, refresh_win=True
    ):
        """Reconstruct the time-domain signal from a complex spectrogram.

        X : tensor laid out as (batch, freq, time, 2), the last axis holding
            the (real, imag) parts.
        kernel_cos, kernel_sin : inverse Fourier kernels used as conv2d
            weights.
        onesided : if True, X carries only n_fft//2+1 bins and is mirrored
            back to the full n_fft bins first.
        length : optional output length used when trimming padding.
        refresh_win : recompute the cached window-sumsquare normalization
            (needed when the number of time steps changes between calls).
        """
        # If the input spectrogram contains only half of the n_fft
        # Use extend_fbins function to get back another half
        if onesided:
            # extend freq from fft//2+1 to fft
            X = extend_fbins(X)  # (1, 513, 663, 2) -> (1, 1024, 663, 2)

        X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]  # (1, 1024, 663)

        # broadcast dimensions to support 2D convolution
        X_real_bc = X_real.unsqueeze(1)  # [1, 1024, 663] -> [1, 1, 1024, 663]
        X_imag_bc = X_imag.unsqueeze(1)

        a1 = conv2d(X_real_bc, kernel_cos, stride=(1, 1))
        # X_real_bc [1, 1, 1024, 663] input [N, C_in, H, W]
        # kernel_cos [1024, 1, 1024, 1] weight [C_out, C_in, kH, kW]
        # a1 [1, 1024, 1, 663]
        b2 = conv2d(X_imag_bc, kernel_sin, stride=(1, 1))

        # compute real and imag part. signal lies in the real part
        real = (a1 - b2) / self.n_fft

        # Swap the two halves along the frequency/sample axis (a rotation by
        # n_fft//2) before windowing.
        # NOTE(review): assumes n_fft is even — confirm upstream.
        real_buffer = torch.zeros_like(real)
        hM = int(self.n_fft)//2
        real_buffer[:,:hM,:,:] = real[:,-hM:,:,:]
        real_buffer[:,-hM:,:,:] = real[:,:hM,:,:]
        real = real_buffer
        real = real.squeeze(-2) * self.window_mask

        # Overlap and Add algorithm to connect all the frames
        real = overlap_add(real, self.stride)

        # Prepare the window sumsqure for division
        # Only need to create this window once to save time
        # Unless the input spectrograms have different time steps
        if hasattr(self, "w_sum") == False or refresh_win == True:
            self.w_sum = torch_window_sumsquare(
                self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft
            ).flatten()
            self.nonzero_indices = self.w_sum > 1e-10
        else:
            pass
        # Normalize by the window power envelope; (near-)zero bins are left
        # untouched to avoid division blow-ups.
        real[:, self.nonzero_indices] = real[:, self.nonzero_indices].div(
            self.w_sum[self.nonzero_indices]
        )
        # Remove padding
        if length is None:
            if self.center:
                real = real[:, self.pad_amount : -self.pad_amount]

        else:
            if self.center:
                real = real[:, self.pad_amount : self.pad_amount + length]
            else:
                real = real[:, :length]
        return real
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(STFTBase):
"""This function is to calculate the short-time Fourier transform (STFT) of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``nn.Module``, therefore, the usage is same as ``nn.Module``.
Parameters
----------
n_fft : int
Size of Fourier transform. Default value is 2048.
win_length : int
the size of window frame and STFT filter.
Default: None (treated as equal to n_fft)
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
freq_scale : 'linear', 'log', 'log2' or 'no'
Determine the spacing between each frequency bin. When `linear`, 'log' or `log2` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``, the time
index is the beginning of the STFT kernel, if ``True``, the time index is the center of
        the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
iSTFT : bool
To activate the iSTFT module or not. By default, it is False to save GPU memory.
Note: The iSTFT kernel is not trainable. If you want
a trainable iSTFT, use the iSTFT module.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing.
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``
output_format : str
Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
The output_format can also be changed during the ``forward`` method.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.STFT()
>>> specs = spec_layer(x)
"""
def __init__(
self,
n_fft=2048,
win_length=None,
freq_bins=None,
hop_length=None,
window="hann",
freq_scale="no",
center=True,
pad_mode="reflect",
iSTFT=False,
fmin=50,
fmax=6000,
sr=22050,
trainable=False,
output_format="Complex",
verbose=True,
):
super().__init__()
# Trying to make the default setting same as librosa
if win_length == None:
win_length = n_fft
if hop_length == None:
hop_length = int(win_length // 4)
self.output_format = output_format
self.trainable = trainable
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.freq_bins = freq_bins
self.trainable = trainable
self.pad_amount = self.n_fft // 2
self.window = window
self.win_length = win_length
self.iSTFT = iSTFT
self.trainable = trainable
start = time()
# Create filter windows for stft
(
kernel_sin,
kernel_cos,
self.bins2freq,
self.bin_list,
window_mask,
) = create_fourier_kernels(
n_fft,
win_length=win_length,
freq_bins=freq_bins,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=verbose,
)
# (513, 1, 1024)
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)
kernel_sin = create_buffer(kernel_sin, n_fft)
kernel_cos = create_buffer(kernel_cos, n_fft)
# In this way, the inverse kernel and the forward kernel do not share the same memory...
# (1024, 1, 1024)
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)
kernel_sin_inv = create_buffer(kernel_sin_inv, n_fft)
kernel_cos_inv = create_buffer(kernel_cos_inv, n_fft)
if iSTFT:
self.register_buffer("kernel_sin_inv", kernel_sin_inv.unsqueeze(-1))
self.register_buffer("kernel_cos_inv", kernel_cos_inv.unsqueeze(-1))
# Making all these variables nn.Parameter, so that the model can be used with nn.Parallel
# self.kernel_sin = nn.Parameter(self.kernel_sin, requires_grad=self.trainable)
# self.kernel_cos = nn.Parameter(self.kernel_cos, requires_grad=self.trainable)
# Applying window functions to the Fourier kernels
window_mask = torch.tensor(window_mask)
wsin = kernel_sin * window_mask
wcos = kernel_cos * window_mask
if self.trainable == False:
self.register_buffer("wsin", wsin)
self.register_buffer("wcos", wcos)
if self.trainable == True:
wsin = nn.Parameter(wsin, requires_grad=self.trainable)
wcos = nn.Parameter(wcos, requires_grad=self.trainable)
self.register_parameter("wsin", wsin)
self.register_parameter("wcos", wcos)
# Prepare the shape of window mask so that it can be used later in inverse
self.register_buffer("window_mask", window_mask.unsqueeze(0).unsqueeze(-1))
if verbose == True:
print(
"STFT kernels created, time used = {:.4f} seconds".format(
time() - start
)
)
else:
pass
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
output_format : str
Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.
Default value is ``Complex``.
"""
output_format = output_format or self.output_format
self.num_samples = x.shape[-1]
x = broadcast_dim(x)
if self.center:
if self.pad_mode == "constant":
padding = nn.ConstantPad1d(self.pad_amount, 0)
elif self.pad_mode == "reflect":
if self.num_samples < self.pad_amount:
raise AssertionError(
"Signal length shorter than reflect padding length (n_fft // 2)."
)
padding = nn.ReflectionPad1d(self.pad_amount)
x = padding(x)
spec_imag = conv1d(x, self.wsin, stride=self.stride)
spec_real = conv1d(
x, self.wcos, stride=self.stride
) # Doing STFT by using conv1d
# remove redundant parts
spec_real = spec_real[:, : self.freq_bins, :]
spec_imag = spec_imag[:, : self.freq_bins, :]
if output_format == "Magnitude":
spec = spec_real.pow(2) + spec_imag.pow(2)
if self.trainable == True:
return torch.sqrt(
spec + 1e-8
) # prevent Nan gradient when sqrt(0) due to output=0
else:
return torch.sqrt(spec)
elif output_format == "Complex":
return torch.stack(
(spec_real, -spec_imag), -1
) # Remember the minus sign for imaginary part
elif | |
"tableAlias": "dim_efficiency_inspection_item",
"layer": "dim",
"tableName": "<dim_efficiency_inspection_item_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "INSPECTION_ITEM",
"alias": "巡检项",
"partitionFormat": "d",
"domainId": 3
},
"fieldsReq": [{
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "巡检项ID",
"description": "主键",
"dim": "id",
"type": "STRING"
}]
}, {
"metaReq": {
"description": "追踪明细模型",
"tableAlias": "dwd_original_trace_di",
"layer": "dwd",
"tableName": "<dwd_original_trace_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "TRACE",
"alias": "追踪",
"partitionFormat": "d",
"domainId": 4,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "追踪ID",
"description": "主键",
"dim": "id",
"type": "STRING"
}]
}, {
"metaReq": {
"description": "应用请求成功率明细(SLA)",
"tableAlias": "dwd_stability_success_rate_sla_di",
"layer": "dwd",
"tableName": "<dwd_stability_success_rate_sla_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "SUCCESS_RATE_SLA",
"alias": "应用请求成功率明细(SLA)",
"partitionFormat": "d",
"domainId": 1,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "app_id",
"nullable": False,
"alias": "应用ID",
"description": "应用ID",
"dim": "app_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "id",
"nullable": False,
		"alias": "主键",
"description": "主键",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "success_rate",
"nullable": False,
"alias": "请求成功率",
"description": "请求成功率",
"dim": "success_rate",
"type": "INTEGER"
}, {
"buildIn": True,
"field": "timestamp",
"nullable": False,
"alias": "时间戳",
"description": "计量时间戳",
"dim": "timestamp",
"type": "DATE"
}]
}, {
"metaReq": {
"description": "应用平均响应时间明细",
"tableAlias": "dwd_original_app_avg_response_time_di",
"layer": "dwd",
"tableName": "<dwd_original_app_avg_response_time_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "APP_AVG_RESPONSE_TIME",
"alias": "应用平均响应时间明细",
"partitionFormat": "d",
"domainId": 4,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "应用ID",
"description": "主键",
"dim": "id",
"type": "STRING"
}]
}, {
"metaReq": {
"description": "指标数据表",
"tableAlias": "dwd_original_metric_data_di",
"layer": "dwd",
"tableName": "<dwd_original_metric_data_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "METRIC_DATA",
"alias": "指标数据",
"partitionFormat": "d",
"domainId": 4,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "主键",
"description": "主键",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "labels",
"nullable": True,
"alias": "实例标签",
"description": "指标实例标签",
"dim": "labels",
"type": "OBJECT"
}, {
"buildIn": True,
"field": "metric_id",
"nullable": False,
"alias": "指标ID",
"description": "指标ID",
"dim": "metric_id",
"type": "INTEGER"
}, {
"buildIn": True,
"field": "metric_name",
"nullable": False,
"alias": "指标名称",
"description": "指标名称",
"dim": "metric_name",
"type": "STRING"
}, {
"buildIn": True,
"field": "timestamp",
"nullable": True,
"alias": "数据时间戳",
"description": "指标数据产出时间",
"dim": "timestamp",
"type": "DATE"
}, {
"buildIn": True,
"field": "type",
"nullable": True,
"alias": "指标类型",
"description": "指标类型",
"dim": "type",
"type": "STRING"
}, {
"buildIn": True,
"field": "uid",
"nullable": False,
"alias": "实例唯一身份ID",
"description": "实例唯一身份ID",
"dim": "uid",
"type": "STRING"
}, {
"buildIn": True,
"field": "value",
"nullable": False,
"alias": "指标值",
"description": "指标值",
"dim": "value",
"type": "FLOAT"
}]
}, {
"metaReq": {
"description": "POD资源分配模型",
"tableAlias": "dwd_cost_pod_resource_allocation_di",
"layer": "dwd",
"tableName": "<dwd_cost_pod_resource_allocation_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "POD_RESOURCE_ALLOCATION",
"alias": "POD资源分配明细",
"partitionFormat": "d",
"domainId": 2,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "appComponentInstanceId",
"nullable": True,
"alias": "应用组件实例ID",
"description": "应用组件实例ID",
"dim": "app_component_instance_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appComponentName",
"nullable": True,
"alias": "应用组件名称",
"description": "应用组件名称",
"dim": "app_component_name",
"type": "STRING"
}, {
"buildIn": True,
"field": "appId",
"nullable": False,
"alias": "应用ID",
"description": "应用ID",
"dim": "app_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appInstanceId",
"nullable": False,
"alias": "应用实例ID",
"description": "应用实例ID",
"dim": "app_instance_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appInstanceName",
"nullable": True,
"alias": "应用实例名称",
"description": "应用实例名称",
"dim": "app_instance_name",
"type": "STRING"
}, {
"buildIn": True,
"field": "clusterId",
"nullable": True,
"alias": "集群ID",
"description": "集群ID",
"dim": "cluster_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "id",
"nullable": False,
		"alias": "主键",
"description": "主键",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "namespace",
"nullable": True,
"alias": "命名空间",
"description": "命名空间",
"dim": "namespace",
"type": "STRING"
}, {
"buildIn": True,
"field": "podCpuCoreHoursAllocation",
"nullable": True,
"alias": "cpu小时分配core数",
"description": "cpu小时分配core数",
"dim": "pod_cpu_core_hours_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "podCpuCoreHoursUsageAvg",
"nullable": True,
"alias": "cpu小时平均使用core数",
"description": "cpu小时平均使用core数",
"dim": "pod_cpu_core_hours_usage_avg",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "podName",
"nullable": False,
"alias": "POD名称",
"description": "pod名称",
"dim": "pod_name",
"type": "STRING"
}, {
"buildIn": True,
"field": "podPVCGbHoursAllocation",
"nullable": True,
"alias": "pvc小时分配GB",
"description": "pvc小时分配GB",
"dim": "pod_pvc_gb_hours_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "podRamGbHoursAllocation",
"nullable": True,
"alias": "ram小时分配GB",
"description": "ram小时分配GB",
"dim": "pod_ram_gb_hours_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "podRamGbHoursUsageAvg",
"nullable": True,
"alias": "ram小时平均使用量GB",
"description": "ram小时平均使用量GB",
"dim": "pod_ram_gb_hours_usage_avg",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "timestamp",
"nullable": True,
"alias": "计算时间戳",
"description": "计算时间戳",
"dim": "timestamp",
"type": "DATE"
}]
}, {
"metaReq": {
"description": "资源计价维度表",
"tableAlias": "dim_cost_resource_price",
"layer": "dim",
"tableName": "<dim_cost_resource_price_{now/y{yyyy}}>",
"buildIn": True,
"lifecycle": 365,
"name": "RESOURCE_PRICE",
"alias": "资源计价维度表",
"partitionFormat": "y",
"domainId": 2
},
"fieldsReq": [{
"buildIn": True,
"field": "clusterId",
"nullable": True,
"alias": "集群ID",
"description": "集群ID",
"dim": "cluster_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "currency",
"nullable": False,
"alias": "计价货币",
"description": "计价货币",
"dim": "currency",
"type": "STRING"
}, {
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "主键",
		"description": "主键",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "monthlyCpuPrice",
"nullable": False,
"alias": "单核CPU每月计价",
"description": "单核CPU每月计价",
"dim": "monthly_cpu_price",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "monthlyRamPrice",
"nullable": False,
"alias": "RAM每月计价(GB)",
"description": "RAM每月计价(GB)",
"dim": "monthly_ram_price",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "monthlyStoragePrice",
"nullable": False,
"alias": "存储每月计价(GB)",
"description": "存储每月计价(GB)",
"dim": "monthly_storage_price",
"type": "DOUBLE"
}]
}, {
"metaReq": {
"description": "应用成本模型",
"tableAlias": "ads_app_cost",
"layer": "ads",
"tableName": "<ads_app_cost_{now/d}>",
"buildIn": True,
"lifecycle": 180,
"name": "APP_COST",
"alias": "应用成本",
"partitionFormat": "d",
"tag": "运营"
},
"fieldsReq": [{
"buildIn": True,
"field": "appId",
"nullable": True,
"alias": "应用ID",
"description": "应用ID",
"dim": "app_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appInstanceId",
"nullable": True,
"alias": "应用实例ID",
"description": "应用实例ID",
"dim": "app_instance_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "cost",
"nullable": True,
"alias": "总成本",
"description": "总成本",
"dim": "cost",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "cpuCoreAllocation",
"nullable": True,
"alias": "cpu分配core数",
"description": "cpu分配core数",
"dim": "cpu_core_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "cpuCoreUsageAvg",
"nullable": True,
"alias": "cpu平均core数",
"description": "cpu平均core数",
"dim": "cpu_core_usage_avg",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "cpuCost",
"nullable": True,
"alias": "cpu成本",
"description": "cpu成本",
"dim": "cpu_cost",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "cpuEfficiency",
"nullable": True,
"alias": "CPU水位",
"description": "CPU水位",
"dim": "cpu_efficiency",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "currency",
"nullable": False,
"alias": "货币单位",
"description": "货币单位",
"dim": "currency",
"type": "STRING"
}, {
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "主键",
"description": "主键",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "podCnt",
"nullable": True,
"alias": "pod数量",
"description": "pod数量",
"dim": "pod_cnt",
"type": "INTEGER"
}, {
"buildIn": True,
"field": "pvcCost",
"nullable": True,
"alias": "pvc成本",
"description": "pvc成本",
"dim": "pvc_cost",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "pvcGbAllocation",
"nullable": True,
"alias": "pvc分配GB",
"description": "pvc分配GB",
"dim": "pvc_gb_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "ramCost",
"nullable": True,
"alias": "ram成本",
"description": "ram成本",
"dim": "ram_cost",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "ramEfficiency",
"nullable": True,
"alias": "ram水位",
"description": "ram水位",
"dim": "ram_efficiency",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "ramGbAllocation",
"nullable": True,
"alias": "ram分配GB",
"description": "ram分配GB",
"dim": "ram_gb_allocation",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "ramGbUsageAvg",
"nullable": True,
"alias": "ram平均使用GB",
"description": "ram平均使用GB",
"dim": "ram_gb_usage_avg",
"type": "DOUBLE"
}, {
"buildIn": True,
"field": "timestamp",
"nullable": False,
"alias": "统计时间",
"description": "统计时间",
"dim": "timestamp",
"type": "DATE"
}]
}, {
"metaReq": {
"description": "构建交付",
"tableAlias": "dwd_efficiency_delivery_di",
"layer": "dwd",
"tableName": "<dwd_efficiency_delivery_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "DELIVERY",
"alias": "构建交付",
"partitionFormat": "d",
"domainId": 3,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "appId",
"nullable": False,
"alias": "应用ID",
"description": "应用ID",
"dim": "app_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "endTime",
"nullable": True,
"alias": "构建结束时间",
"description": "构建结束时间",
"dim": "end_time",
"type": "DATE"
}, {
"buildIn": True,
"field": "id",
"nullable": False,
"alias": "构建任务ID",
"description": "主键appPackageTaskId",
"dim": "id",
"type": "STRING"
}, {
"buildIn": True,
"field": "startTime",
"nullable": True,
"alias": "构建开始时间",
"description": "构建开始时间",
"dim": "start_time",
"type": "DATE"
}, {
"buildIn": True,
"field": "status",
"nullable": True,
"alias": "构建状态",
"description": "构建状态",
"dim": "status",
"type": "STRING"
}, {
"buildIn": True,
"field": "teamId",
"nullable": True,
"alias": "团队ID",
"description": "团队ID",
"dim": "team_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "version",
"nullable": True,
"alias": "构建版本",
"description": "构建版本",
"dim": "version",
"type": "STRING"
}]
}, {
"metaReq": {
"description": "部署",
"tableAlias": "dwd_efficiency_deployment_di",
"layer": "dwd",
"tableName": "<dwd_efficiency_deployment_di_{now/d}>",
"buildIn": True,
"lifecycle": 7,
"name": "DEPLOYMENT",
"alias": "部署",
"partitionFormat": "d",
"domainId": 3,
"dataMode": "di"
},
"fieldsReq": [{
"buildIn": True,
"field": "appId",
"nullable": False,
"alias": "应用ID",
"description": "应用ID",
"dim": "app_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appInstanceId",
"nullable": False,
"alias": "应用实例ID",
"description": "应用实例ID",
"dim": "app_instance_id",
"type": "STRING"
}, {
"buildIn": True,
"field": "appInstanceName",
"nullable": False,
"alias": "应用实例名称",
"description": "应用实例名称",
"dim": "app_instance_name",
"type": "STRING"
}, {
"buildIn": True,
"field": "appName",
"nullable": False,
"alias": "应用名称",
"description": "应用名称",
"dim": "app_name",
| |
= apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
params['anqp_3gpp_cell_net'] = "555,444"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].hs20_enable()
dev[0].scan_for_bss(bssid, freq="2412")
id = dev[0].add_cred_values({'realm': "example.com",
'domain': "example.com",
'username': "test",
'password': "<PASSWORD>",
'eap': 'TTLS'})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE ssid->eap.eap_methods = os_malloc()")
with alloc_fail(dev[0], 1, "interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'realm': "example.com",
'domain': "example.com",
'username': "hs20-<EMAIL>",
'password': "password"})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE anon = os_malloc()")
with alloc_fail(dev[0], 1, "interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE Successful connection with cred->username including realm")
dev[0].request("INTERWORKING_CONNECT " + bssid)
dev[0].wait_connected()
dev[0].remove_cred(id)
dev[0].wait_disconnected()
id = dev[0].add_cred_values({'realm': "example.com",
'domain': "example.com",
'username': "hs20-test",
'password': "password"})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE anon = os_malloc() (second)")
with alloc_fail(dev[0], 1, "interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "wpa_config_add_network;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "=interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set(eap)")
with alloc_fail(dev[0], 1, "wpa_config_parse_eap;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set(TTLS-NON_EAP_MSCHAPV2-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'roaming_consortium': "112233",
'domain': "example.com",
'username': "hs20-test",
'password': "password",
'eap': 'TTLS',
'phase2': "auth=MSCHAPV2"})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE anon = os_strdup()")
with alloc_fail(dev[0], 2, "interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(anonymous_identity)")
with alloc_fail(dev[0], 1, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE Successful connection with cred->realm not included")
dev[0].request("INTERWORKING_CONNECT " + bssid)
dev[0].wait_connected()
dev[0].remove_cred(id)
dev[0].wait_disconnected()
id = dev[0].add_cred_values({'roaming_consortium': "112233",
'domain': "example.com",
'realm': "example.com",
'username': "user",
'password': "password",
'eap': 'PEAP'})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE id = os_strdup()")
with alloc_fail(dev[0], 2, "interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(identity)")
with alloc_fail(dev[0], 1, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'roaming_consortium': "112233",
'domain': "example.com",
'realm': "example.com",
'username': "user",
'password': "password",
'eap': "TTLS"})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE wpa_config_set_quoted(identity) (second)")
with alloc_fail(dev[0], 2, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(password)")
with alloc_fail(dev[0], 3, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "wpa_config_add_network;interworking_connect_roaming_consortium"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "=interworking_connect_roaming_consortium"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'roaming_consortium': "112233",
'domain': "example.com",
'realm': "example.com",
'username': "user",
'eap': "PEAP"})
dev[0].set_cred(id, "password", "<PASSWORD>")
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE wpa_config_set(password)")
with alloc_fail(dev[0], 3, "wpa_config_set;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "interworking_set_hs20_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'realm': "example.com",
'domain': "example.com",
'username': "certificate-user",
'phase1': "include_tls_length=0",
'domain_suffix_match': "example.com",
'ca_cert': "auth_serv/ca.pem",
'client_cert': "auth_serv/user.pem",
'private_key': "auth_serv/user.key",
'private_key_passwd': "<PASSWORD>"})
interworking_select(dev[0], bssid, "home", freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE wpa_config_set_quoted(client_cert)")
with alloc_fail(dev[0], 2, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(private_key)")
with alloc_fail(dev[0], 3, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(private_key_passwd)")
with alloc_fail(dev[0], 4, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(ca_cert)")
with alloc_fail(dev[0], 5, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(domain_suffix_match)")
with alloc_fail(dev[0], 6, "=wpa_config_set_quoted;interworking_set_eap_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
with alloc_fail(dev[0], 1, "interworking_set_hs20_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'imsi': "555444-333222111", 'eap': "SIM",
'milenage': "5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123"})
interworking_select(dev[0], bssid, freq=2412)
dev[0].dump_monitor()
with alloc_fail(dev[0], 1, "interworking_set_hs20_params"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set_quoted(password;<PASSWORD>)")
with alloc_fail(dev[0], 2, "=wpa_config_set_quoted;interworking_connect_3gpp"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set(eap)")
with alloc_fail(dev[0], 1, "wpa_config_parse_eap;wpa_config_set;interworking_connect_3gpp"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE set_root_nai:wpa_config_set(identity)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;interworking_connect_3gpp"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
id = dev[0].add_cred_values({'roaming_consortium': "112233",
'eap': 'TTLS',
'username': "<EMAIL>",
'password': "password"})
interworking_select(dev[0], bssid, freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE Interworking: No EAP method set for credential using roaming consortium")
dev[0].request("INTERWORKING_CONNECT " + bssid)
dev[0].remove_cred(id)
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,25[3:26]"
hapd = hostapd.add_ap(apdev[0], params)
id = dev[0].add_cred_values({'realm': "example.com",
'domain': "example.com",
'username': "hs20-test",
'password': "password"})
interworking_select(dev[0], bssid, freq=2412)
dev[0].dump_monitor()
dev[0].request("NOTE wpa_config_set(PEAP/FAST-phase1)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set(PEAP/FAST-pac_interworking)")
with alloc_fail(dev[0], 2, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("NOTE wpa_config_set(PEAP/FAST-phase2)")
with alloc_fail(dev[0], 3, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,21"
hapd = hostapd.add_ap(apdev[0], params)
interworking_select(dev[0], bssid, freq=2412)
dev[0].request("NOTE wpa_config_set(TTLS-defaults-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,21[2:3]"
hapd = hostapd.add_ap(apdev[0], params)
interworking_select(dev[0], bssid, freq=2412)
dev[0].request("NOTE wpa_config_set(TTLS-NON_EAP_MSCHAP-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,21[2:2]"
hapd = hostapd.add_ap(apdev[0], params)
interworking_select(dev[0], bssid, freq=2412)
dev[0].request("NOTE wpa_config_set(TTLS-NON_EAP_CHAP-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,21[2:1]"
hapd = hostapd.add_ap(apdev[0], params)
interworking_select(dev[0], bssid, freq=2412)
dev[0].request("NOTE wpa_config_set(TTLS-NON_EAP_PAP-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
hapd.disable()
params = hs20_ap_params()
params['nai_realm'] = "0,example.com,21[3:26]"
hapd = hostapd.add_ap(apdev[0], params)
interworking_select(dev[0], bssid, freq=2412)
dev[0].request("NOTE wpa_config_set(TTLS-EAP-MSCHAPV2-phase2)")
with alloc_fail(dev[0], 1, "wpa_config_parse_str;wpa_config_set;interworking_connect"):
dev[0].request("INTERWORKING_CONNECT " + bssid)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].remove_cred(id)
def test_ap_hs20_unexpected(dev, apdev):
    """Unexpected Hotspot 2.0 AP configuration"""
    # The AP advertises an HS 2.0 indication element while running in mixed
    # WPA+WPA2 mode - an unexpected combination. Verify that three stations
    # with different explicit security overrides can still connect.
    skip_without_tkip(dev[0])
    skip_without_tkip(dev[1])
    skip_without_tkip(dev[2])
    check_eap_capa(dev[0], "MSCHAPV2")
    bssid = apdev[0]['bssid']
    params = hostapd.wpa_eap_params(ssid="test-hs20-fake")
    # wpa=3 selects mixed WPA+WPA2 operation in hostapd.conf.
    params['wpa'] = "3"
    params['wpa_pairwise'] = "TKIP CCMP"
    params['rsn_pairwise'] = "CCMP"
    params['ieee80211w'] = "1"
    # Truncated WFA vendor element (OUI 50 6f 9a, type 0x10 = HS 2.0
    # indication) with no payload; full-length variant kept for reference.
    #params['vendor_elements'] = 'dd07506f9a10140000'
    params['vendor_elements'] = 'dd04506f9a10'
    hostapd.add_ap(apdev[0], params)
    # dev[0]: force TKIP pairwise cipher
    dev[0].hs20_enable()
    dev[0].scan_for_bss(bssid, freq="2412")
    dev[0].connect("test-hs20-fake", key_mgmt="WPA-EAP", eap="TTLS",
                   pairwise="TKIP",
                   identity="hs20-test", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   scan_freq="2412")
    # dev[1]: force the older WPA protocol
    dev[1].hs20_enable()
    dev[1].scan_for_bss(bssid, freq="2412")
    dev[1].connect("test-hs20-fake", key_mgmt="WPA-EAP", eap="TTLS",
                   proto="WPA",
                   identity="hs20-test", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   scan_freq="2412")
    # dev[2]: proper RSN/CCMP with PMF enabled
    dev[2].hs20_enable()
    dev[2].scan_for_bss(bssid, freq="2412")
    dev[2].connect("test-hs20-fake", key_mgmt="WPA-EAP", eap="TTLS",
                   ieee80211w="1",
                   proto="RSN", pairwise="CCMP",
                   identity="hs20-test", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   scan_freq="2412")
def test_ap_interworking_element_update(dev, apdev):
    """Dynamic Interworking element update"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].hs20_enable()
    dev[0].scan_for_bss(bssid, freq="2412")
    bss = dev[0].get_bss(bssid)
    logger.info("Before update: " + str(bss))
    # 0x6b is the Interworking element ID (IEEE 802.11u); the payload
    # encodes access network type/options, venue info and HESSID.
    if '6b091e0701020000000300' not in bss['ie']:
        raise Exception("Expected Interworking element not seen before update")
    # Update configuration parameters related to Interworking element
    hapd.set('access_network_type', '2')
    hapd.set('asra', '1')
    hapd.set('esr', '1')
    hapd.set('uesa', '1')
    hapd.set('venue_group', '2')
    hapd.set('venue_type', '8')
    if "OK" not in hapd.request("UPDATE_BEACON"):
        raise Exception("UPDATE_BEACON failed")
    # Flush the cached scan entry so the fresh scan picks up the new IE.
    dev[0].request("BSS_FLUSH 0")
    dev[0].scan_for_bss(bssid, freq="2412", force_scan=True)
    bss = dev[0].get_bss(bssid)
    logger.info("After update: " + str(bss))
    if '6b09f20208020000000300' not in bss['ie']:
        raise Exception("Expected Interworking element not seen after update")
def test_ap_hs20_terms_and_conditions(dev, apdev):
    """Hotspot 2.0 Terms and Conditions signaling"""
    check_eap_capa(dev[0], "MSCHAPV2")
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # Advertise a Terms and Conditions file so the backend requests T&C
    # acceptance for this subscriber.
    params['hs20_t_c_filename'] = 'terms-and-conditions'
    params['hs20_t_c_timestamp'] = '123456789'
    hostapd.add_ap(apdev[0], params)
    dev[0].hs20_enable()
    dev[0].connect("test-hs20", proto="RSN", key_mgmt="WPA-EAP", eap="TTLS",
                   identity="hs20-t-c-test", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   ieee80211w='2', scan_freq="2412")
    # The station should get a T&C Acceptance notification carrying a URL
    # with its own MAC address substituted in.
    ev = dev[0].wait_event(["HS20-T-C-ACCEPTANCE"], timeout=5)
    if ev is None:
        raise Exception("Terms and Conditions Acceptance notification not received")
    url = "https://example.com/t_and_c?addr=%s&ap=123" % dev[0].own_addr()
    if url not in ev:
        raise Exception("Unexpected URL: " + ev)
def test_ap_hs20_terms_and_conditions_coa(dev, apdev):
    """Hotspot 2.0 Terms and Conditions signaling - CoA"""
    try:
        import pyrad.client
        import pyrad.packet
        import pyrad.dictionary
        import radius_das
    except ImportError:
        raise HwsimSkip("No pyrad modules available")
    check_eap_capa(dev[0], "MSCHAPV2")
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # Enable T&C filtering and a local RADIUS DAS port so hostapd will
    # accept the CoA-Request sent below.
    params['hs20_t_c_filename'] = 'terms-and-conditions'
    params['hs20_t_c_timestamp'] = '123456789'
    params['own_ip_addr'] = "127.0.0.1"
    params['radius_das_port'] = "3799"
    params['radius_das_client'] = "127.0.0.1 secret"
    params['radius_das_require_event_timestamp'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].hs20_enable()
    dev[0].connect("test-hs20", proto="RSN", key_mgmt="WPA-EAP", eap="TTLS",
                   identity="hs20-t-c-test", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   ieee80211w='2', scan_freq="2412")
    # Filtering must be enabled for this STA until the T&C are accepted.
    ev = hapd.wait_event(["HS20-T-C-FILTERING-ADD"], timeout=5)
    if ev is None:
        raise Exception("Terms and Conditions filtering not enabled")
    if ev.split(' ')[1] != dev[0].own_addr():
        raise Exception("Unexpected STA address for filtering: " + ev)
    ev = dev[0].wait_event(["HS20-T-C-ACCEPTANCE"], timeout=5)
    if ev is None:
        raise Exception("Terms and Conditions Acceptance notification not received")
    url = "https://example.com/t_and_c?addr=%s&ap=123" % dev[0].own_addr()
    if url not in ev:
        raise Exception("Unexpected URL: " + ev)
    # Build and send a CoA-Request that clears the T&C filtering state.
    # (local renamed from 'dict' to avoid shadowing the builtin; pyrad's
    # keyword argument is still called dict=)
    radius_dict = pyrad.dictionary.Dictionary("dictionary.radius")
    srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
                              secret=b"secret", dict=radius_dict)
    srv.retries = 1
    srv.timeout = 1
    sta = hapd.get_sta(dev[0].own_addr())
    multi_sess_id = sta['authMultiSessionId']
    logger.info("CoA-Request with matching Acct-Session-Id")
    # Vendor-specific attribute: vendor 0x00009f68 (presumably the WFA
    # PEN 40808) with the T&C filtering value zeroed - TODO confirm.
    vsa = binascii.unhexlify('00009f68090600000000')
    req = radius_das.CoAPacket(dict=radius_dict, secret=b"secret",
                               NAS_IP_Address="127.0.0.1",
                               Acct_Multi_Session_Id=multi_sess_id,
                               Chargeable_User_Identity="hs20-cui",
                               Event_Timestamp=int(time.time()),
                               Vendor_Specific=vsa)
    reply = srv.SendPacket(req)
    logger.debug("RADIUS response from hostapd")
    for i in list(reply.keys()):
        logger.debug("%s: %s" % (i, reply[i]))
    if reply.code != pyrad.packet.CoAACK:
        raise Exception("CoA-Request failed")
    # Filtering should be removed once the CoA-Request is acknowledged.
    ev = hapd.wait_event(["HS20-T-C-FILTERING-REMOVE"], timeout=5)
    if ev is None:
        raise Exception("Terms and Conditions filtering not disabled")
    if ev.split(' ')[1] != dev[0].own_addr():
        raise Exception("Unexpected STA address for filtering: " + ev)
def test_ap_hs20_terms_and_conditions_sql(dev, apdev, params):
    """Hotspot 2.0 Terms and Conditions using SQLite for user DB"""
    # The template carries the @1@ placeholder for the STA address plus an
    # extra "ap" query parameter; the expected URL has it expanded.
    addr = dev[0].own_addr()
    template = "https://example.com/t_and_c?addr=@1@&ap=123"
    expected = "https://example.com/t_and_c?addr=" + addr + "&ap=123"
    run_ap_hs20_terms_and_conditions_sql(dev, apdev, params, template,
                                         expected)
def test_ap_hs20_terms_and_conditions_sql2(dev, apdev, params):
    """Hotspot 2.0 Terms and Conditions using SQLite for user DB"""
    # Variant without the extra "ap" query parameter in the URL template.
    addr = dev[0].own_addr()
    template = "https://example.com/t_and_c?addr=@1@"
    expected = "https://example.com/t_and_c?addr=" + addr
    run_ap_hs20_terms_and_conditions_sql(dev, apdev, params, template,
                                         expected)
def run_ap_hs20_terms_and_conditions_sql(dev, apdev, params, url_template,
url_expected):
check_eap_capa(dev[0], "MSCHAPV2")
try:
import | |
# examples/gemasolar/verification/nashTubeStress.py (from arfontalvoANU/srlife)
#!/usr/bin/env python3
# Copyright (C) 2021 <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
nashTubeStress.py
-- steady-state temperature distribution (Gauss-Seidel iteration)
-- biharmonic thermoelastic stress
See also:
-- Solar Energy 160 (2018) 368-379
-- https://doi.org/10.1016/j.solener.2017.12.003
"""
from math import exp, log, sqrt, pi, ceil, floor, asin
import numpy as np
from numpy import ma
import scipy.optimize as opt
#################################### CLASSES ###################################
class Grid:
    """A cylindrical coordinate (theta, r) grid.

    Units are m, sec, K, W, Pa.
    """

    def __init__(self, nr=6, nt=61, rMin=0.5, rMax=0.7,
                 thetaMin=0, thetaMax=np.radians(180)):
        self.nr, self.nt = nr, nt
        self.a, self.b = rMin, rMax
        # 1-D coordinate vectors and the (theta, r) mesh over them.
        radii = np.linspace(rMin, rMax, nr)
        angles = np.linspace(thetaMin, thetaMax, nt)
        self.r, self.theta = np.meshgrid(radii, angles)
        self.dr = float(rMax - rMin) / (nr - 1)
        dTheta = float(thetaMax - thetaMin) / (nt - 1)
        # Face surface (sf) areas on the inner and outer radii; the two
        # end nodes carry half an interval each (trapezoidal weighting).
        for attr, radius in (("sfRmin", rMin), ("sfRmax", rMax)):
            sf = (np.ones(nt) * np.pi * radius) / (nt - 1)
            sf[0] *= 0.5
            sf[-1] *= 0.5
            setattr(self, attr, sf)
        # Pad theta with one 'ghost' node on each side for symmetry BCs.
        angles = np.concatenate(([thetaMin - dTheta], angles,
                                 [thetaMax + dTheta]))
        self.meshR, self.meshTheta = np.meshgrid(radii, angles)
        # Precomputed constants used by the iterative solver.
        self.twoDrR = 2 * self.dr * self.meshR[1:-1, 1:-1]
        self.dr2, self.dTheta2 = self.dr ** 2, dTheta ** 2
        self.dTheta2R2 = self.dTheta2 * self.meshR[1:-1, 1:-1] ** 2
        self.dnr = 2. / self.dr2 + 2. / self.dTheta2R2
        # Handy boundary-condition helper vectors (include ghost nodes).
        self.cosTheta = np.cos(angles)
        self.sinTheta = np.sin(angles)
        self.fullTube = np.ones(len(angles))
        self.halfTube = np.ones(len(angles))
        self.halfTube[self.cosTheta < 0] = 0.0
class Solver:
""" A Laplacian solver for steady-state conduction in cylinders
-- Gauss-Seidel iteration of T(r, theta)
-- bi-harmonic thermoelastic stress post-processing """
# Constants:
sigma = 5.67e-8 # Stefan-Boltzmann
def __init__(self, grid, debug=False, it='numpy', CG=8.5e5,
             k=21, T_int=723.15, h_int=10e3, U=4.0, R_f=0., A=0.968,
             epsilon=0.87, T_ext=293.15, h_ext=30., P_i=0e5,
             T_0=0., alpha=20e-6, E=165e9, nu=0.31, n=1,
             bend=False, GPS=True):
    """Initialise solver constants and the temperature mesh on *grid*.

    Defaults correspond to UNS S31600 at ~450 degC (per the comment
    below). NOTE(review): *U* is accepted but never stored or used in
    this initialiser - TODO confirm it is intentional.
    """
    self.debug = debug
    # Class constants and variables (default UNS S31600 @ 450degC):
    self.g = grid
    self.setIterator(it)
    self.CG = CG            # concentration (C) x solar constant (G)
    self.k = k              # thermal conductivity of tube
    self.T_int = T_int      # temperature of heat transfer fluid
    self.h_int = h_int      # constant int convection coefficient
    self.R_f = R_f          # internal fouling coefficient
    self.A = A              # tube external surface absorptance
    self.epsilon = epsilon  # tube external emmissivity
    self.T_ext = T_ext      # ambient temperature
    self.h_ext = h_ext      # ext convection coefficient (with wind)
    self.P_i = P_i          # internal pipe pressure
    self.T_0 = T_0          # stress free temperature
    self.alpha = alpha      # thermal expansion coefficienct of tube
    self.E = E              # Modulus of elasticity
    self.nu = nu            # Poisson's coefficient
    self.n = n              # Number of Fourier 'frequencies'
    self.bend = bend        # switch to allow tube bending
    self.GPS = GPS          # switch to turn off generalised plane strain
    # Temperature mesh has two extra theta 'ghost' rows for symmetry BCs;
    # everything starts at the internal fluid temperature.
    self.meshT = np.ones((grid.nt+2, grid.nr), 'd') * T_int
    self.T = self.meshT[1:-1,:] # remove symm for post-processing
def computeError(self):
    """Return the L2 norm of the change between successive iterates.

    Only valid right after an iteration step has stashed self.old_T.
    """
    diff = (self.meshT - self.old_T).ravel()
    return np.sqrt(np.dot(diff, diff))
def numpyStep(self):
    """ Gauss-Seidel iteration using numpy expression. """
    # NOTE(review): the RHS is fully evaluated before assignment, so a
    # single call behaves like a Jacobi sweep; convergence comes from the
    # repeated calls driven by solve().
    self.old_T = self.meshT.copy()
    # "Driving" BCs (heat flux, radiation and convection); extBC/intBC
    # are presumably bound elsewhere (not visible in this excerpt) to one
    # of the ext*/int* BC methods - TODO confirm.
    self.extBC()
    self.intBC()
    # Finite-difference update of all interior (theta, r) nodes using the
    # precomputed grid constants.
    self.meshT[1:-1,1:-1] = (
        ( self.meshT[1:-1,2:] - self.meshT[1:-1,:-2] )
        / self.g.twoDrR +
        ( self.meshT[1:-1,:-2] + self.meshT[1:-1,2:] )
        / self.g.dr2 +
        ( self.meshT[:-2,1:-1] + self.meshT[2:,1:-1] )
        / self.g.dTheta2R2
    ) / self.g.dnr
    # Symmetry boundary conditions (refresh the theta ghost rows)
    self.symmetryBC()
    return self.computeError()
def blitzStep(self):
    """ DEPRECATED blitz using weave

    NOTE(review): 'weave' is not imported in this module's visible
    imports, so calling this raises NameError; it is also disabled in
    setIterator. Kept for historical reference only.
    """
    self.old_T = self.meshT.copy()
    # "Driving" BCs (heat flux, radiation and convection)
    self.extBC()
    self.intBC()
    # Prepare constants and arrays for blitz
    T = self.meshT
    twoDrR = self.g.twoDrR
    dr2 = self.g.dr2
    dTheta2R2 = self.g.dTheta2R2
    dnr = self.g.dnr
    # Same stencil as numpyStep, expressed as a blitz expression string.
    expr = "T[1:-1,1:-1] = ("\
           "( T[1:-1,2:] - T[1:-1,:-2] ) / twoDrR +"\
           "( T[1:-1,:-2] + T[1:-1,2:] ) / dr2 +"\
           "( T[:-2,1:-1] + T[2:,1:-1] ) / dTheta2R2"\
           ") / dnr"
    weave.blitz(expr, check_size=0)
    # Transfer result back to mesh/grid
    self.meshT = T
    # Symmetry boundary conditions
    self.symmetryBC()
    return self.computeError()
def inlineStep(self):
    """ DEPRECATED inline C code

    NOTE(review): depends on weave.inline and the blitz type converters,
    neither of which is imported in this module's visible imports, so
    calling this raises NameError. Kept for historical reference only.
    Unlike numpyStep, the C loop computes the error in-place instead of
    copying the mesh first.
    """
    # "Driving" BCs (heat flux, radiation and convection)
    self.extBC()
    self.intBC()
    # Prepare constants and arrays for blitz
    T = self.meshT
    nt, nr = self.meshT.shape
    twoDrR = self.g.twoDrR
    dr2 = self.g.dr2
    dTheta2R2 = self.g.dTheta2R2
    dnr = self.g.dnr
    # C source for the Gauss-Seidel sweep; accumulates the squared update
    # differences and returns their square root.
    code = """
    #line 000 "nashTubeStress.py"
    double tmp, err, diff;
    err = 0.0;
    for (int i=1; i<nt-1; ++i) {
        for (int j=1; j<nr-1; ++j) {
            tmp = T(i,j);
            T(i,j) = ((T(i,j+1) - T(i,j-1))/twoDrR(i-1,j-1) +
                      (T(i,j-1) + T(i,j+1))/dr2 +
                      (T(i-1,j) + T(i+1,j))/dTheta2R2(i-1,j-1)
                     ) / dnr(i-1,j-1);
            diff = T(i,j) - tmp;
            err += diff*diff;
        }
    }
    return_val = sqrt(err);
    """
    err = weave.inline(code,
                       ['nr', 'nt', 'T', 'twoDrR',
                        'dr2', 'dTheta2R2', 'dnr'],
                       type_converters=converters.blitz,
                       compiler = 'gcc')
    # Transfer result back to mesh/grid
    self.meshT = T
    # Symmetry boundary conditions
    self.symmetryBC()
    return err
def setIterator(self, iterator='numpy'):
    """Select the iteration scheme; only 'numpy' remains supported.

    The historical 'blitz' and 'inline' (weave-based) schemes are
    commented out, so every choice of *iterator* currently resolves to
    numpyStep - exactly as in the original if/else chain.
    """
    # elif iterator == 'blitz':
    #     self.iterate = self.blitzStep
    # elif iterator == 'inline':
    #     self.iterate = self.inlineStep
    self.iterate = self.numpyStep
def solve(self, n_iter=0, eps=1.0e-16):
    """ Solves the equation given:
    - an error precision -- eps
    - a maximum number of iterations -- n_iter (0 means unlimited)

    Returns the iteration count (int) on convergence. NOTE(review): if
    the iteration limit is hit, the current error (float) is returned
    instead and self.T is NOT refreshed - callers can distinguish the
    outcomes by the return type.
    """
    err = self.iterate()
    count = 1
    while err > eps:
        if n_iter and count >= n_iter:
            # Iteration limit reached: bail out with the residual error.
            return err
        err = self.iterate()
        count = count + 1
    # Converged: expose the solution without the theta ghost rows.
    self.T = self.meshT[1:-1,:]
    return count
def postProcessing(self):
    """Run the post-processing chain on the converged temperature field.

    Only the thermoelastic stress step is active; the heat-flux balance
    and Babcock & Wilcox comparison are deliberately disabled.
    """
    # self.heatFluxBalance()
    self.linearElasticStress()
    # self.babcockAndWilcoxStress()
    return
############################ BOUNDARY CONDITIONS ###########################
def symmetryBC(self):
    """ Sets the left and right symmetry BCs """
    # Mirror the second interior row into each theta ghost row so that
    # dT/dtheta = 0 across the thetaMin and thetaMax boundaries.
    self.meshT[0, 1:-1] = self.meshT[2, 1:-1]
    self.meshT[-1, 1:-1] = self.meshT[-3, 1:-1]
def extTubeHalfTemp(self):
    """ fixedValue boundary condition """
    # Pin every outer-radius node to the ambient temperature.
    self.meshT[:,-1] = self.T_ext
def extTubeHalfConv(self):
    """ Convective boundary condition """
    # Discretised Robin BC at r = b: -k dT/dr = h_ext (T - T_ext),
    # solved for the boundary node from its inner neighbour.
    self.meshT[:, -1] = (self.meshT[:,-2] + \
                         ((self.g.dr * self.h_ext /
                           self.k) * self.T_ext)) \
                        / (1 + (self.g.dr * self.h_ext / self.k))
def extTubeHalfFlux(self):
    """ Heat flux boundary condition """
    # Uniform Neumann BC: T_b = T_{b-1} + q'' * dr / k with q'' = CG.
    self.meshT[:,-1] = ((self.g.dr * self.CG) /
                        self.k) + self.meshT[:, -2]
def extTubeHalfCosFlux(self):
    """ 100% absorbed cosine flux boundary condition """
    # Incident flux varies as cos(theta) and only strikes the front
    # (halfTube) face; all of it is absorbed, with no loss terms.
    self.phi_inc = (self.g.halfTube * \
                    self.CG * self.g.cosTheta)
    phi_t = self.phi_inc
    self.meshT[:,-1] = self.meshT[:,-2] + \
                       (phi_t * self.g.dr / self.k)
def extTubeHalfCosFluxRadConv(self):
    """ Heat flux, re-radiation and convection boundary condition """
    # Front-face cosine flux scaled by absorptance A, minus grey-body
    # re-radiation and convective loss applied around the whole tube.
    self.phi_inc = (self.g.halfTube * \
                    self.CG * self.g.cosTheta)
    phi_t = self.phi_inc * self.A \
            - (self.sigma * self.epsilon \
               * (self.meshT[:,-1]**4 - self.T_ext**4)) \
            - (self.h_ext * (self.meshT[:,-1] - self.T_ext))
    self.meshT[:,-1] = self.meshT[:,-2] + \
                       (phi_t * self.g.dr / self.k)
def extTubeFullCosFluxRadConv(self):
    """ Heat flux, re-radiation and convection boundary condition """
    # As extTubeHalfCosFluxRadConv but with |cos(theta)| flux hitting the
    # full circumference (no halfTube mask).
    self.phi_inc = (self.CG * np.abs(self.g.cosTheta))
    phi_t = self.phi_inc * self.A \
            - (self.sigma * self.epsilon \
               * (self.meshT[:,-1]**4 - self.T_ext**4)) \
            - (self.h_ext * (self.meshT[:,-1] - self.T_ext))
    self.meshT[:,-1] = self.meshT[:,-2] + \
                       (phi_t * self.g.dr / self.k)
def extTubeFluxProfileRadConv(self):
    """ Heat flux profile, re-radiation and convection boundary condition """
    # Uses a caller-supplied incident flux profile: self.phi_inc must be
    # assigned before this BC is invoked (it is not set here) - raises
    # AttributeError otherwise.
    phi_t = self.phi_inc * self.A \
            - (self.sigma * self.epsilon \
               * (self.meshT[:,-1]**4 - self.T_ext**4)) \
            - (self.h_ext * (self.meshT[:,-1] - self.T_ext))
    self.meshT[:,-1] = self.meshT[:,-2] + \
                       (phi_t * self.g.dr / self.k)
def extTubeHalfCosFluxRadConvAdiabaticBack(self):
""" Heat flux, re-radiation and convection boundary condition """
self.phi_inc = (self.g.halfTube * \
self.CG * self.g.cosTheta)
phi_t = self.phi_inc * self.A \
- (self.g.halfTube * self.sigma * self.epsilon \
* (self.meshT[:,-1]**4 - self.T_ext**4)) \
- (self.h_ext * self.g.halfTube *
(self.meshT[:,-1] - self.T_ext))
self.meshT[:,-1] = self.meshT[:,-2] + \
(phi_t | |
# DeepLearning UK Lottery -Euro-Hotpick-Latest.py
#!/usr/bin/env python
# coding: utf-8
# In[3]:
# -*- coding: utf-8 -*-
"""
Created on 10th Feb 2021
@author: <NAME>
"""
#importing libraries
import tensorflow as tf #tensorflow lib for defining the architecture of neural network
import pandas as pd #pandas lib we will use to load the dataset from excel (.csv) file
import numpy as np # numpy array to deal with arrays
import random # random lib to generate random numbers
#%%
'''
Create the input data set according to the output numbers: each row is the
full pool of candidate numbers (1-50).
'''
numbers = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],
[1, 2, 3, 4, 5, 6, 7, | |
'OpShiftRightLogical' : 194,
'OpShiftRightArithmetic' : 195,
'OpShiftLeftLogical' : 196,
'OpBitwiseOr' : 197,
'OpBitwiseXor' : 198,
'OpBitwiseAnd' : 199,
'OpNot' : 200,
'OpBitFieldInsert' : 201,
'OpBitFieldSExtract' : 202,
'OpBitFieldUExtract' : 203,
'OpBitReverse' : 204,
'OpBitCount' : 205,
'OpDPdx' : 207,
'OpDPdy' : 208,
'OpFwidth' : 209,
'OpDPdxFine' : 210,
'OpDPdyFine' : 211,
'OpFwidthFine' : 212,
'OpDPdxCoarse' : 213,
'OpDPdyCoarse' : 214,
'OpFwidthCoarse' : 215,
'OpEmitVertex' : 218,
'OpEndPrimitive' : 219,
'OpEmitStreamVertex' : 220,
'OpEndStreamPrimitive' : 221,
'OpControlBarrier' : 224,
'OpMemoryBarrier' : 225,
'OpAtomicLoad' : 227,
'OpAtomicStore' : 228,
'OpAtomicExchange' : 229,
'OpAtomicCompareExchange' : 230,
'OpAtomicCompareExchangeWeak' : 231,
'OpAtomicIIncrement' : 232,
'OpAtomicIDecrement' : 233,
'OpAtomicIAdd' : 234,
'OpAtomicISub' : 235,
'OpAtomicSMin' : 236,
'OpAtomicUMin' : 237,
'OpAtomicSMax' : 238,
'OpAtomicUMax' : 239,
'OpAtomicAnd' : 240,
'OpAtomicOr' : 241,
'OpAtomicXor' : 242,
'OpPhi' : 245,
'OpLoopMerge' : 246,
'OpSelectionMerge' : 247,
'OpLabel' : 248,
'OpBranch' : 249,
'OpBranchConditional' : 250,
'OpSwitch' : 251,
'OpKill' : 252,
'OpReturn' : 253,
'OpReturnValue' : 254,
'OpUnreachable' : 255,
'OpLifetimeStart' : 256,
'OpLifetimeStop' : 257,
'OpGroupAsyncCopy' : 259,
'OpGroupWaitEvents' : 260,
'OpGroupAll' : 261,
'OpGroupAny' : 262,
'OpGroupBroadcast' : 263,
'OpGroupIAdd' : 264,
'OpGroupFAdd' : 265,
'OpGroupFMin' : 266,
'OpGroupUMin' : 267,
'OpGroupSMin' : 268,
'OpGroupFMax' : 269,
'OpGroupUMax' : 270,
'OpGroupSMax' : 271,
'OpReadPipe' : 274,
'OpWritePipe' : 275,
'OpReservedReadPipe' : 276,
'OpReservedWritePipe' : 277,
'OpReserveReadPipePackets' : 278,
'OpReserveWritePipePackets' : 279,
'OpCommitReadPipe' : 280,
'OpCommitWritePipe' : 281,
'OpIsValidReserveId' : 282,
'OpGetNumPipePackets' : 283,
'OpGetMaxPipePackets' : 284,
'OpGroupReserveReadPipePackets' : 285,
'OpGroupReserveWritePipePackets' : 286,
'OpGroupCommitReadPipe' : 287,
'OpGroupCommitWritePipe' : 288,
'OpEnqueueMarker' : 291,
'OpEnqueueKernel' : 292,
'OpGetKernelNDrangeSubGroupCount' : 293,
'OpGetKernelNDrangeMaxSubGroupSize' : 294,
'OpGetKernelWorkGroupSize' : 295,
'OpGetKernelPreferredWorkGroupSizeMultiple' : 296,
'OpRetainEvent' : 297,
'OpReleaseEvent' : 298,
'OpCreateUserEvent' : 299,
'OpIsValidEvent' : 300,
'OpSetUserEventStatus' : 301,
'OpCaptureEventProfilingInfo' : 302,
'OpGetDefaultQueue' : 303,
'OpBuildNDRange' : 304,
'OpImageSparseSampleImplicitLod' : 305,
'OpImageSparseSampleExplicitLod' : 306,
'OpImageSparseSampleDrefImplicitLod' : 307,
'OpImageSparseSampleDrefExplicitLod' : 308,
'OpImageSparseSampleProjImplicitLod' : 309,
'OpImageSparseSampleProjExplicitLod' : 310,
'OpImageSparseSampleProjDrefImplicitLod' : 311,
'OpImageSparseSampleProjDrefExplicitLod' : 312,
'OpImageSparseFetch' : 313,
'OpImageSparseGather' : 314,
'OpImageSparseDrefGather' : 315,
'OpImageSparseTexelsResident' : 316,
'OpNoLine' : 317,
'OpAtomicFlagTestAndSet' : 318,
'OpAtomicFlagClear' : 319,
'OpImageSparseRead' : 320,
'OpSizeOf' : 321,
'OpTypePipeStorage' : 322,
'OpConstantPipeStorage' : 323,
'OpCreatePipeFromPipeStorage' : 324,
'OpGetKernelLocalSizeForSubgroupCount' : 325,
'OpGetKernelMaxNumSubgroups' : 326,
'OpTypeNamedBarrier' : 327,
'OpNamedBarrierInitialize' : 328,
'OpMemoryNamedBarrier' : 329,
'OpModuleProcessed' : 330,
'OpExecutionModeId' : 331,
'OpDecorateId' : 332,
'OpGroupNonUniformElect' : 333,
'OpGroupNonUniformAll' : 334,
'OpGroupNonUniformAny' : 335,
'OpGroupNonUniformAllEqual' : 336,
'OpGroupNonUniformBroadcast' : 337,
'OpGroupNonUniformBroadcastFirst' : 338,
'OpGroupNonUniformBallot' : 339,
'OpGroupNonUniformInverseBallot' : 340,
'OpGroupNonUniformBallotBitExtract' : 341,
'OpGroupNonUniformBallotBitCount' : 342,
'OpGroupNonUniformBallotFindLSB' : 343,
'OpGroupNonUniformBallotFindMSB' : 344,
'OpGroupNonUniformShuffle' : 345,
'OpGroupNonUniformShuffleXor' : 346,
'OpGroupNonUniformShuffleUp' : 347,
'OpGroupNonUniformShuffleDown' : 348,
'OpGroupNonUniformIAdd' : 349,
'OpGroupNonUniformFAdd' : 350,
'OpGroupNonUniformIMul' : 351,
'OpGroupNonUniformFMul' : 352,
'OpGroupNonUniformSMin' : 353,
'OpGroupNonUniformUMin' : 354,
'OpGroupNonUniformFMin' : 355,
'OpGroupNonUniformSMax' : 356,
'OpGroupNonUniformUMax' : 357,
'OpGroupNonUniformFMax' : 358,
'OpGroupNonUniformBitwiseAnd' : 359,
'OpGroupNonUniformBitwiseOr' : 360,
'OpGroupNonUniformBitwiseXor' : 361,
'OpGroupNonUniformLogicalAnd' : 362,
'OpGroupNonUniformLogicalOr' : 363,
'OpGroupNonUniformLogicalXor' : 364,
'OpGroupNonUniformQuadBroadcast' : 365,
'OpGroupNonUniformQuadSwap' : 366,
'OpCopyLogical' : 400,
'OpPtrEqual' : 401,
'OpPtrNotEqual' : 402,
'OpPtrDiff' : 403,
'OpTerminateInvocation' : 4416,
'OpSubgroupBallotKHR' : 4421,
'OpSubgroupFirstInvocationKHR' : 4422,
'OpSubgroupAllKHR' : 4428,
'OpSubgroupAnyKHR' : 4429,
'OpSubgroupAllEqualKHR' : 4430,
'OpSubgroupReadInvocationKHR' : 4432,
'OpTraceRayKHR' : 4445,
'OpExecuteCallableKHR' : 4446,
'OpConvertUToAccelerationStructureKHR' : 4447,
'OpIgnoreIntersectionKHR' : 4448,
'OpTerminateRayKHR' : 4449,
'OpSDotKHR' : 4450,
'OpUDotKHR' : 4451,
'OpSUDotKHR' : 4452,
'OpSDotAccSatKHR' : 4453,
'OpUDotAccSatKHR' : 4454,
'OpSUDotAccSatKHR' : 4455,
'OpTypeRayQueryKHR' : 4472,
'OpRayQueryInitializeKHR' : 4473,
'OpRayQueryTerminateKHR' : 4474,
'OpRayQueryGenerateIntersectionKHR' : 4475,
'OpRayQueryConfirmIntersectionKHR' : 4476,
'OpRayQueryProceedKHR' : 4477,
'OpRayQueryGetIntersectionTypeKHR' : 4479,
'OpGroupIAddNonUniformAMD' : 5000,
'OpGroupFAddNonUniformAMD' : 5001,
'OpGroupFMinNonUniformAMD' : 5002,
'OpGroupUMinNonUniformAMD' : 5003,
'OpGroupSMinNonUniformAMD' : 5004,
'OpGroupFMaxNonUniformAMD' : 5005,
'OpGroupUMaxNonUniformAMD' : 5006,
'OpGroupSMaxNonUniformAMD' : 5007,
'OpFragmentMaskFetchAMD' : 5011,
'OpFragmentFetchAMD' : 5012,
'OpReadClockKHR' : 5056,
'OpImageSampleFootprintNV' : 5283,
'OpGroupNonUniformPartitionNV' : 5296,
'OpWritePackedPrimitiveIndices4x8NV' : 5299,
'OpReportIntersectionKHR' : 5334,
'OpReportIntersectionNV' : 5334,
'OpIgnoreIntersectionNV' : 5335,
'OpTerminateRayNV' : 5336,
'OpTraceNV' : 5337,
'OpTypeAccelerationStructureKHR' : 5341,
'OpTypeAccelerationStructureNV' : 5341,
'OpExecuteCallableNV' : 5344,
'OpTypeCooperativeMatrixNV' : 5358,
'OpCooperativeMatrixLoadNV' : 5359,
'OpCooperativeMatrixStoreNV' : 5360,
'OpCooperativeMatrixMulAddNV' : 5361,
'OpCooperativeMatrixLengthNV' : 5362,
'OpBeginInvocationInterlockEXT' : 5364,
'OpEndInvocationInterlockEXT' : 5365,
'OpDemoteToHelperInvocationEXT' : 5380,
'OpIsHelperInvocationEXT' : 5381,
'OpSubgroupShuffleINTEL' : 5571,
'OpSubgroupShuffleDownINTEL' : 5572,
'OpSubgroupShuffleUpINTEL' : 5573,
'OpSubgroupShuffleXorINTEL' : 5574,
'OpSubgroupBlockReadINTEL' : 5575,
'OpSubgroupBlockWriteINTEL' : 5576,
'OpSubgroupImageBlockReadINTEL' : 5577,
'OpSubgroupImageBlockWriteINTEL' : 5578,
'OpSubgroupImageMediaBlockReadINTEL' : 5580,
'OpSubgroupImageMediaBlockWriteINTEL' : 5581,
'OpUCountLeadingZerosINTEL' : 5585,
'OpUCountTrailingZerosINTEL' : 5586,
'OpAbsISubINTEL' : 5587,
'OpAbsUSubINTEL' : 5588,
'OpIAddSatINTEL' : 5589,
'OpUAddSatINTEL' : 5590,
'OpIAverageINTEL' : 5591,
'OpUAverageINTEL' : 5592,
'OpIAverageRoundedINTEL' : 5593,
'OpUAverageRoundedINTEL' : 5594,
'OpISubSatINTEL' : 5595,
'OpUSubSatINTEL' : 5596,
'OpIMul32x16INTEL' : 5597,
'OpUMul32x16INTEL' : 5598,
'OpConstFunctionPointerINTEL' : 5600,
'OpFunctionPointerCallINTEL' : 5601,
'OpAsmTargetINTEL' : 5609,
'OpAsmINTEL' : 5610,
'OpAsmCallINTEL' : 5611,
'OpAtomicFMinEXT' : 5614,
'OpAtomicFMaxEXT' : 5615,
'OpAssumeTrueKHR' : 5630,
'OpExpectKHR' : 5631,
'OpDecorateString' : 5632,
'OpDecorateStringGOOGLE' : 5632,
'OpMemberDecorateString' : 5633,
'OpMemberDecorateStringGOOGLE' : 5633,
'OpVmeImageINTEL' : 5699,
'OpTypeVmeImageINTEL' : 5700,
'OpTypeAvcImePayloadINTEL' : 5701,
'OpTypeAvcRefPayloadINTEL' : 5702,
'OpTypeAvcSicPayloadINTEL' : 5703,
'OpTypeAvcMcePayloadINTEL' : 5704,
'OpTypeAvcMceResultINTEL' : 5705,
'OpTypeAvcImeResultINTEL' : 5706,
'OpTypeAvcImeResultSingleReferenceStreamoutINTEL' : 5707,
'OpTypeAvcImeResultDualReferenceStreamoutINTEL' : 5708,
'OpTypeAvcImeSingleReferenceStreaminINTEL' : 5709,
'OpTypeAvcImeDualReferenceStreaminINTEL' : 5710,
'OpTypeAvcRefResultINTEL' : 5711,
'OpTypeAvcSicResultINTEL' : 5712,
'OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL' : 5713,
'OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL' : 5714,
'OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL' : 5715,
'OpSubgroupAvcMceSetInterShapePenaltyINTEL' : 5716,
'OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL' : 5717,
'OpSubgroupAvcMceSetInterDirectionPenaltyINTEL' : 5718,
'OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL' : 5719,
'OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL' : 5720,
'OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL' : 5721,
'OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL' : 5722,
'OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL' : 5723,
'OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL' : 5724,
'OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL' : 5725,
'OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL' : 5726,
'OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL' : 5727,
'OpSubgroupAvcMceSetAcOnlyHaarINTEL' : 5728,
'OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL' : 5729,
'OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL' : 5730,
'OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL' : 5731,
'OpSubgroupAvcMceConvertToImePayloadINTEL' : 5732,
'OpSubgroupAvcMceConvertToImeResultINTEL' : 5733,
'OpSubgroupAvcMceConvertToRefPayloadINTEL' : 5734,
'OpSubgroupAvcMceConvertToRefResultINTEL' : 5735,
'OpSubgroupAvcMceConvertToSicPayloadINTEL' : 5736,
'OpSubgroupAvcMceConvertToSicResultINTEL' : 5737,
'OpSubgroupAvcMceGetMotionVectorsINTEL' : 5738,
'OpSubgroupAvcMceGetInterDistortionsINTEL' : 5739,
'OpSubgroupAvcMceGetBestInterDistortionsINTEL' : 5740,
'OpSubgroupAvcMceGetInterMajorShapeINTEL' : 5741,
'OpSubgroupAvcMceGetInterMinorShapeINTEL' : 5742,
'OpSubgroupAvcMceGetInterDirectionsINTEL' : 5743,
'OpSubgroupAvcMceGetInterMotionVectorCountINTEL' : 5744,
'OpSubgroupAvcMceGetInterReferenceIdsINTEL' : 5745,
'OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL' : 5746,
'OpSubgroupAvcImeInitializeINTEL' : 5747,
'OpSubgroupAvcImeSetSingleReferenceINTEL' : 5748,
'OpSubgroupAvcImeSetDualReferenceINTEL' : 5749,
'OpSubgroupAvcImeRefWindowSizeINTEL' : 5750,
'OpSubgroupAvcImeAdjustRefOffsetINTEL' : 5751,
'OpSubgroupAvcImeConvertToMcePayloadINTEL' : 5752,
'OpSubgroupAvcImeSetMaxMotionVectorCountINTEL' : 5753,
'OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL' : 5754,
'OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL' : 5755,
'OpSubgroupAvcImeSetWeightedSadINTEL' : 5756,
'OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL' : 5757,
'OpSubgroupAvcImeEvaluateWithDualReferenceINTEL' : 5758,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL' : 5759,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL' : 5760,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL' : 5761,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL' : 5762,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL' : 5763,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL' : 5764,
'OpSubgroupAvcImeConvertToMceResultINTEL' : 5765,
'OpSubgroupAvcImeGetSingleReferenceStreaminINTEL' : 5766,
'OpSubgroupAvcImeGetDualReferenceStreaminINTEL' : 5767,
'OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL' : 5768,
'OpSubgroupAvcImeStripDualReferenceStreamoutINTEL' : 5769,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL' : 5770,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL' : 5771,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL' : 5772,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL' : 5773,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL' : 5774,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL' : 5775,
'OpSubgroupAvcImeGetBorderReachedINTEL' : 5776,
'OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL' : 5777,
'OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL' : 5778,
'OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL' : 5779,
'OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL' : 5780,
'OpSubgroupAvcFmeInitializeINTEL' : 5781,
'OpSubgroupAvcBmeInitializeINTEL' : 5782,
'OpSubgroupAvcRefConvertToMcePayloadINTEL' : 5783,
'OpSubgroupAvcRefSetBidirectionalMixDisableINTEL' : 5784,
'OpSubgroupAvcRefSetBilinearFilterEnableINTEL' : 5785,
'OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL' : 5786,
'OpSubgroupAvcRefEvaluateWithDualReferenceINTEL' : 5787,
'OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL' : 5788,
'OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL' : 5789,
'OpSubgroupAvcRefConvertToMceResultINTEL' : 5790,
'OpSubgroupAvcSicInitializeINTEL' : 5791,
'OpSubgroupAvcSicConfigureSkcINTEL' : 5792,
'OpSubgroupAvcSicConfigureIpeLumaINTEL' : 5793,
'OpSubgroupAvcSicConfigureIpeLumaChromaINTEL' : 5794,
'OpSubgroupAvcSicGetMotionVectorMaskINTEL' : 5795,
'OpSubgroupAvcSicConvertToMcePayloadINTEL' : 5796,
'OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL' : 5797,
'OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL' : 5798,
'OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL' : 5799,
'OpSubgroupAvcSicSetBilinearFilterEnableINTEL' : 5800,
'OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL' : 5801,
'OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL' : 5802,
'OpSubgroupAvcSicEvaluateIpeINTEL' : 5803,
'OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL' : 5804,
'OpSubgroupAvcSicEvaluateWithDualReferenceINTEL' : 5805,
'OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL' : 5806,
'OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL' : 5807,
'OpSubgroupAvcSicConvertToMceResultINTEL' : 5808,
'OpSubgroupAvcSicGetIpeLumaShapeINTEL' : 5809,
'OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL' : 5810,
'OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL' : 5811,
'OpSubgroupAvcSicGetPackedIpeLumaModesINTEL' : 5812,
'OpSubgroupAvcSicGetIpeChromaModeINTEL' : 5813,
'OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL' : 5814,
'OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL' : 5815,
'OpSubgroupAvcSicGetInterRawSadsINTEL' : 5816,
'OpVariableLengthArrayINTEL' : 5818,
'OpSaveMemoryINTEL' : 5819,
'OpRestoreMemoryINTEL' : 5820,
'OpArbitraryFloatSinCosPiINTEL' : 5840,
'OpArbitraryFloatCastINTEL' : 5841,
'OpArbitraryFloatCastFromIntINTEL' : 5842,
'OpArbitraryFloatCastToIntINTEL' : 5843,
'OpArbitraryFloatAddINTEL' : 5846,
'OpArbitraryFloatSubINTEL' : 5847,
'OpArbitraryFloatMulINTEL' : 5848,
'OpArbitraryFloatDivINTEL' : 5849,
'OpArbitraryFloatGTINTEL' : 5850,
'OpArbitraryFloatGEINTEL' : 5851,
'OpArbitraryFloatLTINTEL' : 5852,
'OpArbitraryFloatLEINTEL' : 5853,
'OpArbitraryFloatEQINTEL' : 5854,
'OpArbitraryFloatRecipINTEL' : 5855,
'OpArbitraryFloatRSqrtINTEL' : 5856,
'OpArbitraryFloatCbrtINTEL' : 5857,
'OpArbitraryFloatHypotINTEL' : 5858,
'OpArbitraryFloatSqrtINTEL' : 5859,
'OpArbitraryFloatLogINTEL' : 5860,
'OpArbitraryFloatLog2INTEL' : 5861,
'OpArbitraryFloatLog10INTEL' : 5862,
'OpArbitraryFloatLog1pINTEL' : 5863,
'OpArbitraryFloatExpINTEL' : 5864,
'OpArbitraryFloatExp2INTEL' : 5865,
'OpArbitraryFloatExp10INTEL' : 5866,
'OpArbitraryFloatExpm1INTEL' : 5867,
'OpArbitraryFloatSinINTEL' : 5868,
'OpArbitraryFloatCosINTEL' : 5869,
'OpArbitraryFloatSinCosINTEL' : 5870,
'OpArbitraryFloatSinPiINTEL' : 5871,
'OpArbitraryFloatCosPiINTEL' : 5872,
'OpArbitraryFloatASinINTEL' : 5873,
'OpArbitraryFloatASinPiINTEL' : 5874,
'OpArbitraryFloatACosINTEL' : 5875,
'OpArbitraryFloatACosPiINTEL' : 5876,
'OpArbitraryFloatATanINTEL' : 5877,
'OpArbitraryFloatATanPiINTEL' : 5878,
'OpArbitraryFloatATan2INTEL' : 5879,
'OpArbitraryFloatPowINTEL' : 5880,
'OpArbitraryFloatPowRINTEL' : 5881,
'OpArbitraryFloatPowNINTEL' : 5882,
'OpLoopControlINTEL' : 5887,
'OpFixedSqrtINTEL' : 5923,
'OpFixedRecipINTEL' : 5924,
'OpFixedRsqrtINTEL' : 5925,
'OpFixedSinINTEL' : 5926,
'OpFixedCosINTEL' : 5927,
'OpFixedSinCosINTEL' : 5928,
'OpFixedSinPiINTEL' : | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['acr'] = """
type: group
short-summary: Manage Azure Container Registries.
"""
helps['acr credential'] = """
type: group
short-summary: Manage login credentials for Azure Container Registries.
"""
helps['acr repository'] = """
type: group
short-summary: Manage repositories for Azure Container Registries.
"""
helps['acr webhook'] = """
type: group
short-summary: Manage webhooks for Azure Container Registries.
"""
helps['acr replication'] = """
type: group
short-summary: Manage replications for Azure Container Registries.
"""
helps['acr check-name'] = """
type: command
short-summary: Checks if a container registry name is available for use.
examples:
- name: Check if a registry name already exists.
text: >
az acr check-name -n doesthisnameexist
"""
helps['acr list'] = """
type: command
short-summary: Lists all the container registries under the current subscription.
examples:
- name: List container registries and show the results in a table.
text: >
az acr list -o table
- name: List container registries in a resource group and show the results in a table.
text: >
az acr list -g MyResourceGroup -o table
"""
helps['acr create'] = """
type: command
short-summary: Creates a container registry.
examples:
- name: Create a managed container registry with the Standard SKU.
text: >
az acr create -n MyRegistry -g MyResourceGroup --sku Standard
- name: Create a container registry with a new storage account with the Classic SKU.
text: >
az acr create -n MyRegistry -g MyResourceGroup --sku Classic
"""
helps['acr delete'] = """
type: command
short-summary: Deletes a container registry.
examples:
- name: Delete a container registry.
text: >
az acr delete -n MyRegistry
"""
helps['acr show'] = """
type: command
short-summary: Get the details of a container registry.
examples:
- name: Get the login server for a container registry.
text: >
az acr show -n MyRegistry --query loginServer
"""
helps['acr update'] = """
type: command
short-summary: Update a container registry.
examples:
- name: Update tags for a container registry.
text: >
az acr update -n MyRegistry --tags key1=value1 key2=value2
- name: Update the storage account for a container registry.
text: >
az acr update -n MyRegistry --storage-account-name MyStorageAccount
- name: Enable the administrator user account for a container registry.
text: >
az acr update -n MyRegistry --admin-enabled true
"""
helps['acr login'] = """
type: command
short-summary: Log in to a container registry through Docker.
examples:
- name: Log in to a container registry
text: >
az acr login -n MyRegistry
"""
helps['acr show-usage'] = """
type: command
short-summary: Get the quota usages for a container registry.
examples:
- name: Get the quota usages for a container registry.
text: >
az acr show-usage -n MyRegistry
"""
helps['acr credential show'] = """
type: command
short-summary: Get the login credentials for a container registry.
examples:
- name: Get the login credentials for a container registry.
text: >
az acr credential show -n MyRegistry
- name: Get the username used to log into a container registry.
text: >
az acr credential show -n MyRegistry --query username
- name: Get a password used to log into a container registry.
text: >
az acr credential show -n MyRegistry --query passwords[0].value
"""
helps['acr credential renew'] = """
type: command
short-summary: Regenerate login credentials for a container registry.
examples:
- name: Renew the second password for a container registry.
text: >
az acr credential renew -n MyRegistry --password-name <PASSWORD>
"""
helps['acr repository list'] = """
type: command
short-summary: List repositories in a container registry.
examples:
- name: List repositories in a given container registry.
text:
az acr repository list -n MyRegistry
"""
helps['acr repository show-tags'] = """
type: command
short-summary: Show tags for a repository in a container registry.
examples:
- name: Show tags of a repository in a container registry.
text:
az acr repository show-tags -n MyRegistry --repository MyRepository
"""
helps['acr repository show-manifests'] = """
type: command
short-summary: Show manifests of a repository in a container registry.
examples:
- name: Show manifests of a repository in a container registry.
text:
az acr repository show-manifests -n MyRegistry --repository MyRepository
"""
# Help for `az acr repository delete`. Fixes the user-facing typo
# "manfiest" -> "manifest" in the last example and aligns the `text: >`
# folded-scalar style with the rest of the file.
helps['acr repository delete'] = """
    type: command
    short-summary: Delete a repository, manifest, or tag in a container registry.
    examples:
        - name: Delete a repository from a container registry.
          text: >
            az acr repository delete -n MyRegistry --repository MyRepository
        - name: Delete a tag from a repository. This does not delete the manifest referenced by the tag or any associated layer data.
          text: >
            az acr repository delete -n MyRegistry --repository MyRepository --tag MyTag
        - name: Delete the manifest referenced by a tag. This also deletes any associated layer data and all other tags referencing the manifest.
          text: >
            az acr repository delete -n MyRegistry --repository MyRepository --tag MyTag --manifest
        - name: Delete a manifest from a repository. This also deletes any associated layer data and all tags referencing the manifest.
          text: >
            az acr repository delete -n MyRegistry --repository MyRepository --manifest MyManifest
"""
helps['acr webhook list'] = """
type: command
short-summary: List all of the webhooks for a container registry.
examples:
- name: List webhooks and show the results in a table.
text: >
az acr webhook list -r MyRegistry -o table
"""
helps['acr webhook create'] = """
type: command
short-summary: Create a webhook for a container registry.
examples:
- name: Create a webhook for a container registry that will deliver Docker push and delete events to a service URI.
text: >
az acr webhook create -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push delete
- name: Create a webhook for a container registry that will deliver Docker push events to a service URI with a basic authentication header.
text: >
az acr webhook create -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push --headers "Authorization=Basic 000000"
"""
helps['acr webhook delete'] = """
type: command
short-summary: Delete a webhook from a container registry.
examples:
- name: Delete a webhook from a container registry.
text: >
az acr webhook delete -n MyWebhook -r MyRegistry
"""
helps['acr webhook show'] = """
type: command
short-summary: Get the details of a webhook.
examples:
- name: Get the details of a webhook.
text: >
az acr webhook show -n MyWebhook -r MyRegistry
"""
helps['acr webhook update'] = """
type: command
short-summary: Update a webhook.
examples:
- name: Update headers for a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --headers "Authorization=Basic 000000"
- name: Update the service URI and actions for a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push delete
- name: Disable a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --status disabled
"""
helps['acr webhook get-config'] = """
type: command
short-summary: Get the service URI and custom headers for the webhook.
examples:
- name: Get the configuration information for a webhook.
text: >
az acr webhook get-config -n MyWebhook -r MyRegistry
"""
helps['acr webhook ping'] = """
type: command
short-summary: Trigger a ping event for a webhook.
examples:
- name: Trigger a ping event for a webhook.
text: >
az acr webhook ping -n MyWebhook -r MyRegistry
"""
helps['acr webhook list-events'] = """
type: command
short-summary: List recent events for a webhook.
examples:
- name: List recent events for a webhook.
text: >
az acr webhook list-events -n MyWebhook -r MyRegistry
"""
helps['acr replication list'] = """
type: command
short-summary: List all of the replications for a container registry.
examples:
- name: List replications and show the results in a table.
text: >
az acr replication list -r MyRegistry -o table
"""
helps['acr replication create'] = """
type: command
short-summary: Create a replication for a container registry.
examples:
- name: Create a replication for a container registry.
text: >
az acr replication create -r MyRegistry -l westus
"""
helps['acr replication delete'] = """
type: command
short-summary: Delete a replication from a container registry.
examples:
- name: Delete a replication from a container registry.
text: >
az acr replication delete -n MyReplication -r MyRegistry
"""
helps['acr replication show'] = """
type: command
short-summary: Get the details of a replication.
examples:
- name: Get the | |
import unittest
from collections import OrderedDict
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
# Create your tests here.
from pyexcel import Sheet, Book
from Grades.exceptions import GradeException
from Grades.models import *
from importer.forms import GradeUploadForm, TestGradeUploadForm, ImportStudentModule, COLUMN_TITLE_ROW, ImportModuleForm
from importer.views import make_grade
# NOTE(review): the "line 16" reference in the skip message is likely stale.
@unittest.skip("ImporterStressTest is ignored by default. Comment out line 16 in Importer/tests.py to test.")
class ImporterStressTest(TestCase):
    """Stress test: import a grade sheet for 100 module parts x 600 students.

    Skipped by default because fixture creation and the upload are slow.
    """

    def setUp(self):
        """Create one 2017 module edition with 100 parts/tests and 600 students."""
        Study.objects.create(abbreviation='TCS', name='Technical Computer Science')
        module_tcs = Module.objects.create(name='Parels der Informatica')
        user = User.objects.create(username='mverkleij', password='<PASSWORD>')
        teacher = Person.objects.create(name='<NAME>', university_number='m13377331', user=user)
        # objects.create() already saves, so the original's extra save() call
        # (a redundant UPDATE) has been dropped.
        module_ed = ModuleEdition.objects.create(module_code='201300070', module=module_tcs, year=2017, block='A1')
        module_parts = [
            ModulePart.objects.create(module_edition=module_ed, name='Parel {}'.format(i), teacher=[teacher])
            for i in range(100)]
        Coordinator.objects.create(module_edition=module_ed, person=teacher, is_assistant=False)
        # Plain loops for side effects (the original used throwaway comprehensions).
        for course in module_parts:
            Test.objects.create(name='Theory Test {}'.format(course.name), module_part=course, type='E')
        students = [
            Person.objects.create(name='<NAME> {}'.format(i), university_number='s1337{}'.format(i))
            for i in range(600)]
        for student in students:
            Studying.objects.create(module_edition=module_ed, person=student, role='s')

    def test_module_import(self):
        """Upload a full module grade sheet and expect a redirect to the grades page."""
        module_edition = ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij')[0]
        students = Person.objects.filter(studying__module_edition=module_edition)
        tests = Test.objects.filter(module_part__module_edition=module_edition)
        # Blank title rows, then a header row of (university_number, test pks).
        table = [['' for _ in range(len(tests) + 1)] for _ in range(COLUMN_TITLE_ROW)] + [
            ['university_number'] + [test.pk for test in tests]]
        for student in students:
            # Grades cycle through 1..9.
            table.append([student.university_number] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
        # Materialize the workbook on disk; the original bound the unused
        # return values ('sheet', 'content') and built an unused GradeUploadForm
        # that leaked an open file handle.
        Sheet(sheet=table).save_as(filename='test.xlsx')
        self.client.force_login(User.objects.get(username='mverkleij'))
        with open('test.xlsx', 'rb') as fh:  # close promptly instead of leaking
            file = ContentFile(fh.read())
        file.name = 'test.xlsx'
        response = self.client.post('/importer/import_module/{}'.format(module_edition.pk),
                                    {'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
        self.assertRedirects(response, '/grades/modules/{}/'.format(module_edition.pk))
class ImportModuleViewTest(TestCase):
def setUp(self):
tcs = Study.objects.create(abbreviation='TCS', name='Technical Computer Science')
module_tcs = Module.objects.create(name='Parels der Informatica')
user = User.objects.create(username='mverkleij', password='<PASSWORD>')
teacher = Person.objects.create(name='<NAME>', university_number='m13377331', user=user)
module_ed = ModuleEdition.objects.create(module_code='201300070', module=module_tcs, year=2017, block='A1')
module_ed2 = ModuleEdition.objects.create(module_code='201300070', module=module_tcs, year=2018, block='A1')
module_parts = [
ModulePart.objects.create(module_edition=module_ed, name='Parel {}'.format(i), teacher=[teacher]) for i in
range(2)]
module_part_2 = ModulePart.objects.create(module_edition=module_ed2, name='Parel 3', teacher=[teacher])
Test.objects.create(name='Theory Test 1', module_part=module_part_2, type='E')
Coordinator.objects.create(module_edition=module_ed, person=teacher, is_assistant=False)
tests = [Test.objects.create(name='Theory Test {}'.format(course.name), module_part=course, type='E') for course
in module_parts]
students = [Person.objects.create(name='<NAME> {}'.format(i), university_number='s1337{}'.format(i)) for i
in range(2)]
[Studying.objects.create(module_edition=module_ed, person=student, role='s') for student in students]
# CORRECT TESTS
def test_module_description_import(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
students = Person.objects.filter(studying__module_edition=module_edition)
tests = Test.objects.filter(module_part__module_edition=module_edition)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.name + str for test in tests for str in ['', '_description'] ]]
for student in students:
table.append([student.university_number, student.name] + ['{}{}'.format((divmod(i, 9)[1] + 1), string)
if i != 1 else ''
for i in range(len(tests))
for string in ['', ' Goed gedaan']])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = ImportModuleForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
#
response = self.client.post('/importer/import_module/{}'.format(module_edition.pk), {'title': 'test.xlsx',
'file': file,
'title_row': COLUMN_TITLE_ROW + 1
})
self.assertTemplateUsed(response, template_name='importer/successfully_imported.html')
class ImporterTest(TestCase):
    def setUp(self):
        """Create two editions of one module: 2017 (2 parts, 2 tests, 2
        students, coordinated by mverkleij) and 2018 (1 part, 1 test)."""
        # '<PASSWORD>' / '<NAME>' are redaction artifacts from the original data.
        tcs = Study.objects.create(abbreviation='TCS', name='Technical Computer Science')
        module_tcs = Module.objects.create(name='Parels der Informatica')
        user = User.objects.create(username='mverkleij', password='<PASSWORD>')
        teacher = Person.objects.create(name='<NAME>', university_number='m13377331', user=user)
        module_ed = ModuleEdition.objects.create(module_code='201300070', module=module_tcs, year=2017, block='A1')
        module_ed2 = ModuleEdition.objects.create(module_code='201300070', module=module_tcs, year=2018, block='A1')
        # NOTE(review): passing teacher=[teacher] at create() time suggests a
        # custom field/manager in Grades.models; stock Django would reject a
        # list for an M2M here -- confirm against the model definitions.
        module_parts = [
            ModulePart.objects.create(module_edition=module_ed, name='Parel {}'.format(i), teacher=[teacher]) for i in
            range(2)]
        module_part_2 = ModulePart.objects.create(module_edition=module_ed2, name='Parel 3', teacher=[teacher])
        Test.objects.create(name='Theory Test 1', module_part=module_part_2, type='E')
        Coordinator.objects.create(module_edition=module_ed, person=teacher, is_assistant=False)
        # One exam-type test per module part of the 2017 edition.
        tests = [Test.objects.create(name='Theory Test {}'.format(course.name), module_part=course, type='E') for course
                 in module_parts]
        students = [Person.objects.create(name='<NAME> {}'.format(i), university_number='s1337{}'.format(i)) for i
                    in range(2)]
        # Enroll every student in the 2017 edition.
        [Studying.objects.create(module_edition=module_ed, person=student, role='s') for student in students]
# CORRECT TESTS
def test_module_import(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
students = Person.objects.filter(studying__module_edition=module_edition)
tests = Test.objects.filter(module_part__module_edition=module_edition)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.pk for test in tests]]
for student in students:
table.append([student.university_number, student.name] + [divmod(i, 9)[1] + 1 if i != 1 else '' for i in
range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/import_module/{}'.format(module_edition.pk), {'title': 'test.xlsx',
'file': file,
'title_row': COLUMN_TITLE_ROW + 1
})
self.assertTemplateUsed(response, template_name='importer/successfully_imported.html')
def test_module_part_import(self):
module_part = ModulePart.objects.filter(module_edition__coordinator__person__user__username='mverkleij')[0]
students = Person.objects.filter(studying__module_edition__modulepart=module_part)
tests = Test.objects.filter(module_part=module_part)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.pk for test in tests]]
for student in students:
table.append([student.university_number, student.name] + [divmod(i, 9)[1] + 1 if i != 1 else '' for i in
range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/module_part/{}'.format(module_part.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTemplateUsed(response, template_name='importer/successfully_imported.html')
def test_test_import(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
test = Test.objects.filter(module_part__module_edition=module_edition)[0]
students = Person.objects.filter(studying__module_edition=module_edition)
table = [['' for _ in range(4)] for _ in range(COLUMN_TITLE_ROW)] + \
[['university_number', 'name', 'grade', 'description']]
for student in students:
table.append([student.university_number, student.name, 6, ''])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = TestGradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/test/{}'.format(test.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertRedirects(response, '/grades/tests/{}/'.format(test.pk))
# Test import by name
def test_module_import_by_name(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
students = Person.objects.filter(studying__module_edition=module_edition)
tests = Test.objects.filter(module_part__module_edition=module_edition)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.name for test in tests]]
for student in students:
table.append([student.university_number, student.name] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/import_module/{}'.format(module_edition.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTemplateUsed(response, template_name='importer/successfully_imported.html')
def test_module_part_import_by_name(self):
module_part = ModulePart.objects.filter(module_edition__coordinator__person__user__username='mverkleij')[0]
students = Person.objects.filter(studying__module_edition__modulepart=module_part)
tests = Test.objects.filter(module_part=module_part)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.name for test in tests]]
for student in students:
table.append([student.university_number, student.name] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/module_part/{}'.format(module_part.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTemplateUsed(response, template_name='importer/successfully_imported.html')
# TEST INVALID STUDENT NUMBER
def test_module_import_invalid_university_number(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
students = Person.objects.filter(studying__module_edition=module_edition)
tests = Test.objects.filter(module_part__module_edition=module_edition)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.pk for test in tests]]
for student in students:
table.append(
[student.university_number + '1', student.name] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/import_module/{}'.format(module_edition.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTrue('Enroll these students first before retrying' in response.content.decode())
for student in students:
self.assertTrue(student.university_number + '1' in response.content.decode())
def test_module_part_import_invalid_university_number(self):
module_part = ModulePart.objects.filter(module_edition__coordinator__person__user__username='mverkleij')[0]
students = Person.objects.filter(studying__module_edition__modulepart=module_part)
tests = Test.objects.filter(module_part=module_part)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.pk for test in tests]]
for student in students:
table.append(
[student.university_number + '1', student.name] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/module_part/{}'.format(module_part.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTrue('Enroll these students first before retrying' in response.content.decode())
for student in students:
self.assertTrue(student.university_number + '1' in response.content.decode())
def test_test_import_invalid_university_number(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
test = Test.objects.filter(module_part__module_edition=module_edition)[0]
students = Person.objects.filter(studying__module_edition=module_edition)
table = [['' for _ in range(4)] for _ in range(COLUMN_TITLE_ROW)] + \
[['university_number', 'name', 'grade', 'description']]
for student in students:
table.append([student.university_number + '1', student.name, 6, ''])
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = TestGradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/test/{}'.format(test.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + 1})
self.assertTrue('Enroll these students first before retrying' in response.content.decode())
for student in students:
self.assertTrue(student.university_number + '1' in response.content.decode())
# INVALID GRADE
def test_module_import_invalid_grade(self):
module_edition = \
ModuleEdition.objects.filter(coordinator__person__user__username='mverkleij').filter(year='2017')[0]
students = Person.objects.filter(studying__module_edition=module_edition)
tests = Test.objects.filter(module_part__module_edition=module_edition)
table = [['' for _ in range(len(tests) + 2)] for _ in range(COLUMN_TITLE_ROW)] + [
['university_number', 'name'] + [test.pk for test in tests]]
for student in students:
table.append(
[student.university_number, student.name] + [divmod(i, 9)[1] + 1 for i in range(len(tests))])
table[-1][2] = 'a'
sheet = Sheet(sheet=table)
content = sheet.save_as(filename='test.xlsx')
self.client.force_login(User.objects.get(username='mverkleij'))
form = GradeUploadForm(files={'file': SimpleUploadedFile('test.xlsx', open('test.xlsx', 'rb').read())})
file = ContentFile(open('test.xlsx', 'rb').read())
file.name = 'test.xlsx'
response = self.client.post('/importer/import_module/{}'.format(module_edition.pk),
{'title': 'test.xlsx', 'file': file, 'title_row': COLUMN_TITLE_ROW + | |
Resource(object):
_swagger_client = None
_mapping_schema = None
_class_name = None
_api = None
def __new__(cls, **kwargs):
try:
resource, operation = cls._mapping_schema["methods"][
"create"
].split(".")
except:
raise ms_exp.MarketSightError("No 'create' endpoint")
callable_o = getattr(getattr(cls._swagger_client, resource), operation)
instance_class_name = None
for param_spec in callable_o.operation.op_spec["parameters"]:
if param_spec.get("name") == "model" and param_spec.get("schema"):
instance_class_name = (
param_spec["schema"]["$ref"].split("/").pop()
)
if instance_class_name:
properties = cls._swagger_client.swagger_spec.spec_dict[
"definitions"
][instance_class_name]["properties"]
for field_name, prop_schema in properties.items():
if "$ref" in prop_schema:
property_class_name = prop_schema["$ref"].split("/").pop()
property_class = cls._swagger_client.get_model(
property_class_name
)
kwargs[field_name] = property_class(
**kwargs.get(field_name, {})
)
return type(
cls._class_name,
(cls._swagger_client.get_model(instance_class_name),),
{},
)(**kwargs)
@classmethod
def __dir__(cls):
return [x for x in cls._api._schema[cls.__name__]["methods"].keys()]
def conv2dict(obj):
    """Recursively convert a swagger ``Model`` instance into a plain dict.

    Attributes whose raw value is falsy, or whose converted form is falsy,
    are dropped. Non-``Model`` objects are returned unchanged.

    The original evaluated ``getattr``/``conv2dict`` up to three times per
    attribute inside the comprehension, making deep conversions exponential;
    each attribute is now converted exactly once.
    """
    if not issubclass(type(obj), Model):
        return obj
    result = {}
    for attr in obj:
        value = getattr(obj, attr)
        if not value:
            continue
        converted = conv2dict(value)
        if converted:
            result[attr] = converted
    return result
class ObjectFactory:
    """Attribute-style access to the swagger spec's model definitions.

    ``factory.SomeModel`` returns whatever is registered under ``"SomeModel"``
    in the owning client's swagger spec definitions.
    """

    def __init__(self, api):
        # Owning ApplicationAPI instance; its _swagger_client holds the spec.
        self._api = api

    def __getattr__(self, item):
        try:
            return self._api._swagger_client.swagger_spec.definitions[item]
        except KeyError:
            # The original executed `raise('No schema for {item}')`, which
            # raises TypeError (a str is not an exception) and never
            # interpolated the name. Raise AttributeError so hasattr()/dir()
            # based tooling behaves correctly.
            raise AttributeError(f"No schema for {item}") from None

    def __dir__(self):
        return self._api._swagger_client.swagger_spec.definitions.keys()
class ApplicationAPI(object):
"""The **MarketSight API Client** primary API object."""
_latest_response = None
_swagger_client = None
client_id = None
_helper = None
_method_alias = {
"Partners": {
"partners_partner_id": "retrieve"
}
}
_schema = {}
    def _create_options_alias(self, resource_name, operation_id):
        """Derive a friendly alias for a raw swagger operation id.

        Operation ids look like ``<Prefix>_<VerbNoun...>``. When the verb is an
        HTTP method name and the noun rebuilds the resource name, the alias
        becomes a CRUD verb: ``create`` (Post), ``retrieve`` (Get + singular),
        ``list`` (Get + plural), ``delete`` (Delete), ``create_or_modify``
        (Put) or ``modify`` (Patch). Otherwise the CamelCase words are joined
        with underscores and lower-cased.

        :returns: ``(resource_name, alias)``
        """
        loc_resource_name = resource_name
        result = []
        # Text after the first "_", or the whole id when there is no "_".
        ident = (operation_id.split("_", 1) + [operation_id])[1]
        # Split CamelCase into words (handles runs of capitals).
        matches = re.finditer(
            ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", ident
        )
        res = [m.group(0) for m in matches]
        # Collapse ["Verb", "Two", "Part"] into ["Verb", "Two_Part"] when the
        # last two words rebuild the (singular or plural) resource name.
        if res.__len__() == 3:
            if (
                res[0] in ("Post", "Delete", "Get", "Put", "Patch")
                and res[1] + "_" + res[2] == resource_name[:-1]
            ) or (res[0] == "Get" and res[1] + "_" + res[2] == resource_name):
                res = [res[0], res[1] + "_" + res[2]]
        if res[0] == "Post":
            if res.__len__() == 2 and res[1] == resource_name[:-1]:
                result.append("create")
            else:
                result = res
        elif res[0] == "Delete":
            if res.__len__() == 2 and res[1] == resource_name[:-1]:
                result.append("delete")
            else:
                result = res
        elif res[0] == "Get":
            if res.__len__() == 2 and res[1] == resource_name[:-1]:
                # Singular resource name -> fetch one item.
                result.append("retrieve")
            elif res.__len__() == 2 and res[1] == resource_name:
                # Plural (exact) resource name -> fetch the collection.
                result.append("list")
            else:
                result = res
        elif res[0] == "Put":
            if res.__len__() == 2 and res[1] == resource_name[:-1]:
                result.append("create_or_modify")
            else:
                result = res
        elif res[0] == "Patch":
            if res.__len__() == 2 and res[1] == resource_name[:-1]:
                result.append("modify")
            else:
                result = res
        else:
            result = res
        # remove `resource_name` from the method.
        # api.Charts.export_chart will be just `api.Charts.export`
        if resource_name == "Data_Views":
            try:
                # Drop the redundant "Data View" words from Data_Views aliases.
                if result[1] == "Data" and result[2] == "View":
                    result = [result[0]] + result[3:]
            except:  # noqa: E722 -- IndexError when the alias is too short
                pass
        result = list(filter(lambda x: x != resource_name[:-1], result))
        result = map(lambda x: x.lower(), result)
        return loc_resource_name, "_".join(result)
def _create_operations_aliases(self):
result = {}
for (
resource_name,
resource,
) in self._swagger_client.swagger_spec.resources.items():
for operation_id in resource.operations:
(
local_resource_name,
local_operation_id,
) = self._create_options_alias(resource_name, operation_id)
if not result.get(local_resource_name):
result[local_resource_name] = {"methods": {}}
local_operation_id = self._method_alias.get(
local_resource_name, {}
).get(local_operation_id, local_operation_id)
result[local_resource_name]["methods"][
local_operation_id
] = "{}.{}".format(resource_name, operation_id)
return result
def __init__(
self,
api_url,
spec_dict = None,
http_client = None,
config = None,
method_alias = {},
helper = None,
authenticator_class: Authenticator = BearerAuthenticator,
ssl_verify = True
):
"""Instantiate an ``ApplicationAPI`` object to interact with the Dynata
Reporting & Analytics API.
:param api_url: The API URL that will be called to interact with the Dynata
Reporting & Analytics API. Defaults to
``'https://application.marketsight.com/api/v1/'``.
:type api_url: :class:`str <python:str>`
:param spec_dict: a :class:`dict <python:dict>` containing the OpenAPI v.2.0
(Swagger) specification as a JSON-like object. Defaults to
:obj:`None <python:None>`
:type spec_dict: :class:`dict <python:dict>` / :obj:`None <python:None>`
:param http_client: The HTTP client to use for requests. Defaults to
:obj:`None <python:None>`
:param config: :class:`dict <python:dict>` with custom configuration options.
Defaults to :obj:`None <python:None>`
:type config: :class:`dict <python:dict>` / :obj:`None <python:None>`
:param method_alias: A dictionary providing default aliasing for methods.
Defaults to :obj:`None <python:None>`
:type method_alias: :class:`dict <python:dict>` / :obj:`None <python:None>`
:param helper: A helper class or instance that is used to facilitate
delayed requests and asynchronous :term:`Jobs <Job>`. Defaults to
:obj:`None <python:None>`
:type helper: :class:`Helper <marketsight.application_api.Helper>`
instance or subclass / :obj:`None <python:None>`
:returns: A **MarketSight API Client** instance
:rtype: :class:`ApplicationAPI <marketsight.application_api.ApplicationAPI>`
:raises validator_collection.errors.InvalidURLError: if ``api_url`` is not a valid URL
:raises validator_collection.errors.EmptyValueError: if ``api_url`` or ``version``
are empty
:raises validator_collection.errors.CannotCoerceError: if ``spec_dict``,
``method_alias``, or ``config`` are not empty and cannot be coerced to
a :class:`dict <python:dict>`
:raises HelperError: if ``helper`` is not a
:class:`Helper <marketsight.application_api.Helper>` instance, subclass,
or :obj:`None <python:None>`
"""
api_url = validators.url(api_url, allow_empty = False)
spec_dict = validators.dict(spec_dict, allow_empty = True)
config = validators.dict(config, allow_empty = True)
method_alias = validators.dict(method_alias, allow_empty = True)
if helper and not inspect.isclass(helper) and not checkers.is_type(helper, 'Helper'):
raise ms_exp.HelperError(
'helper must be a class object, or a Helper instance, but was an '
'instance of %s' % helper.__class__.__name__
)
self._swagger_client = ApplicationApiSwaggerClient.from_spec(
api_url = api_url,
spec_dict = spec_dict,
http_client = http_client,
config = config,
authenticator_class = authenticator_class,
ssl_verify = ssl_verify,
)
if method_alias:
self._method_alias.update(**method_alias)
self._schema = self._create_operations_aliases()
self.helper = helper
self._definitions = ObjectFactory(self)
def connect(self,
client_id = None, # partner's id
client_secret = None, # partner's secret
token = None, # or token,
_sa_token = None):
"""Authorize the instance against the MarketSight API.
:param client_id: The Partner ID that you were supplied to authenticate
against the API. Defaults to :obj:`None <python:None>`
:type client_id: :class:`str <python:str>` / :obj:`None <python:None>`
:param client_secret: The Partner Secret that you were supplied to
authenticate against the API. Defaults to :obj:`None <python:None>`
:type client_secret: :class:`str <python:str>` / :obj:`None <python:None>`
:param token: An access token to connect with. Defaults to
:obj:`None <python:None>`
:type token: :class:`str <python:str>` / :obj:`None <python:None>`
:raises ValueError: if ``client_id``/``client_secret`` and ``token`` are
all empty
"""
if not any([all([client_id, client_secret]), token, _sa_token]):
raise ValueError('must supply either a client_id/client_secret '
'combination or a token')
if any([client_id, client_secret, token]):
if token:
self.access_token = token
elif all([client_id, client_secret]):
_token = self.OAuth.token(
grant_type = "client_credentials",
client_id = client_id,
client_secret = client_secret,
)
self.access_token = _token.access_token
else:
raise ValueError(
"client_id and client_secret cannot both be empty"
)
elif _sa_token:
self.access_token = ""
sd = self._swagger_client.swagger_spec.spec_dict
referer = f"{sd['schemes'][0]}://{sd['host']}{sd['basePath']}"
session = self._swagger_client.swagger_spec.http_client.session
session.cookies = cookiejar_from_dict(
dict(MarketSightAuth=_sa_token)
)
session.headers = {"Referer": referer}
@classmethod
def from_url(cls,
url,
client_id,
client_secret,
method_alias = None):
"""Instantiate an
:class:`ApplicationAPI <marketsight.application_api.ApplicationAPI>`
object to interact with the Dynata Reporting & Analytics API based on a
remote OpenAPI v.2.0 (Swagger) specification.
:param url: The URL where the OpenAPI v.2.0 (Swagger) specification
resides. Defaults to :obj:`None <python:None>`
:type url: :class:`str <python:str>` / :obj:`None <python:None>`
:param client_id: The Partner ID that you were supplied to authenticate
against the API. Defaults to :obj:`None <python:None>`
:type client_id: :class:`str <python:str>` / :obj:`None <python:None>`
:param client_secret: The Partner Secret that you were supplied to
authenticate against the API. Defaults to :obj:`None <python:None>`
:type client_secret: :class:`str <python:str>` / :obj:`None <python:None>`
:param method_alias: A dictionary providing default aliasing for methods.
Defaults to :obj:`None <python:None>`
:type method_alias: :class:`dict <python:dict>` / :obj:`None <python:None>`
:returns: A **MarketSight API Client** instance
:rtype: :class:`ApplicationAPI <marketsight.application_api.ApplicationAPI>`
:raises validator_collection.errors.InvalidURLError: if ``url`` is not a valid URL
:raises marketsight.exceptions.MarketSightError: if unable to retrieve
the OpenAPI specification from ``url``
"""
url = validators.url(url, allow_empty = False)
response = requests.get(url)
if response.status_code == 200:
return cls(response.json(), client_id, client_secret, method_alias)
raise ms_exp.MarketSightError('Unable to retrieve the OpenAPI '
'Specification from "{}"'.format(url))
    @property
    def models(self):
        """The factory of API data-model definitions (read-only).

        :rtype: :class:`ObjectFactory`
        """
        return self._definitions
def __getattr__(self, item):
try:
schema = self._schema[item]
except KeyError:
raise ValueError("Resource `{}` doesn't exist".format(item))
_bases = [Resource]
def _create_class_method(method_name):
def _class_method(cls, *argv, **kwargs):
method_name = _class_method.__name__
if method_name in cls._mapping_schema["methods"]:
swagger_resource, swagget_operation = cls._mapping_schema[
"methods"
][method_name].split(".")
callable_operation = getattr(
getattr(cls._swagger_client, swagger_resource),
swagget_operation,
)
if argv:
kwargs.update(
zip(
tuple(
[
k
for k, v in callable_operation.operation.params.items()
if v.location
in ["path", "body", "formData"]
]
),
argv,
)
)
# fill kwargs with the not empty default value
for k, v in callable_operation.operation.params.items():
if (
hasattr(v, "default")
and v.default
and k not in kwargs
):
kwargs.update({k: v.default})
if k.endswith("_id") and k not in kwargs:
kwargs.update({k: "-"})
for key, value in kwargs.copy().items():
kwargs[key] = conv2dict(value)
if hasattr(callable_operation.operation, 'produces'):
produces = callable_operation.operation.produces
else:
produces = []
_request_options = kwargs.get("_request_options", {})
_custom_headers = _request_options.get("headers", {})
_custom_headers = {
idx.lower(): value.lower()
for idx, value in _custom_headers.items()
}
if _custom_headers.get("accept") and \
_custom_headers.get("accept") not | |
<gh_stars>10-100
#stuff on aerials: https://samuelpmish.github.io/notes/RocketLeague/aerial_control/, https://youtu.be/SmIuaXpSgBQ
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator, GameInfoState, BoostState
from util.ball_prediction_analysis import find_slice_at_time, find_slices_around_time
from util.boost_pad_tracker import BoostPadTracker
from util.sequence import Sequence, ControlStep
from util.orientation import Orientation, relative_location
from util.vec import Vec3
from util.car_model import Car, Ball
from util.strike import find_strikes, execute_strike, check_strike, Strike, strike_types
from util.mechanics import *
import math
from random import randint
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.active_sequence: Sequence = None
self.boost_pad_tracker = BoostPadTracker()
self.controls = SimpleControllerState()
self.bot_car = None
self.ball = None
self.allies=[]
self.foes=[]
self.ball_prediction = None
self.posts=((Vec3(893,-5120,0),Vec3(-893,-5120,0)),(Vec3(893,5120,0),Vec3(-893,5120,0)))
self.back_corners=((Vec3(3672,-4096,0),Vec3(-3672,-4096,0)),(Vec3(3672,4096,0),Vec3(-3672,4096,0)))
self.collision_posts=((Vec3(843,-5070,0),Vec3(-843,-5070,0)),(Vec3(843,5070,0),Vec3(-843,5070,0)))
self.goal_corners=((Vec3(-893,-6000,0),Vec3(893,-5120,642.775)),(Vec3(-893,5120,0),Vec3(893,6000,642.775)))
self.boxes=((Vec3(-1600,-6000,0),Vec3(1600,-4120,2044)),(Vec3(-1600,4120,0),Vec3(1600,6000,2044)))
self.defending = False
self.rotating = False
self.supporting = 0
self.clearing = False
self.shooting = False
self.air_recovery = False
self.current_strike = None
    def initialize_agent(self):
        """One-time setup hook called by the framework once the match is live."""
        # Set up information about the boost pads now that the game is active and the info is available
        self.boost_pad_tracker.initialize_boosts(self.get_field_info())
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
""" Keep our boost pad info updated with which pads are currently active"""
self.boost_pad_tracker.update_boost_status(packet)
"""Update cars and ball"""
#ball
if self.ball is None:
self.ball = Ball(packet)
else:
self.ball.update(packet)
self.ball_prediction = self.get_ball_prediction_struct()
#draw 3 sec of path
self.renderer.draw_polyline_3d([Vec3(ball_slice.physics.location) for ball_slice in self.ball_prediction.slices[:180:5]],self.renderer.yellow())
#self
if self.bot_car is None:
self.bot_car = Car(self.index,packet)
elif self.bot_car.index != self.index:
self.bot_car = Car(self.index,packet)
else:
self.bot_car.update(packet)
#check if number of players has changed, and reset allies and foes if it has
if len(self.allies)+len(self.foes)+1!=len(packet.game_cars):
self.allies,self.foes = [],[]
#allies
if len(self.allies)==0:
for index in range(packet.num_cars):
if packet.game_cars[index].team==self.bot_car.team and index!=self.bot_car.index:
self.allies.append(Car(index,packet))
else:
for car in self.allies:
car.update(packet)
#foes
if len(self.foes)==0:
for index in range(packet.num_cars):
if packet.game_cars[index].team!=self.bot_car.team:
self.foes.append(Car(index,packet))
else:
for car in self.foes:
car.update(packet)
"""Continue and active sequences"""
if self.active_sequence is not None and not self.active_sequence.done:
controls = self.active_sequence.tick(packet)
if controls is not None:
return controls
self.controls = SimpleControllerState()
"""put cars in positions (for testing) (set air recovery atm)
if self.bot_car.grounded and self.bot_car.team==0:
car_state0= CarState(boost_amount=45, physics=Physics(velocity=Vector3(x=randint(-1000,1000),y=randint(-1000,1000),z=randint(-1000,1000)),location=Vector3(x=0,y=-2608,z=1000),rotation=Rotator(randint(-300,300)/100,randint(-300,300)/100,randint(-300,300)/100),angular_velocity=Vector3(randint(-300,300)/100,randint(-300,300)/100,randint(-300,300)/100)))
car_state1= CarState(boost_amount=45, physics=Physics(velocity=Vector3(x=randint(-1000,1000),y=randint(-1000,1000),z=randint(-1000,1000)),location=Vector3(x=0,y=2608,z=1000),rotation=Rotator(randint(-300,300)/100,randint(-300,300)/100,randint(-300,300)/100),angular_velocity=Vector3(randint(-300,300)/100,randint(-300,300)/100,randint(-300,300)/100)))
if self.bot_car.index!=self.bot_car.team:
car_state0,car_state1=car_state1,car_state0
ball_state= BallState(Physics(velocity=Vector3(x=0,y=0,z=0),location=Vector3(x=0,y=0,z=92.75),rotation=Rotator(0,0,0),angular_velocity=Vector3(0,0,0)))
self.set_game_state(GameState(ball=ball_state,cars={0:car_state0,1:car_state1}))
"""
"""draw in info for blocking testing
self.renderer.draw_string_2d(20,20,3,3, f"z: {self.bot_car.location.z}", self.renderer.white())
if self.bot_car.stable:
return self.long_jump(packet)
else:
return SimpleControllerState()
"""
"""kickoff NOTE: the diagonal flips need to be more sideways than forward when updating for the diagonal and long"""
if self.ball.location.flat().length()<1 and self.ball.velocity.flat().length()<1 and packet.game_info.is_kickoff_pause:
ally_on_short = False
for ally in self.allies:
if Vec3(-2048,-2560,0).dist(ally.location)<50 or Vec3(-2048,2560,0).dist(ally.location)<50 or Vec3(2048,-2560,0).dist(ally.location)<50 or Vec3(2048,2560,0).dist(ally.location)<50:
ally_on_short = True
if self.bot_car.location.flat().dist(Vec3(-2048,-2560,0))<50 or self.bot_car.location.flat().dist(Vec3(2048,2560,0))<50:
self.active_sequence, first_frame = right_diagonal(packet)
return first_frame
elif self.bot_car.location.flat().dist(Vec3(2048,-2560,0))<50 or self.bot_car.location.flat().dist(Vec3(-2048,2560,0))<50:
self.active_sequence, first_frame = left_diagonal(packet)
return first_frame
elif (self.bot_car.location.flat().dist(Vec3(-256,-3840))<50 or self.bot_car.location.flat().dist(Vec3(256,3840,0))<50) and not ally_on_short:
self.active_sequence, first_frame = long_right(packet)
return first_frame
elif (self.bot_car.location.flat().dist(Vec3(256,-3840,0))<50 or self.bot_car.location.flat().dist(Vec3(-256,3840,0))<50) and not ally_on_short:
self.active_sequence, first_frame = long_left(packet)
return first_frame
elif (self.bot_car.location.flat().dist(Vec3(0,-4608,0))<50 or self.bot_car.location.flat().dist(Vec3(0,4608,0))<50) and len(self.allies)==0:
self.active_sequence, first_frame = back_kick(packet)
return first_frame
else:
self.active_sequence, first_frame = kickoff_idle(packet)
return first_frame
"""defend check"""
if self.ball.velocity.flat().length()!=0:
post0_ang = self.posts[self.bot_car.team][0].__sub__(self.ball.location).flat().ang_to(Vec3(1,0,0))
ball_vel_ang = self.ball.velocity.flat().ang_to(Vec3(1,0,0))
post1_ang = self.posts[self.bot_car.team][1].__sub__(self.ball.location).flat().ang_to(Vec3(1,0,0))
between_posts = post0_ang < ball_vel_ang < post1_ang
moving_at_posts = self.ball.velocity.y<0 and self.ball.location.y<4000 if self.bot_car.team==0 else self.ball.velocity.y>0 and self.ball.location.y>-4000
self.defending = True if between_posts and moving_at_posts else False
"""rotate check"""
if self.rotating:
#check for reasons to stop
far_enough_behind_ball = self.bot_car.location.y-self.ball.location.y<-4000 if self.bot_car.team==0 else self.bot_car.location.y-self.ball.location.y>4000
about_to_hit_backwall = self.bot_car.location.y <-4100 if self.bot_car.team==0 else self.bot_car.location.y >4100
dunk_on_box = self.ball.location.within(self.boxes[(self.bot_car.team+1)%2][0],self.boxes[(self.bot_car.team+1)%2][1]) and min([foe.vec_to_ball.length() for foe in self.foes])>1000 and len(self.allies)==0
self.rotating = not( far_enough_behind_ball or about_to_hit_backwall or dunk_on_box )
else:
#check for reasons to start
wrong_side_of_ball_and_not_deep = self.bot_car.location.y-self.ball.location.y>0 and self.bot_car.location.y >-4000 if self.bot_car.team==0 else self.bot_car.location.y-self.ball.location.y<0 and self.bot_car.location.y <4000
vec_to_goal = Vec3(0, 5200*(self.bot_car.team*2-1),0) - self.bot_car.location
ball_to_goal = Vec3(0, 5200*(self.bot_car.team*2-1),0) - self.ball.location
unproductive_to_keep_chasing = vec_to_goal.length() < ball_to_goal.length()
self.rotating = wrong_side_of_ball_and_not_deep
"""support check"""
self.supporting=0
try:
if self.bot_car.orientation.forward.ang_to(self.bot_car.vec_to_ball)<1.5 and (self.bot_car.team ==0 and self.bot_car.velocity.normalized().y>-0.7 or self.bot_car.team ==1 and self.bot_car.velocity.normalized().y<0.7):
#self in a position to look to go
for ally in self.allies:
if ally.orientation.forward.ang_to(ally.vec_to_ball)<1.5 and (ally.team ==0 and ally.velocity.normalized().y>-0.7 or ally.team ==1 and ally.velocity.normalized().y<0.7) and (ally.assumed_maneuver =="BALL" or ally.assumed_maneuver =="SUPPORT"):
if ally.vec_to_ball.length() < self.bot_car.vec_to_ball.length():
self.supporting += 1
else:
#self in a position to look to go, allowing an extra 500 for bots that are facing ball
for ally in self.allies:
if ally.orientation.forward.ang_to(ally.vec_to_ball)<1.5 and (ally.team ==0 and ally.velocity.normalized().y>-0.7 or ally.team ==1 and ally.velocity.normalized().y<0.7) and (ally.assumed_maneuver =="BALL" or ally.assumed_maneuver =="SUPPORT"):
if ally.vec_to_ball.length() < self.bot_car.vec_to_ball.length():
self.supporting += 1
else:
if ally.vec_to_ball.length() < self.bot_car.vec_to_ball.length()+500:
self.supporting += 1
except:
stub="don't want to manually catch div 0"
if self.ball.location.within(self.boxes[0][0],self.boxes[0][1]) or self.ball.location.within(self.boxes[1][0],self.boxes[1][1]):
#box panic and dunk, both in one
self.supporting = max(0,self.supporting-1)
#draw in boxes
for xy in [(-1600,4140),(-1600,-4140),(1600,4140),(1600,-4140)]:
self.renderer.draw_line_3d(Vec3(xy[0],xy[1],0), Vec3(xy[0],xy[1],2000), self.renderer.red())
"""clear check"""
in_half = self.ball.location.y < -2000 if self.bot_car.team==0 else self.ball.location.y > 2000
self.clearing = in_half
"""shoot check"""
self.shooting=True
"""air recovery check"""
if self.current_strike is None:
self.air_recovery = not self.bot_car.grounded and self.bot_car.location.z>100
else:
self.air_recovery = not self.bot_car.grounded and self.current_strike.strike_type != "will add the aerial strike code later" and self.bot_car.location.z>100
"""if ball threatening net but not on target overide"""
if self.supporting ==0 and self.ball.location.y*math.copysign(1,self.bot_car.team*2-1)>3000:
self.defending = True
"""defending, but third override"""
if self.supporting==2 and self.defending:
self.defending==False
#dribble code is just linear target code with no offset and a little bit of turning
if self.air_recovery:
self.perform_air_recovery(packet)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'AIR RECOVERY', self.renderer.white())
elif self.defending:
self.defend(packet)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'DEFENDING', self.renderer.white())
elif self.rotating:
self.rotate(packet)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'ROTATING', self.renderer.white())
elif self.supporting>0:
self.support(packet,self.supporting)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'SUPPORTING', self.renderer.white())
elif self.clearing:
self.clear(packet)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'CLEARING', self.renderer.white())
elif self.shooting:
self.shoot(packet)
self.renderer.draw_string_3d(self.bot_car.location, 1, 1, f'SHOOTING', self.renderer.white())
return self.controls
"""tools"""
def steer_toward(self,car: Car, target:Vec3):
#always call after throttle set
angle = car.orientation.forward.signed_ang_to(target-car.location)
if angle<-1.7 and car.grounded and car.vec_to_ball.flat().length()>500:
self.controls.steer = -1
if car.velocity.length()>1200:
self.controls.throttle*=-1
self.controls.handbrake=True
elif angle<-0.1 and car.grounded:
self.controls.steer = -1
self.controls.handbrake=False
elif angle>1.7 and car.grounded and car.vec_to_ball.flat().length()>500:
self.controls.steer = 1
if car.velocity.length()>1200:
self.controls.throttle*=-1
self.controls.handbrake=True
elif angle>0.1 and car.grounded:
self.controls.steer = 1
self.controls.handbrake=False
else:
self.controls.steer = 0
self.controls.handbrake=False
def point_in_field(self,vec):
if abs(vec.x)>4096:
vec = vec * (4096/abs(vec.x))
if abs(vec.y)>5120:
vec = vec * (5120/abs(vec.y))
if abs(vec.z)>2044:
vec = vec * (2044/abs(vec.z))
return vec
"""Routines"""
def shoot(self,packet):
#get vector of ball to the back of other net
ideal_shot = Vec3(0,6000,0) - self.ball.location.flat() if self.bot_car.team==0 else Vec3(0,-6000,0) - self.ball.location.flat()
#continue any strike after checking it
if self.current_strike is not None:
if check_strike(packet, self.ball_prediction, self.current_strike):
self.active_sequence, strike_controls, strike_location, strike_time = execute_strike(packet,self.bot_car,self.current_strike,self.foes)
self.controls = strike_controls
self.renderer.draw_rect_3d(strike_location, 8, 8, True, self.renderer.red(), centered=True)
self.renderer.draw_line_3d(self.bot_car.location, strike_location, self.renderer.white())
self.renderer.draw_string_2d(20,20,3,3,f"throttle: {self.controls.throttle}",self.renderer.white())
return
else:
self.current_strike = None
#try to find strikes
if self.ball.velocity.length()!=0 and self.current_strike is None and self.bot_car.stable:
strikes = find_strikes(packet, self.ball_prediction, self.bot_car, ideal_shot)
for strike in strikes:
if strike.strike_type==strike_types.simple_linear:
#linear
if (Vec3(0,6000*(1-self.bot_car.team*2),0) - strike.slice_location).ang_to(self.bot_car.orientation.forward) < 2:
#if linear strike is not a massive angle from the goal
if self.current_strike is not None:
self.current_strike = strike if strike.slice_time<self.current_strike.slice_time else self.current_strike
else:
self.current_strike = strike
elif strike.strike_type==strike_types.linear_jump:
#long jump
if (Vec3(0,6000*(1-self.bot_car.team*2-1),0) - strike.slice_location).ang_to(self.bot_car.orientation.forward) < 2:
#if linear strike is not a massive angle from the goal
if self.current_strike is not None:
self.current_strike = strike if strike.slice_time<self.current_strike.slice_time else self.current_strike
else:
self.current_strike = strike
elif strike.strike_type==strike_types.linear_dblj:
#double jump
if (Vec3(0,6000*(1-self.bot_car.team*2-1),0) - strike.slice_location).ang_to(self.bot_car.orientation.forward) < 2:
#if linear strike is not a massive angle from the goal
if self.current_strike is not None:
self.current_strike = strike if strike.slice_time<self.current_strike.slice_time else self.current_strike
else:
self.current_strike = strike
#execute straight away if one was chosen
if self.current_strike is not None:
self.active_sequence, strike_controls, strike_location, strike_time = execute_strike(packet,self.bot_car,self.current_strike,self.foes)
self.controls = strike_controls
self.renderer.draw_rect_3d(strike_location, 8, 8, True, self.renderer.red(), centered=True)
self.renderer.draw_line_3d(self.bot_car.location, strike_location, self.renderer.white())
self.renderer.draw_string_2d(20,20,3,3,f"throttle: {self.controls.throttle}",self.renderer.white())
return
#position to get a shot on the ball
future_location, future_velocity = Vec3(self.ball.location), Vec3(self.ball.velocity)
future_slice = find_slice_at_time(self.ball_prediction,packet.game_info.seconds_elapsed + 2)
if future_slice is not None:
future_location = Vec3(future_slice.physics.location)
future_velocity = Vec3(future_slice.physics.velocity)
self.renderer.draw_line_3d(self.ball.location, future_location, self.renderer.cyan())
target_location = self.point_in_field(future_location.flat()+ideal_shot.rescale(-500))
self.controls.throttle = 1.0
self.steer_toward(self.bot_car, target_location)
self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.cyan(), centered=True)
self.renderer.draw_line_3d(self.bot_car.location, target_location, self.renderer.white())
return
##################################################################################################
def clear(self,packet):
#get vector of ball from the cone of own net
ideal_shot = self.ball.location.flat() - Vec3(0,-8000,0) if self.bot_car.team==0 else self.ball.location.flat() - Vec3(0,8000,0)
#find a future position based off the distance from the ball, using the current location as a backup
future_location = self.ball.location
future_velocity = self.ball.velocity
| |
vessel_type='Floating roof',
vessel_material='Stainless steel')
T620.line = 'SAStorageTank'
T620_P = units.TALPump('T620_P', ins=T620-0, outs=SA)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T607 = units.TALStorageTank('T607', ins=D402_H-0, tau=7*24, V_wf=0.9,
# vessel_type='Floating roof',
# vessel_material='Stainless steel')
# T607.line = 'AcetoinStorageTank'
# T607_P = units.TALPump('T607_P', ins=T607-0, outs=Acetoin)
# # 7-day storage time, similar to ethanol's in Humbird et al.
# T608 = units.TALStorageTank('T608', ins=D403_H-0, tau=7*24, V_wf=0.9,
# vessel_type='Floating roof',
# vessel_material='Stainless steel')
# T608.line = 'IBAStorageTank'
# T608_P = units.TALPump('T608_P', ins=T608-0, outs=IBA)
CIP = facilities.CIP('CIP901', ins=CIP_chems_in, outs='CIP_chems_out')
ADP = facilities.ADP('ADP902', ins=plant_air_in, outs='plant_air_out',
ratio=get_flow_tpd()/2205)
FWT = units.FireWaterTank('FWT903', ins=fire_water_in, outs='fire_water_out')
CWP = facilities.CWP('CWP802', ins='return_chilled_water',
outs='process_chilled_water')
# M505-0 is the liquid/solid mixture, R501-0 is the biogas, blowdown is discharged
# BT = facilities.BT('BT', ins=(M505-0, R501-0,
# FGD_lime, boiler_chems,
# baghouse_bag, natural_gas,
# 'BT_makeup_water'),
# B_eff=0.8, TG_eff=0.85,
# combustibles=combustibles,
# side_streams_to_heat=(water_M201, water_M202, steam_M203),
# outs=('gas_emission', ash, 'boiler_blowdown_water'))
BT = bst.facilities.BoilerTurbogenerator('BT701',
ins=(M505-0,
R501-0,
'boiler_makeup_water',
'natural_gas',
'lime',
'boilerchems'),
outs=('gas_emission', 'boiler_blowdown_water', ash,),
turbogenerator_efficiency=0.85,
natural_gas_price=price['Natural gas'])
# BT = bst.BDunits.BoilerTurbogenerator('BT',
# ins=(M505-0, R501-0, 'boiler_makeup_water', 'natural_gas', FGD_lime, boiler_chems),
# boiler_efficiency=0.80,
# turbogenerator_efficiency=0.85)
# Blowdown is discharged
CT = facilities.CT('CT801', ins=('return_cooling_water', cooling_tower_chems,
'CT_makeup_water'),
outs=('process_cooling_water', 'cooling_tower_blowdown'))
# All water used in the system, here only consider water usage,
# if heating needed, then heeating duty required is considered in BT
process_water_streams = (enzyme_water,
aerobic_caustic,
CIP.ins[-1], BT.ins[-1], CT.ins[-1])
PWC = facilities.PWC('PWC904', ins=(system_makeup_water, S504-0),
process_water_streams=process_water_streams,
recycled_blowdown_streams=None,
outs=('process_water', 'discharged_water'))
# Heat exchange network
HXN = bst.facilities.HeatExchangerNetwork('HXN1001',
ignored=lambda:[
H401,
H402,
H403,
H404,
AC401.heat_exchanger_drying,
AC401.heat_exchanger_regeneration,
F401.components['condenser'],
],
cache_network=True,
force_ideal_thermo=True,
)
# HXN = HX_Network('HXN')
# %%
# =============================================================================
# Complete system
# =============================================================================
# Build the flowsheet and expose commonly used handles.
TAL_sys = create_TAL_sys()
f = bst.main_flowsheet
u = f.unit
s = f.stream
feedstock = s.feedstock
SA = s.SA
# Dry feedstock flow in short tons (907.185 kg) per day, at 24 h/day.
get_flow_tpd = lambda: (feedstock.F_mass - feedstock.imass['H2O'])*24/907.185
# Streams with a nonzero price participate in the TEA; SA is always a product.
# BUG FIX (redundancy): each set was previously built by concatenating the
# identical comprehension with itself, which added nothing.
TEA_feeds = {i for i in TAL_sys.feeds if i.price}
TEA_products = {i for i in TAL_sys.products if i.price} | {SA}
# Make every unit reachable by its ID at module level (e.g. `R302`).
for ui in u:
    globals().update({ui.ID: ui})
# %%
# =============================================================================
# TEA
# =============================================================================
# TAL_tea = CellulosicEthanolTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
# depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
# lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
# startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
# startup_VOCfrac=0.75, WC_over_FCI=0.05,
# finance_interest=0.08, finance_years=10, finance_fraction=0.4,
# # biosteam Splitters and Mixers have no cost,
# # cost of all wastewater treatment units are included in WWT_cost,
# # BT is not included in this TEA
# OSBL_units=(u.U101, u.WWT_cost,
# u.T601, u.T602, u.T603, u.T606, u.T606_P,
# u.CWP, u.CT, u.PWC, u.CIP, u.ADP, u.FWT, u.BT),
# warehouse=0.04, site_development=0.09, additional_piping=0.045,
# proratable_costs=0.10, field_expenses=0.10, construction=0.20,
# contingency=0.10, other_indirect_costs=0.10,
# labor_cost=3212962*get_flow_tpd()/2205,
# labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
# steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT)
# TAL_no_BT_tea = TAL_tea
# Techno-economic analysis of the full system. Assumptions mirror the
# NREL/Humbird et al. design-report basis used elsewhere in this project
# (MACRS7 depreciation, 21% income tax, 90% uptime, 40% debt financing
# at 8% over 10 years); the boiler-turbogenerator gets MACRS20.
TAL_tea = TALTEA(system=TAL_sys, IRR=0.10, duration=(2016, 2046),
        depreciation='MACRS7', income_tax=0.21, operating_days=0.9*365,
        lang_factor=None, construction_schedule=(0.08, 0.60, 0.32),
        startup_months=3, startup_FOCfrac=1, startup_salesfrac=0.5,
        startup_VOCfrac=0.75, WC_over_FCI=0.05,
        finance_interest=0.08, finance_years=10, finance_fraction=0.4,
        # biosteam Splitters and Mixers have no cost,
        # cost of all wastewater treatment units are included in WWT_cost,
        # BT is not included in this TEA
        OSBL_units=(u.U101, u.WWTcost501,
                    # u.T601, u.T602,
                    u.T603, u.T604, u.T620,
                    # u.T606, u.T606_P,
                    u.CWP802, u.CT801, u.PWC904, u.CIP901, u.ADP902, u.FWT903, u.BT701),
        warehouse=0.04, site_development=0.09, additional_piping=0.045,
        proratable_costs=0.10, field_expenses=0.10, construction=0.20,
        contingency=0.10, other_indirect_costs=0.10,
        # labor scaled from a 2205 dry-ton/day reference plant
        labor_cost=3212962*get_flow_tpd()/2205,
        labor_burden=0.90, property_insurance=0.007, maintenance=0.03,
        steam_power_depreciation='MACRS20', boiler_turbogenerator=u.BT701)
# Alias kept for code that distinguishes a no-boiler-turbogenerator TEA.
TAL_no_BT_tea = TAL_tea
# # Removed because there is not double counting anyways.
# # Removes feeds/products of BT_sys from TAL_sys to avoid double-counting
# for i in BT_sys.feeds:
# TAL_sys.feeds.remove(i)
# for i in BT_sys.products:
# TAL_sys.products.remove(i)
# Boiler turbogenerator potentially has different depreciation schedule
# BT_tea = bst.TEA.like(BT_sys, TAL_no_BT_tea)
# BT_tea.labor_cost = 0
# Changed to MACRS 20 to be consistent with Humbird
# BT_tea.depreciation = 'MACRS20'
# BT_tea.OSBL_units = (BT,)
#%% Define unit groups
# Area names in flowsheet order; zipped onto the auto-grouped areas below.
area_names = [
    'feedstock',
    # 'pretreatment',
    'conversion',
    'separation',
    'wastewater',
    'storage',
    'co-heat and power',
    'cooling tower and chilled water package',
    'other facilities',
    'heat exchanger network',
]
# u.CWP901.ID = 'CWP802'
# Rename the chilled water package so it falls in the same (800) area as the
# cooling tower when grouping by area.
for ui in u:
    if type(ui) == bst.ChilledWaterPackage:
        ui.ID = 'CWP802' # group with CT for system cooling demand
        break
unit_groups = bst.UnitGroup.group_by_area(TAL_sys.units)
# Extra pseudo-group so natural gas cost can be reported separately.
unit_groups.append(bst.UnitGroup('natural gas'))
for i, j in zip(unit_groups, area_names): i.name = j
for i in unit_groups: i.autofill_metrics(shorthand=True,
                                         electricity_production=True,
                                         material_cost=True)
# Zero out metrics that would otherwise double-count costs/duties reported
# elsewhere (e.g. utility costs already captured by the facilities).
for i in unit_groups:
    if i.name == 'storage' or i.name=='other facilities' or i.name == 'cooling tower and chilled water package':
        i.metrics[-1].getter = lambda: 0. # Material cost
    if i.name == 'cooling tower and chilled water package':
        i.metrics[1].getter = lambda: 0. # Cooling duty
# Locate the heat exchanger network unit and report its raw (unfiltered) savings.
HXN = None
for HXN_group in unit_groups:
    if HXN_group.name == 'heat exchanger network':
        HXN_group.filter_savings = False
        HXN = HXN_group.units[0]
        assert isinstance(HXN, bst.HeatExchangerNetwork)
# The 'natural gas' pseudo-group's material cost is the boiler's gas bill.
# NOTE: the lambda resolves `BT` late, at call time (after it is bound below).
unit_groups[-1].metrics[-1] = bst.evaluation.Metric('Mat. cost',
                                    getter=lambda: BT.natural_gas_price * BT.natural_gas.F_mass,
                                    units='USD/hr',
                                    element=None)
unit_groups_dict = {}
for i in unit_groups:
    unit_groups_dict[i.name] = i
# HXN.force_ideal_thermo = True
# Convenience aliases for the facilities.
CT = u.CT801
BT = u.BT701
CWP = u.CWP802
# %%
# =============================================================================
# Simulate system and get results
# =============================================================================
def get_SA_MPSP():
    """Simulate the system to convergence, then return the minimum product
    selling price of SA in $/kg, normalized to its TAL content."""
    # Repeat simulation and price-solving a few times so recycles and the
    # price converge.
    for _ in range(3):
        TAL_sys.simulate()
    for _ in range(3):
        SA.price = TAL_tea.solve_price(SA)
    return SA.price * SA.F_mass / SA.imass['TAL']
# Process specification driver used by the analyses to load fermentation
# yield (spec_1), titer (spec_2), and productivity (spec_3).
spec = ProcessSpecification(
    evaporator = None,
    pump = None,
    mixer = u.M304,
    heat_exchanger = u.M304_H,
    seed_train_system = [],
    reactor= u.R302,
    reaction_name='fermentation_reaction',
    substrates=('Xylose', 'Glucose'),
    products=('TAL',),
    spec_1=0.19,
    spec_2=15.,
    spec_3=0.19,
    xylose_utilization_fraction = 0.80,
    feedstock = feedstock,
    dehydration_reactor = None,
    byproduct_streams = [],
    HXN = u.HXN1001,
    maximum_inhibitor_concentration = 1.,
    # pre_conversion_units = process_groups_dict['feedstock_group'].units + process_groups_dict['pretreatment_group'].units + [u.H301], # if the line below does not work (depends on BioSTEAM version)
    pre_conversion_units = TAL_sys.split(u.M304.ins[0])[0],
    # set baseline fermentation performance here
    baseline_yield = 0.19,
    baseline_titer = 15.,
    baseline_productivity = 0.19,
    # baseline_yield = 0.30,
    # baseline_titer = 25.,
    # baseline_productivity = 0.19,
    feedstock_mass = feedstock.F_mass,
    pretreatment_reactor = None)
# Yield and productivity use the stock loaders; the titer loader is replaced
# further down by a custom glucose-based routine.
spec.load_spec_1 = spec.load_yield
# spec.load_spec_2 = spec.load_titer
spec.load_spec_3 = spec.load_productivity
def M304_titer_obj_fn(water_to_sugar_mol_ratio):
    """Residual between the fermenter's effluent titer and the target titer,
    as a function of the water-to-sugar mole ratio set on mixer M304.

    Used as the objective for the IQ-interpolation root solve in
    load_titer_with_glucose. The unit `_run`/specification calls below must
    stay in this exact order — they re-simulate the dilution/fermentation
    train after the ratio change.
    """
    mixer, fermenter = u.M304, u.R302
    mixer.water_to_sugar_mol_ratio = water_to_sugar_mol_ratio
    mixer.specification[0][0]()
    u.M304_H._run()
    u.S302._run()
    u.R303._run()
    u.T301._run()
    fermenter.specification[0][0]()
    # broth = R302.outs[0]
    # return broth.imass['TAL']/broth.F_vol - R302.titer_to_load
    return fermenter.effluent_titer - fermenter.titer_to_load
def load_titer_with_glucose(titer_to_load):
    """Load a fermentation titer by solving for the M304 water-to-sugar
    mole ratio that achieves it, then retune the AC401 regeneration velocity."""
    spec.spec_2 = titer_to_load
    u.R302.titer_to_load = titer_to_load
    flx.IQ_interpolation(M304_titer_obj_fn, 1e-3, 20000.)
    # Heuristic (linear fit of simulation results at target_recovery=0.99):
    # the MPSP-minimizing regeneration velocity grows linearly with titer
    # from 3.1158 at a titer of 3 up to a cap of 14.4 at a titer of 30.
    slope = (14.4 - 3.1158) / (30. - 3.)
    u.AC401.regeneration_velocity = min(14.4, 3.1158 + slope*(titer_to_load - 3.))
    # u.AC401.regeneration_velocity = 14.4
# Install the custom titer loader defined above.
spec.load_spec_2 = load_titer_with_glucose
# path = (F301, R302)
# @np.vectorize
# def calculate_titer(V):
#     F301.V = V
#     for i in path: i._run()
#     return spec._calculate_titer()
# @np.vectorize
# def calculate_MPSP(V):
#     F301.V = V
#     TAL_sys.simulate()
#     MPSP = SA.price = TAL_tea.solve_price(SA, TAL_no_BT_tea)
#     return MPSP
# vapor_fractions = np.linspace(0.20, 0.80)
# titers = calculate_titer(vapor_fractions)
# MPSPs = calculate_MPSP(vapor_fractions)
# import matplotlib.pyplot as plt
# plt.plot(vapor_fractions, titers)
# plt.show()
# plt.plot(titers, MPSPs)
# plt.show()
# %% Full analysis
def simulate_and_print():
    """Simulate the system and print the key techno-economic results."""
    # Solve once and reuse the value: every get_SA_MPSP() call re-simulates
    # the system and re-solves the product price, so the original pattern of
    # calling it again inside the print doubled the work.
    MPSP = get_SA_MPSP()
    print('\n---------- Simulation Results ----------')
    print(f'MPSP is ${MPSP:.3f}/kg')
    # print(f'GWP is {get_GWP():.3f} kg CO2-eq/kg SA')
    # print(f'FEC is {get_FEC():.2f} MJ/kg SA or {get_FEC()/SA_LHV:.2f} MJ/MJ SA')
    # print(f'SPED is {get_SPED():.2f} MJ/kg SA or {get_SPED()/SA_LHV:.2f} MJ/MJ SA')
    # print('--------------------\n')
# simulate_and_print()
# TAL_sys.simulate()
# Warm-up solve before loading the baseline specification.
get_SA_MPSP()
# u.AC401.cycle_time = 4.
u.AC401.drying_time = 0.5 #!!! Drying time is updated to this value (overwritten the value passed during initialization)
# Load the baseline yield/titer/productivity set above, then report results.
spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
simulate_and_print()
# %%
# =============================================================================
# For Monte Carlo and analyses
# =============================================================================
# Unit groupings for Monte Carlo / sensitivity analyses. Every group is
# currently commented out, so this is intentionally an empty dict; the
# consistency checks below it are commented out for the same reason.
TAL_sub_sys = {
    # 'feedstock_sys': (U101,),
    # 'pretreatment_sys': (T201, M201, M202, M203,
    #                      R201, R201_H, T202, T203,
    #                      F201, F201_H,
    #                      M204, T204, T204_P,
    #                      M205, M205_P),
    # 'conversion_sys': (H301, M301, M302, R301, R302, T301),
    # 'separation_sys': (S401, M401, M401_P,
    #                    S402,
    #                    # F401, F401_H, X401,
    #                    D401, D401_H, D401_P, S403,
    #                    M402_P, S403,
    #                    D403, D403_H, D403_P,
    #                    M501,
    #                    T606, T606_P, T607, T607_P)
    #                    # F402, F402_H, F402_P,
    #                    # D405, D405_H1, D405_H2, D405_P,
    #                    # M401, M401_P)
    # 'wastewater_sys': (M501, WWT_cost, R501,
    #                    M502, R502, S501, S502, M503,
    #                    M504, S503, S504, M505),
    # 'HXN': (HXN,),
    # 'BT': (BT,),
    # 'CT': (CT,),
    # 'other_facilities': (T601, S601,
    #                      T602, T603,
    #                      T604, T604_P,
    #                      T605, T605_P,
    #                      T606, T606_P,
    #                      PWC, CIP, ADP, FWT)
    }
# for unit in sum(TAL_sub_sys.values(), ()):
# if not unit in TAL_sys.units:
# print(f'{unit.ID} not in TAL_sys.units')
# for unit in TAL_sys.units:
# if not unit in sum(TAL_sub_sys.values(), ()):
# print(f'{unit.ID} not in TAL_sub_sys')
#%% TEA breakdown
def TEA_breakdown(print_output=False):
metric_breakdowns = {i.name: {} for i in unit_groups[0].metrics}
for ug in unit_groups:
for metric in ug.metrics:
# storage_metric_val = None
if not ug.name=='storage':
if ug.name=='other facilities':
metric_breakdowns[metric.name]['storage and ' + ug.name] = metric() + unit_groups_dict['storage'].metrics[ug.metrics.index(metric)]()
else:
metric_breakdowns[metric.name][ug.name] = metric()
# if ug.name=='natural gas':
# if metric.name=='Mat. cost':
# metric_breakdowns[metric.name][ug.name] = BT.natural_gas.F_mass*BT.natural_gas_price
# else:
# storage_metric_val = metric()
# print and return metric_breakdowns
if print_output:
for i in unit_groups[0].metrics:
print(f"\n\n----- {i.name} ({i.units}) -----")
metric_breakdowns_i = metric_breakdowns[i.name]
for j in | |
a 3D array of shape (n_lons, n_lats, n_heights)
containing the TEC data.
- If an epoch is "None", no epoch was provided for that map.
rms_maps: dictionary, with the same specification as tec_maps except with RMS data.
height_maps: dictionary, with the same specification as tec_maps except with height map data.
satellite_biases: dictionary with keys "GPS" and "GLONASS"
- satellite_biases['GPS'] contains another dictionary (keyed by PRN). Each
dictionary value is a 2-tuple, where the first element is the satellite bias,
and the second element is the rms.
- satellite_biases['GLONASS'] is the same as the GPS case, but for GLONASS.
station_biases: dictionary with keys "GPS" and "GLONASS"
- station_biases['GPS'] contains another dictionary (keyed by site ID).
Each dictionary value is a 3-tuple, where the first element is the bias,
the second element is the rms, and the third element is the DOMES number.
- station_biases['GLONASS'] is the same as the GPS case, but for GLONASS.
The description of the DOMES numbering can be found
in MERIT/COTES JOINT WORKING GROUPS, MERIT CAMPAIGN: CONNECTION
OF REFERENCE FRAMES, IMPLEMENTATION PLAN, 1983
"""
with open(path_to_file, 'rb') as ionex_file:
header_info, satellite_biases, station_biases = read_header(ionex_file)
starting_epoch = header_info['start_epoch']
stopping_epoch = header_info['stop_epoch']
# import time
# starting_sec = time.mktime(starting_epoch.timetuple())
# stopping_sec = time.mktime(stopping_epoch.timetuple())
starting_ht = header_info['height1']
stopping_ht = header_info['height2']
dh = header_info['dh']
starting_lat = header_info['lat1']
stopping_lat = header_info['lat2']
dlat = header_info['dlat']
starting_lon = header_info['lon1']
stopping_lon = header_info['lon2']
dlon = header_info['dlon']
exponent = header_info['exponent']
if dh == 0:
n_heights = 1
else:
n_heights = int((stopping_ht - starting_ht)/dh) + 1
if dlat == 0:
n_lats = 1
else:
n_lats = int((stopping_lat - starting_lat)/dlat) + 1
if dlon == 0:
n_lons = 1
else:
n_lons = int((stopping_lon - starting_lon)/dlon) + 1
if n_lons > 1:
lons = np.arange(starting_lon, stopping_lon + dlon, dlon)
else:
lons = np.array([starting_lon])
if n_lats > 1:
lats = np.arange(starting_lat, stopping_lat + dlat, dlat)
else:
lats = [starting_lat]
if n_heights > 1:
heights = np.arange(starting_ht, stopping_ht + dh, dh)
else:
heights = [starting_ht]
# maps are keyed by their map_id #
tec_maps = {}
rms_maps = {}
height_maps = {}
done = False
while not done:
# CHECK WHICH TYPE OF MAP THE NEXT DATA BLOCK IS FOR #
# MAPS ARE STORED IN A DICTIONARY THAT IS KEYED BY THE ID OF THE MAP #
label, content = read_misc_line(ionex_file)
if label is None:
done = True
elif 'START OF TEC MAP' in label:
split = content.split()
try:
map_id = int(split[0])
except Exception as e:
print(e)
raise
epoch, data_block = read_map(ionex_file, n_lons, n_lats, n_heights, exponent)
tec_maps[map_id] = (epoch, data_block)
elif 'START OF RMS MAP' in label:
split = content.split()
try:
map_id = int(split[0])
except Exception as e:
print(e)
raise
epoch, data_block = read_map(ionex_file, n_lons, n_lats, n_heights, exponent)
rms_maps[map_id] = (epoch, data_block)
elif 'START OF HEIGHT MAP' in label:
split = content.split()
try:
map_id = int(split[0])
except Exception as e:
print(e)
raise
epoch, data_block = read_map(ionex_file, n_lons, n_lats, n_heights, exponent)
height_maps[map_id] = (epoch, data_block)
return lons, lats, heights, tec_maps, rms_maps, height_maps, satellite_biases, station_biases
def raw(path_to_file):
    """
    raw:
        Read the IONEX file path_to_file and return a 7-tuple:
        (lons, lats, time_grid, tec_grids, rms_grids, satellite_biases,
        station_biases).
        lons and lats are 1D numpy arrays defining the spatial grid;
        time_grid is a list of datetimes defining the time grid; tec_grids is
        a 3D numpy array of shape (n_lons, n_lats, n_times) of TEC data;
        rms_grids has the same shape when RMS maps are present in the file
        and is None otherwise. The bias dictionaries are keyed by
        "GPS"/"GLONASS" exactly as returned by parser(); see parser() for
        their layout and the DOMES numbering reference.
    """
    (lons, lats, heights, tec_maps, rms_maps,
     height_maps, satellite_biases, station_biases) = parser(path_to_file)
    time_grid = [entry[0] for entry in tec_maps.values()]
    grid_shape = (len(lons), len(lats), len(time_grid))
    tec_grids = np.zeros(grid_shape)
    for index, map_id in enumerate(tec_maps):
        tec_grids[:, :, index] = tec_maps[map_id][1][:, :, 0]
    # RMS grids are optional in the IONEX standard.
    if rms_maps:
        rms_grids = np.zeros(grid_shape)
        for index, map_id in enumerate(tec_maps):
            rms_grids[:, :, index] = rms_maps[map_id][1][:, :, 0]
    else:
        rms_grids = None
    return (lons, lats, time_grid, tec_grids, rms_grids,
            satellite_biases, station_biases)
def decreasing(array):
    """Return True if *array* starts in decreasing order.

    Only the first two elements are inspected (the IONEX lon/lat/height axes
    are monotonic, so two elements determine the direction); sequences with
    fewer than two elements are reported as not decreasing.
    """
    # Direct boolean expression instead of if/else returning True/False.
    return len(array) > 1 and array[1] < array[0]
def interpolate2D_spatial(path_to_file, spatial_grid, method = 'linear'):
"""
interpolate2D_spatial:
Reads in the IONEX file path_to_file and performs spatial interpolation on the data in 2D.
spatial_grid is interpreted as a 2-tuple. The first entry in the tuple should be a 1D list
of longitudes. The second entry in the tuple should be a 1D list of latitudes.
The return value will be a 5-tuple. The first entry in the tuple is a 1D list of datetimes that
define the time grid. The last two entries are 3D numpy arrays with shape
(n_lons, n_lats, n_times) where n_times is the length of the time grid in the IONEX file and
n_lons, n_lats are the length of the longitude and latitude arrays specified in spatial_grid.
If the RMS data is not available in the IONEX file, None is returned in place of the 3D numpy
array. The fourth and fifth entries are the satellite biases and station biases, respectively.
satellite_biases: dictionary with keys "GPS" and "GLONASS"
- satellite_biases['GPS'] contains another dictionary (keyed by PRN). Each
dictionary value is a 2-tuple, where the first element is the satellite bias,
and the second element is the rms.
- satellite_biases['GLONASS'] is the same as the GPS case, but for GLONASS.
station_biases: dictionary with keys "GPS" and "GLONASS"
- station_biases['GPS'] contains another dictionary (keyed by site ID).
Each dictionary value is a 3-tuple, where the first element is the bias,
the second element is the rms, and the third element is the DOMES number.
- station_biases['GLONASS'] is the same as the GPS case, but for GLONASS.
The description of the DOMES numbering can be found
in MERIT/COTES JOINT WORKING GROUPS, MERIT CAMPAIGN: CONNECTION
OF REFERENCE FRAMES, IMPLEMENTATION PLAN, 1983
spatial_method specifies the spatial interpolation type:
'nearest' or 'linear'
default: 'linear'
"""
# TODO: unify outputs, write documentation, generate movie
lons, lats, heights, tec_maps, rms_maps, height_maps, satellite_biases, station_biases = parser(path_to_file)
time_grid = [tec_maps[key][0] for key in tec_maps]
si_grid_tec = np.zeros([len(spatial_grid[0]), len(spatial_grid[1]), len(time_grid)])
if len(rms_maps) > 0:
si_grid_rms = np.zeros([len(spatial_grid[0]), len(spatial_grid[1]), len(time_grid)])
else:
si_grid_rms = None
if spatial_grid is not None:
# SPATIAL INTERPOLATION #
from scipy import interpolate
# FORCE THE DATA TO BE STRICTLY INCREASING #
if decreasing(lons):
lons = lons[::-1]
for key in tec_maps:
tec_maps[key][1][:,:,0] = tec_maps[key][1][::-1,:,0]
if len(rms_maps) > 0:
rms_maps[key][1][:,:,0] = rms_maps[key][1][::-1,:,0]
if decreasing(lats):
lats = lats[::-1]
for key in tec_maps:
tec_maps[key][1][:,:,0] = tec_maps[key][1][:,::-1,0]
if len(rms_maps) > 0:
rms_maps[key][1][:,:,0] = rms_maps[key][1][:,::-1,0]
for i, key in enumerate(tec_maps):
tec_map = tec_maps[key][1][:,:,0]
s_interpolator_tec = interpolate.RegularGridInterpolator((lons, lats), tec_map, method = method, bounds_error = False)
if len(rms_maps) > 0:
rms_map = rms_maps[key][1][:,:,0]
s_interpolator_rms = interpolate.RegularGridInterpolator((lons, lats), rms_map, method = method, | |
# Source: darobin/critic -- testing/virtualbox.py (dump markers converted to a comment so the file stays parseable)
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2013 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import sys
import os
import subprocess
import time
import fcntl
import select
import errno
import datetime
import testing
def flag_pwd_independence(commit_sha1):
    """Return True if the commit contains the pwd-independence flag file."""
    command = ["git", "ls-tree", commit_sha1,
               "testing/flags/pwd-independence.flag"]
    listing = subprocess.check_output(command)
    # git ls-tree prints nothing when the path is absent in the commit.
    return bool(listing.strip())
def flag_minimum_password_hash_time(commit_sha1):
    """Return True if installation/config.py at the commit mentions
    --minimum-password-hash-time (i.e. the installer supports the option)."""
    argv = ["git", "grep", "--quiet", "-e", "--minimum-password-hash-time",
            commit_sha1, "--", "installation/config.py"]
    try:
        # git grep --quiet exits zero on a match, non-zero otherwise.
        subprocess.check_call(argv)
        return True
    except subprocess.CalledProcessError:
        return False
# Directory (on the guest system) in which coverage data is stored when an
# Instance is created with coverage=True (passed as --coverage-dir).
COVERAGE_DIR = "/var/tmp/critic/coverage"
def setnonblocking(fd):
    """Set the O_NONBLOCK flag on file descriptor *fd*."""
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class HostCommandError(testing.InstanceError):
    """Raised when a command run on the host system fails."""
    def __init__(self, command, output):
        self.command = command
        self.output = output
        message = "HostCommandError: %s\nOutput:\n%s" % (command, output)
        super(HostCommandError, self).__init__(message)
class GuestCommandError(testing.InstanceError):
    """Raised when a command run on the guest system fails."""
    def __init__(self, command, stdout, stderr=None):
        self.command = command
        self.stdout = stdout
        self.stderr = stderr
        # Prefer stderr for the message; fall back to stdout when empty.
        details = stderr or stdout
        message = "GuestCommandError: %s\nOutput:\n%s" % (command, details)
        super(GuestCommandError, self).__init__(message)
class Instance(object):
    def __init__(self, vboxhost, identifier, snapshot, hostname, ssh_port,
                 install_commit=None, upgrade_commit=None, frontend=None,
                 strict_fs_permissions=False, coverage=False):
        """Wrap a VirtualBox VM used for testing.

        :param vboxhost: host running VirtualBox, as reachable from the guest
        :param identifier: VM name or UUID known to VBoxManage
        :param snapshot: snapshot (name or UUID) restored before each run
        :param hostname: SSH host name of the guest; defaults to *identifier*
        :param ssh_port: SSH port of the guest
        :param install_commit: optional (commit, description) pair to install
        :param upgrade_commit: optional (commit, description) pair to upgrade to
        :param frontend: frontend helper object, if any
        :param strict_fs_permissions: restrict $HOME/root access on the guest
        :param coverage: collect coverage data on the guest
        :raises testing.Error: if the VM or snapshot is missing or ambiguous
        """
        self.vboxhost = vboxhost
        self.identifier = identifier
        self.snapshot = snapshot
        self.hostname = hostname or identifier
        self.ssh_port = ssh_port
        if install_commit:
            self.install_commit, self.install_commit_description = install_commit
        if upgrade_commit:
            self.upgrade_commit, self.upgrade_commit_description = upgrade_commit
        self.frontend = frontend
        self.strict_fs_permissions = strict_fs_permissions
        self.coverage = coverage
        self.mailbox = None
        # Set by start(); __exit__ only powers the VM off if it was started.
        self.__started = False
        # Check that the identified VM actually exists:
        output = subprocess.check_output(
            ["VBoxManage", "list", "vms"],
            stderr=subprocess.STDOUT)
        if not self.__isincluded(output):
            raise testing.Error("Invalid VM identifier: %s" % identifier)
        # Check that the identified snapshot actually exists (and that there
        # aren't multiple snapshots with the same name):
        count = self.count_snapshots(snapshot)
        if count == 0:
            raise testing.Error("Invalid VM snapshot: %s (not found)" % snapshot)
        elif count > 1:
            raise testing.Error("Invalid VM snapshot: %s (matches multiple snapshots)" % snapshot)
    def __enter__(self):
        # Context-manager support: "with Instance(...) as instance:".
        return self
    def __exit__(self, *args):
        # Power the VM off if it was ever started; never suppress exceptions.
        if self.__started:
            self.stop()
        return False
def __vmcommand(self, command, *arguments):
argv = ["VBoxManage", command, self.identifier] + list(arguments)
try:
testing.logger.debug("Running: " + " ".join(argv))
return subprocess.check_output(argv, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
raise HostCommandError(" ".join(argv), error.output)
def __isincluded(self, output):
name = '"%s"' % self.identifier
uuid = '{%s}' % self.identifier
for line in output.splitlines():
if name in line or uuid in line:
return True
else:
return False
def isrunning(self):
output = subprocess.check_output(
["VBoxManage", "list", "runningvms"],
stderr=subprocess.STDOUT)
return self.__isincluded(output)
def state(self):
output = self.__vmcommand("showvminfo", "--machinereadable")
for line in output.splitlines():
if line.startswith("VMState="):
return eval(line[len("VMState="):])
return "<not found>"
    def count_snapshots(self, identifier):
        """Return how many of this VM's snapshots match *identifier*, which
        may be a snapshot name or a snapshot UUID."""
        try:
            output = subprocess.check_output(
                ["VBoxManage", "snapshot", self.identifier, "list"],
                stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            # Assuming we've already checked that 'self.identifier' is a valid
            # VM identifier, the most likely cause of this failure is that the
            # VM has no snapshots.
            return 0
        else:
            # Match the listing's "Name: <name> (UUID: <uuid>)" lines.
            name = "Name: %s (" % identifier
            uuid = "(UUID: %s)" % identifier
            count = 0
            for line in output.splitlines():
                if name in line or uuid in line:
                    count += 1
            return count
def wait(self):
testing.logger.debug("Waiting for VM to come online ...")
while True:
try:
self.execute(["true"], timeout=1)
except GuestCommandError:
time.sleep(0.5)
else:
break
    def start(self):
        """Restore the configured snapshot, boot the VM headless, wait for
        SSH, and synchronize the guest clock with the host."""
        testing.logger.debug("Starting VM: %s ..." % self.identifier)
        self.__vmcommand("snapshot", "restore", self.snapshot)
        self.__vmcommand("startvm", "--type", "headless")
        self.__started = True
        self.wait()
        # Set the guest system's clock to match the host system's. Since we're
        # restoring the same snapshot over and over, the guest system's clock is
        # probably quite far from the truth.
        now = datetime.datetime.utcnow().strftime("%m%d%H%M%Y.%S")
        self.execute(["sudo", "date", "--utc", now])
        testing.logger.info("Started VM: %s" % self.identifier)
    def stop(self):
        """Power the VM off and wait until VirtualBox reports "poweroff"."""
        testing.logger.debug("Stopping VM: %s ..." % self.identifier)
        self.__vmcommand("controlvm", "poweroff")
        while self.state() != "poweroff":
            time.sleep(0.1)
        # It appears the VirtualBox "session" can be locked for a while after
        # the "controlvm poweroff" command, and possibly after the VM state
        # changes to "poweroff", so sleep a little longer to avoid problems.
        time.sleep(0.5)
        testing.logger.info("Stopped VM: %s" % self.identifier)
def retake_snapshot(self, name):
index = 1
while True:
temporary_name = "%s-%d" % (name, index)
if self.count_snapshots(temporary_name) == 0:
break
index += 1
self.__vmcommand("snapshot", "take", temporary_name, "--pause")
self.__vmcommand("snapshot", "delete", name)
self.__vmcommand("snapshot", "edit", temporary_name, "--name", name)
    def execute(self, argv, cwd=None, timeout=None, interactive=False):
        """Run *argv* on the guest over SSH and return its stdout.

        :param argv: command and arguments to run on the guest
        :param cwd: directory on the guest to cd into before running
        :param timeout: SSH connect timeout in seconds
        :param interactive: if True, the subprocess inherits this process's
            stdio (output is then neither captured, logged, nor returned)
        :raises GuestCommandError: if the command exits with non-zero status
        """
        guest_argv = list(argv)
        if cwd is not None:
            guest_argv[:0] = ["cd", cwd, "&&"]
        host_argv = ["ssh"]
        if self.ssh_port != 22:
            host_argv.extend(["-p", str(self.ssh_port)])
        if timeout is not None:
            host_argv.extend(["-o", "ConnectTimeout=%d" % timeout])
        if not interactive:
            # -n: redirect ssh's stdin from /dev/null when not interactive.
            host_argv.append("-n")
        host_argv.append(self.hostname)
        testing.logger.debug("Running: " + " ".join(host_argv + guest_argv))
        process = subprocess.Popen(
            host_argv + guest_argv,
            stdout=subprocess.PIPE if not interactive else None,
            stderr=subprocess.PIPE if not interactive else None)
        class BufferedLineReader(object):
            # Incremental line reader over a non-blocking pipe: returns a full
            # line, "" at EOF, or None when no complete line is available yet.
            def __init__(self, source):
                self.source = source
                self.buffer = ""
            def readline(self):
                try:
                    while self.source is not None:
                        try:
                            line, self.buffer = self.buffer.split("\n", 1)
                        except ValueError:
                            pass
                        else:
                            return line + "\n"
                        data = self.source.read(1024)
                        if not data:
                            # EOF: flush whatever remains in the buffer below.
                            self.source = None
                            break
                        self.buffer += data
                    line = self.buffer
                    self.buffer = ""
                    return line
                except IOError as error:
                    # EAGAIN: the non-blocking pipe has no data right now.
                    if error.errno == errno.EAGAIN:
                        return None
                    raise
        stdout_data = ""
        stdout_reader = BufferedLineReader(process.stdout)
        stderr_data = ""
        stderr_reader = BufferedLineReader(process.stderr)
        if not interactive:
            # Multiplex stdout/stderr with poll(), logging each line as it
            # arrives while accumulating the full streams.
            setnonblocking(process.stdout)
            setnonblocking(process.stderr)
            poll = select.poll()
            poll.register(process.stdout)
            poll.register(process.stderr)
            stdout_done = False
            stderr_done = False
            while not (stdout_done and stderr_done):
                poll.poll()
                while not stdout_done:
                    line = stdout_reader.readline()
                    if line is None:
                        break
                    elif not line:
                        poll.unregister(process.stdout)
                        stdout_done = True
                        break
                    else:
                        stdout_data += line
                        testing.logger.log(testing.STDOUT, line.rstrip("\n"))
                while not stderr_done:
                    line = stderr_reader.readline()
                    if line is None:
                        break
                    elif not line:
                        poll.unregister(process.stderr)
                        stderr_done = True
                        break
                    else:
                        stderr_data += line
                        testing.logger.log(testing.STDERR, line.rstrip("\n"))
        process.wait()
        if process.returncode != 0:
            raise GuestCommandError(" ".join(argv), stdout_data, stderr_data)
        return stdout_data
def adduser(self, name, email=None, fullname=None, password=None):
if email is None:
email = <EMAIL>" % name
if fullname is None:
fullname = "%s von Testing" % name.capitalize()
if password is None:
password = "<PASSWORD>"
self.execute([
"sudo", "criticctl", "adduser", "--name", name, "--email", email,
"--fullname", "'%s'" % fullname, "--password", password,
"&&",
"sudo", "adduser", "--ingroup", "critic", "--disabled-password",
"--gecos", "''", name])
# Running all commands with a single self.execute() call is just an
# optimization; SSH sessions are fairly expensive to start.
self.execute([
"sudo", "mkdir", ".ssh",
"&&",
"sudo", "cp", "$HOME/.ssh/authorized_keys", ".ssh/",
"&&",
"sudo", "chown", "-R", name, ".ssh/",
"&&",
"sudo", "-H", "-u", name, "git", "config", "--global", "user.name",
"'%s'" % fullname,
"&&",
"sudo", "-H", "-u", name, "git", "config", "--global", "user.email",
email],
cwd="/home/%s" % name)
    def restrict_access(self):
        """Tighten filesystem permissions on the guest when
        strict_fs_permissions is enabled; otherwise do nothing."""
        if not self.strict_fs_permissions:
            return
        # Set restrictive access bits on home directory of the installing user
        # and of root, to make sure that no part of Critic's installation
        # process, or the background processes started by it, depend on being
        # able to access them as the Critic system user.
        self.execute(["sudo", "chmod", "-R", "go-rwx", "$HOME", "/root"])
        # Running install.py may have left files owned by root in $HOME. The
        # command above will have made them inaccessible for sure, so change
        # the ownership back to us.
        self.execute(["sudo", "chown", "-R", "$LOGNAME", "$HOME"])
def install(self, repository, override_arguments={}, other_cwd=False,
quick=False, interactive=False):
testing.logger.debug("Installing Critic ...")
if not interactive:
use_arguments = { "--headless": True,
"--system-hostname": self.hostname,
"--auth-mode": "critic",
"--session-type": "cookie",
"--admin-username": "admin",
"--admin-email": "<EMAIL>",
"--admin-fullname": "'Testing Administrator'",
"--admin-password": "<PASSWORD>",
"--smtp-host": self.vboxhost,
"--smtp-port": str(self.mailbox.port),
"--smtp-no-ssl-tls": True,
"--skip-testmail-check": True }
if self.mailbox.credentials:
use_arguments["--smtp-username"] = self.mailbox.credentials["username"]
use_arguments["--smtp-password"] = self.mailbox.credentials["password"]
if self.coverage:
use_arguments["--coverage-dir"] = COVERAGE_DIR
else:
use_arguments = { "--admin-password": "<PASSWORD>" }
if flag_minimum_password_hash_time(self.install_commit):
use_arguments["--minimum-password-hash-time"] = "0.01"
for name, value in override_arguments.items():
if value is None:
if name in use_arguments:
del use_arguments[name]
else:
use_arguments[name] = value
arguments = []
for name, value in use_arguments.items():
arguments.append(name)
if value is not True:
arguments.append(value)
# First install (if necessary) Git.
try:
self.execute(["git", "--version"])
except GuestCommandError:
testing.logger.debug("Installing Git ...")
self.execute(["sudo", "DEBIAN_FRONTEND=noninteractive",
"apt-get", "-qq", "update"])
self.execute(["sudo", "DEBIAN_FRONTEND=noninteractive",
"apt-get", "-qq", "-y", "install", "git-core"])
testing.logger.info("Installed Git: %s" % self.execute(["git", "--version"]).strip())
self.execute(["git", "clone", repository.url, "critic"])
self.execute(["git", "fetch", "--quiet", "&&",
"git", "checkout", "--quiet", self.install_commit],
cwd="critic")
if self.upgrade_commit:
output = subprocess.check_output(
["git", "log", "--oneline", self.install_commit, "--",
"background/servicemanager.py"])
for line in output.splitlines():
sha1, subject = line.split(" ", 1)
if subject == "Make sure | |
metadata
MetadataPrinter(self)
    def report_parse(self, sero_report):
        """
        Parse an existing report, and extract the results
        :param sero_report: type STR: Name and absolute path of the report
        """
        with open(sero_report, 'r') as report:
            # Skip the header row.
            next(report)
            for line in report:
                data = line.rstrip().split(',')
                for sample in self.runmetadata.samples:
                    # NOTE(review): substring matching -- a sample name that is
                    # a prefix of another (e.g. "S1" vs "S10") can match the
                    # wrong row; confirm sample names are never prefixes.
                    if sample.name in line:
                        # Second column appears to hold "O... (pid):H... (pid)"
                        # -- verify against the report writer.
                        if data[1]:
                            setattr(sample, self.analysistype, GenObject())
                            o_results, h_results = data[1].split(':')
                            sample[self.analysistype].o_set = [o_results.split(' ')[0]]
                            try:
                                sample[self.analysistype].best_o_pid = o_results.split(' ')[1].replace('(', '') \
                                    .replace(')', '')
                            except IndexError:
                                # No percent identity recorded for the O-type.
                                sample[self.analysistype].best_o_pid = 'ND'
                            sample[self.analysistype].h_set = [h_results.split(' ')[0]]
                            try:
                                sample[self.analysistype].best_h_pid = h_results.split(' ')[1].replace('(', '') \
                                    .replace(')', '')
                            except IndexError:
                                # No percent identity recorded for the H-type.
                                sample[self.analysistype].best_h_pid = 'ND'
class ShortKSippingMethods(Sippr):
    """Sippr pipeline variant run with a short k-mer for read baiting."""
    def main(self):
        """
        Run the methods in the correct order for pipelines
        """
        # Find the target files
        self.targets()
        # GDCS analyses bait with a shorter k-mer than the other analyses.
        kmer = 15 if self.analysistype == 'GDCS' else 17
        # Use bbduk to bait the FASTQ reads matching the target sequences
        self.bait(maskmiddle='t', k=kmer)
        # If desired, use bbduk to bait the target sequences with the previously baited FASTQ files
        if self.revbait:
            self.reversebait(maskmiddle='t', k=kmer)
        # Run the bowtie2 read mapping module
        self.mapping()
        # Use samtools to index the sorted bam file
        self.indexing()
        # Parse the results
        # self.parsing()
        self.parsebam()
        # Filter out any sequences with cigar features such as internal soft-clipping from the results
        # self.clipper()
class ResSippr(GeneSippr):
    """Read-based (sipping) resistance gene detection pipeline."""
    def runner(self):
        """
        Run the necessary methods in the correct order
        """
        logging.info('Starting {} analysis pipeline'.format(self.analysistype))
        if not self.pipeline:
            general = None
            for sample in self.runmetadata.samples:
                general = getattr(sample, 'general')
            if general is None:
                # Create the objects to be used in the analyses
                objects = Objectprep(self)
                objects.objectprep()
                self.runmetadata = objects.samples
        # Run the analyses
        ShortKSippingMethods(inputobject=self,
                             cutoff=self.cutoff,
                             allow_soft_clips=self.allow_soft_clips)
    # noinspection PyMissingConstructor
    def __init__(self, args, pipelinecommit, startingtime, scriptpath, analysistype, cutoff, pipeline, revbait,
                 allow_soft_clips=False):
        """
        :param args: command line arguments
        :param pipelinecommit: pipeline commit or version
        :param startingtime: time the script was started
        :param scriptpath: home path of the script
        :param analysistype: name of the analysis being performed - allows the program to find databases
        :param cutoff: percent identity cutoff for matches
        :param pipeline: boolean of whether this script needs to run as part of a particular assembly pipeline
        :param allow_soft_clips: Boolean whether the BAM parsing should exclude sequences with internal soft clips
        """
        # Initialise variables
        # super().__init__(args, pipelinecommit, startingtime, scriptpath, analysistype, cutoff, pipeline, revbait)
        self.commit = str(pipelinecommit)
        self.starttime = startingtime
        self.homepath = scriptpath
        # Define variables based on supplied arguments. getattr() with a
        # default replaces the original's repeated try/except AttributeError
        # blocks (identical behavior: a missing attribute yields the default).
        self.path = os.path.join(args.path)
        assert os.path.isdir(self.path), u'Supplied path is not a valid directory {0!r:s}'.format(self.path)
        self.sequencepath = os.path.join(getattr(args, 'sequencepath', self.path))
        assert os.path.isdir(self.sequencepath), u'Sequence path is not a valid directory {0!r:s}' \
            .format(self.sequencepath)
        # Prefer the pipeline's reference file path; fall back to an
        # explicitly supplied target path.
        if hasattr(args, 'reffilepath'):
            self.targetpath = os.path.join(args.reffilepath, analysistype)
        else:
            self.targetpath = os.path.join(args.targetpath)
        self.reportpath = os.path.join(self.path, 'reports')
        assert os.path.isdir(self.targetpath), u'Target path is not a valid directory {0!r:s}' \
            .format(self.targetpath)
        self.bcltofastq = getattr(args, 'bcltofastq', False)
        self.miseqpath = getattr(args, 'miseqpath', str())
        self.miseqfolder = getattr(args, 'miseqfolder', str())
        self.fastqdestination = getattr(args, 'fastqdestination', str())
        self.forwardlength = getattr(args, 'forwardlength', 'full')
        self.reverselength = getattr(args, 'reverselength', 'full')
        # Single-end data only when the reverse length is explicitly zero.
        self.numreads = 2 if self.reverselength != 0 else 1
        self.customsamplesheet = getattr(args, 'customsamplesheet', False)
        self.logfile = args.logfile
        # Set the custom cutoff value
        self.cutoff = float(cutoff)
        self.averagedepth = int(getattr(args, 'averagedepth', 10))
        self.copy = getattr(args, 'copy', False)
        self.runmetadata = args.runmetadata
        # Use the argument for the number of threads to use, or default to the number of cpus in the system
        self.cpus = int(args.cpus)
        try:
            self.threads = int(self.cpus / len(self.runmetadata.samples)) if self.cpus / len(self.runmetadata.samples) \
                > 1 else 1
        except TypeError:
            # runmetadata.samples may be unset (None) at this point.
            self.threads = self.cpus
        self.taxonomy = {'Escherichia': 'coli', 'Listeria': 'monocytogenes', 'Salmonella': 'enterica'}
        self.analysistype = analysistype
        self.pipeline = pipeline
        self.revbait = revbait
        self.allow_soft_clips = allow_soft_clips
class Resistance(ResSippr):
    """ResFinder-based antimicrobial resistance gene detection and reporting."""
    def main(self):
        # Re-use an existing report when present; otherwise run the full
        # read-sipping analysis, then (re)create the report in either case.
        res_report = os.path.join(self.reportpath, 'resfinder.csv')
        if os.path.isfile(res_report):
            self.parse_report(res_report=res_report)
        else:
            self.runner()
        # Create the reports
        self.reporter()
    def parse_report(self, res_report):
        """
        Parse an existing report, and extract the results
        :param res_report: type STR: name and absolute path of the report
        """
        for sample in self.runmetadata.samples:
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].results = dict()
            sample[self.analysistype].pipelineresults = list()
            sample[self.analysistype].avgdepth = dict()
            # NOTE(review): the report is re-opened once per sample; correct,
            # if inefficient, for large sample sets.
            with open(res_report, 'r') as report:
                # Skip the header row.
                next(report)
                for line in report:
                    try:
                        strain, res, gene, allele, accession, perc_ident, length, fold_cov = line.rstrip().split(',')
                        # NOTE(review): substring match -- sample names that
                        # are prefixes of one another could collide; verify.
                        if sample.name in line:
                            if strain:
                                name = '{gene}_{accession}_{allele}'.format(gene=gene,
                                                                            accession=accession,
                                                                            allele=allele)
                                sample[self.analysistype].results[name] = perc_ident
                                sample[self.analysistype].pipelineresults.append(
                                    '{rgene} ({pid}%) {rclass}'.format(rgene=gene,
                                                                       pid=perc_ident,
                                                                       rclass=res))
                                sample[self.analysistype].avgdepth[name] = fold_cov
                    except ValueError:
                        # Rows without the full eight columns (e.g. strain-only
                        # lines) are skipped.
                        pass
    def reporter(self):
        """
        Creates a report of the results
        """
        logging.info('Creating {at} report'.format(at=self.analysistype))
        resistance_classes = ResistanceNotes.classes(self.targetpath)
        # Find unique gene names with the highest percent identity
        for sample in self.runmetadata.samples:
            try:
                if sample[self.analysistype].results:
                    # Initialise a dictionary to store the unique genes, and their percent identities
                    sample[self.analysistype].uniquegenes = dict()
                    for name, identity in sample[self.analysistype].results.items():
                        # Split the name of the gene from the string e.g. ARR-2_1_HQ141279 yields ARR-2
                        genename = name.split('_')[0]
                        # Set the best observed percent identity for each unique gene
                        try:
                            # Pull the previous best identity from the dictionary
                            bestidentity = sample[self.analysistype].uniquegenes[genename]
                            # If the current identity is better than the old identity, save it
                            if float(identity) > float(bestidentity):
                                sample[self.analysistype].uniquegenes[genename] = float(identity)
                        # Initialise the dictionary if necessary
                        except KeyError:
                            sample[self.analysistype].uniquegenes[genename] = float(identity)
            except AttributeError:
                # Samples without results for this analysis are skipped.
                pass
        # Create the path in which the reports are stored
        make_path(self.reportpath)
        # Initialise strings to store the results
        data = 'Strain,Resistance,Gene,Allele,Accession,PercentIdentity,Length,FoldCoverage\n'
        with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
            for sample in self.runmetadata.samples:
                # Create an attribute to store the string for the eventual pipeline report
                sample[self.analysistype].pipelineresults = list()
                if sample[self.analysistype].results:
                    results = False
                    for name, identity in sorted(sample[self.analysistype].results.items()):
                        # Extract the necessary variables from the gene name string
                        gname, genename, accession, allele = ResistanceNotes.gene_name(name)
                        # Retrieve the best identity for each gene
                        try:
                            percentid = sample[self.analysistype].uniquegenes[gname]
                        # Beta-lactamases will not have the allele and version from the gene name defined above
                        except KeyError:
                            percentid = sample[self.analysistype].uniquegenes[gname.split('-')[0]]
                        # If the percent identity of the current gene matches the best percent identity, add it to
                        # the report - there can be multiple occurrences of genes e.g.
                        # sul1,1,AY224185,100.00,840 and sul1,2,CP002151,100.00,927 are both included because they
                        # have the same 100% percent identity
                        if float(identity) == percentid:
                            try:
                                # Determine resistance phenotype of the gene
                                res = ResistanceNotes.resistance(name, resistance_classes)
                                # Populate the results
                                data += '{sn},{res},{gene},{allele},{accession},{identity},{length},{depth}\n'.format(
                                    sn=sample.name,
                                    res=res,
                                    gene=genename,
                                    allele=allele,
                                    accession=accession,
                                    identity=identity,
                                    length=len(sample[self.analysistype].sequences[name]),
                                    depth=sample[self.analysistype].avgdepth[name])
                                sample[self.analysistype].pipelineresults.append(
                                    '{rgene} ({pid}%) {rclass}'.format(rgene=genename,
                                                                       pid=identity,
                                                                       rclass=res)
                                )
                                results = True
                            except KeyError:
                                # Missing sequence/depth entries for this hit.
                                pass
                    if not results:
                        data += '{sn}\n'.format(sn=sample.name)
                else:
                    data += '{sn}\n'.format(sn=sample.name)
            # Write the string to the file
            report.write(data)
class ResFinder(GeneSeekr):
@staticmethod
def sequencenames(contigsfile):
"""
Takes a multifasta file and returns a list of sequence names
:param contigsfile: multifasta of all sequences
:return: list of all sequence names
"""
sequences = list()
for record in SeqIO.parse(open(contigsfile, "rU", encoding="iso-8859-15"), "fasta"):
sequences.append(record.id)
return sequences
def strainer(self):
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
targets = glob(os.path.join(self.targetpath, '*.tfa'))
targetcheck = glob(os.path.join(self.targetpath, '*.tfa'))
if targetcheck:
try:
combinedtargets = glob(os.path.join(self.targetpath, '*.fasta'))[0]
except IndexError:
combinetargets(targets, self.targetpath)
combinedtargets = glob(os.path.join(self.targetpath, '*.fasta'))[0]
sample[self.analysistype].targets = targets
sample[self.analysistype].combinedtargets = combinedtargets
sample[self.analysistype].targetpath = self.targetpath
sample[self.analysistype].targetnames = self.sequencenames(combinedtargets)
sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory,
self.analysistype)
make_path(sample[self.analysistype].reportdir)
else:
# Set the metadata file appropriately
sample[self.analysistype].targets = 'NA'
sample[self.analysistype].combinedtargets = 'NA'
sample[self.analysistype].targetpath = 'NA'
sample[self.analysistype].targetnames = 'NA'
sample[self.analysistype].reportdir = 'NA'
sample[self.analysistype].blastresults = 'NA'
else:
# Set the metadata file appropriately
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].targets = 'NA'
sample[self.analysistype].combinedtargets = 'NA'
sample[self.analysistype].targetpath = 'NA'
sample[self.analysistype].targetnames = 'NA'
sample[self.analysistype].reportdir = 'NA'
sample[self.analysistype].blastresults = 'NA'
def resfinderreporter(self):
"""
Custom reports for ResFinder analyses. These reports link the gene(s) found to their resistance phenotypes
"""
# Initialise resistance dictionaries from the notes.txt file
resistance_classes = ResistanceNotes.classes(self.targetpath)
# Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be
# able to have appropriately sized, multi-line cells
workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, '{}.xlsx'.format(self.analysistype)))
# New worksheet to store the data
worksheet = workbook.add_worksheet()
# Add a bold format | |
}
Attributes
----------
condition : anyOf(Conditional<MarkPropFieldDef>, Conditional<ValueDef>, list)
A field definition or one or more value definition(s) with a selection
predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_schema = {'$ref': '#/definitions/MarkPropValueDefWithCondition'}
_rootschema = Root._schema
def __init__(self, condition=Undefined, value=Undefined, **kwds):
super(MarkPropValueDefWithCondition, self).__init__(condition=condition,
value=value, **kwds)
class TextValueDefWithCondition(SchemaBase):
    """A ValueDef with Condition<ValueDef | FieldDef>

    {
       condition: {field: ...} | {value: ...},
       value: ...,
    }

    Attributes
    ----------
    condition : anyOf(Conditional<TextFieldDef>, Conditional<ValueDef>, list)
        A field definition, or one or more value definition(s) with a
        selection predicate.
    value : anyOf(float, string, boolean)
        A constant value in the visual domain.
    """
    _schema = {'$ref': '#/definitions/TextValueDefWithCondition'}
    _rootschema = Root._schema

    def __init__(self, condition=Undefined, value=Undefined, **kwds):
        # Both properties are simply forwarded to SchemaBase, which validates
        # them against the JSON schema referenced by _schema.
        super(TextValueDefWithCondition, self).__init__(value=value,
                                                        condition=condition,
                                                        **kwds)
class VerticalAlign(SchemaBase):
    """VerticalAlign schema wrapper"""
    _schema = {'$ref': '#/definitions/VerticalAlign'}
    _rootschema = Root._schema

    def __init__(self, *args):
        # Positional-only wrapper; values are validated by SchemaBase against
        # the VerticalAlign definition.
        super(VerticalAlign, self).__init__(*args)
class VgAxisConfig(SchemaBase):
    """VgAxisConfig schema wrapper

    Axis configuration options.

    Attributes
    ----------
    bandPosition : float
        Interpolation fraction for tick placement on band scales
        (0 = left edge of the band, 0.5 = middle of the band).
    domain : boolean
        Whether to draw the axis baseline. Default: true.
    domainColor : string
        Color of the axis domain line.
    domainWidth : float
        Stroke width of the axis domain line.
    grid : boolean
        Whether to draw grid lines. Default: true for continuous,
        non-binned scales; otherwise false.
    gridColor : string
        Color of gridlines.
    gridDash : list
        The offset (in pixels) into which to begin drawing with the grid
        dash array.
    gridOpacity : float
        Stroke opacity of the grid, between 0 and 1. Default: 1.
    gridWidth : float
        Grid line width, in pixels.
    labelAngle : float
        Rotation angle of the axis labels. Default: -90 for nominal and
        ordinal fields; 0 otherwise.
    labelBound : anyOf(boolean, float)
        Hide labels that exceed the axis range (true), or the pixel
        tolerance by which a label may exceed it (number). Default: false.
    labelColor : string
        Color of the tick labels.
    labelFlush : anyOf(boolean, float)
        Flush-align the first and last labels with the scale range, or the
        pixel offset to push them outward (number). Default: true for a
        continuous x-scale axis; otherwise false.
    labelFont : string
        Font of the tick labels.
    labelFontSize : float
        Font size of the tick labels, in pixels.
    labelLimit : float
        Maximum allowed pixel width of tick labels.
    labelOverlap : anyOf(boolean, string, string)
        Overlap-resolution strategy: false (none), true/"parity" (drop
        every other label) or "greedy" (linear scan). Default: true for
        non-nominal fields with non-log scales; "greedy" for log scales.
    labelPadding : float
        Padding, in pixels, between axis and labels.
    labels : boolean
        Whether to draw labels. Default: true.
    maxExtent : float
        Maximum extent in pixels that ticks and labels should use.
    minExtent : float
        Minimum extent in pixels that ticks and labels should use.
        Default: 30 for y-axis; undefined for x-axis.
    tickColor : string
        Color of the axis ticks.
    tickRound : boolean
        Round tick pixel positions to the nearest integer.
    tickSize : float
        Tick size, in pixels.
    tickWidth : float
        Tick width, in pixels.
    ticks : boolean
        Whether to draw ticks.
    titleAlign : string
        Horizontal text alignment of the axis title.
    titleAngle : float
        Angle in degrees of the axis title.
    titleBaseline : string
        Vertical text baseline of the axis title.
    titleColor : string
        Color of the title.
    titleFont : string
        Font of the title (e.g. "Helvetica Neue").
    titleFontSize : float
        Font size of the title.
    titleFontWeight : anyOf(string, float)
        Font weight of the title (e.g. "bold").
    titleLimit : float
        Maximum allowed pixel width of the title.
    titleMaxLength : float
        Max length of the axis title when auto-generated from the field's
        description.
    titlePadding : float
        Padding, in pixels, between title and axis.
    titleX : float
        X-coordinate of the title relative to the axis group.
    titleY : float
        Y-coordinate of the title relative to the axis group.
    """
    _schema = {'$ref': '#/definitions/VgAxisConfig'}
    _rootschema = Root._schema

    def __init__(self, bandPosition=Undefined, domain=Undefined, domainColor=Undefined,
                 domainWidth=Undefined, grid=Undefined, gridColor=Undefined,
                 gridDash=Undefined, gridOpacity=Undefined, gridWidth=Undefined,
                 labelAngle=Undefined, labelBound=Undefined, labelColor=Undefined,
                 labelFlush=Undefined, labelFont=Undefined, labelFontSize=Undefined,
                 labelLimit=Undefined, labelOverlap=Undefined, labelPadding=Undefined,
                 labels=Undefined, maxExtent=Undefined, minExtent=Undefined,
                 tickColor=Undefined, tickRound=Undefined, tickSize=Undefined,
                 tickWidth=Undefined, ticks=Undefined, titleAlign=Undefined,
                 titleAngle=Undefined, titleBaseline=Undefined, titleColor=Undefined,
                 titleFont=Undefined, titleFontSize=Undefined, titleFontWeight=Undefined,
                 titleLimit=Undefined, titleMaxLength=Undefined, titlePadding=Undefined,
                 titleX=Undefined, titleY=Undefined, **kwds):
        # Every keyword is forwarded verbatim; SchemaBase validates against
        # the JSON schema referenced by _schema.
        super(VgAxisConfig, self).__init__(
            bandPosition=bandPosition, domain=domain, domainColor=domainColor,
            domainWidth=domainWidth, grid=grid, gridColor=gridColor,
            gridDash=gridDash, gridOpacity=gridOpacity, gridWidth=gridWidth,
            labelAngle=labelAngle, labelBound=labelBound, labelColor=labelColor,
            labelFlush=labelFlush, labelFont=labelFont, labelFontSize=labelFontSize,
            labelLimit=labelLimit, labelOverlap=labelOverlap,
            labelPadding=labelPadding, labels=labels, maxExtent=maxExtent,
            minExtent=minExtent, tickColor=tickColor, tickRound=tickRound,
            tickSize=tickSize, tickWidth=tickWidth, ticks=ticks,
            titleAlign=titleAlign, titleAngle=titleAngle,
            titleBaseline=titleBaseline, titleColor=titleColor,
            titleFont=titleFont, titleFontSize=titleFontSize,
            titleFontWeight=titleFontWeight, titleLimit=titleLimit,
            titleMaxLength=titleMaxLength, titlePadding=titlePadding,
            titleX=titleX, titleY=titleY, **kwds)
class VgBinding(SchemaBase):
    """VgBinding schema wrapper"""
    _schema = {'$ref': '#/definitions/VgBinding'}
    _rootschema = Root._schema

    def __init__(self, *args, **kwds):
        # Accept any combination of positional and keyword values; SchemaBase
        # validates them against the VgBinding definition.
        super(VgBinding, self).__init__(*args, **kwds)
class VgCheckboxBinding(SchemaBase):
    """VgCheckboxBinding schema wrapper

    Attributes
    ----------
    element : string
    input : string
    """
    _schema = {'$ref': '#/definitions/VgCheckboxBinding'}
    _rootschema = Root._schema

    def __init__(self, input=Undefined, element=Undefined, **kwds):
        # Parameter order (input first) mirrors the generated schema API.
        super(VgCheckboxBinding, self).__init__(element=element,
                                                input=input, **kwds)
class VgEventStream(SchemaBase):
    """VgEventStream schema wrapper"""
    _schema = {'$ref': '#/definitions/VgEventStream'}
    _rootschema = Root._schema

    def __init__(self, **kwds):
        # Keyword-only wrapper; properties are passed through to SchemaBase
        # for validation against the VgEventStream definition.
        super(VgEventStream, self).__init__(**kwds)
class VgGenericBinding(SchemaBase):
    """VgGenericBinding schema wrapper

    Attributes
    ----------
    element : string
    input : string
    """
    _schema = {'$ref': '#/definitions/VgGenericBinding'}
    _rootschema = Root._schema

    def __init__(self, input=Undefined, element=Undefined, **kwds):
        # Parameter order (input first) mirrors the generated schema API.
        super(VgGenericBinding, self).__init__(element=element,
                                               input=input, **kwds)
class VgMarkConfig(SchemaBase):
"""VgMarkConfig schema wrapper
Attributes
----------
align : HorizontalAlign
The horizontal alignment of the text. One of `"left"`, `"right"`,
`"center"`.
angle : float
The rotation angle of the text, in degrees.
baseline : VerticalAlign
The vertical alignment of the text. One of `"top"`, `"middle"`, `"bottom"`.
__Default value:__ `"middle"`
cursor : string
The mouse cursor used over the mark. Any valid [CSS cursor
type](https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values) can be
used.
dx : float
The horizontal offset, in pixels, between the text label and its anchor
point. The offset is applied after rotation by the _angle_ property.
dy : float
The vertical offset, in pixels, between the text label and its anchor point.
The offset is applied after rotation by the _angle_ property.
fill : string
Default Fill Color. This has higher precedence than config.color __Default
value:__ (None)
fillOpacity : float
The fill | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
emha-koeln, SkyEmie, bbb0
"""
import time
#from flashbootlib import test
import os
from os import path
import math
import subprocess
from subprocess import PIPE, run
import platform
import configparser
import argparse
#import time
##########################################################################################################################
VERSION = '0.1.1.20200802'  # tool version, date-stamped
CONF_FILE = 'subls.conf'  # persisted brute-force state (IMEI, checksum, last code tried)
UNLOCKCODE_FILE = 'unlock_code.txt'  # where a successful unlock code is written
PLATFORM = 'unknown'  # compared against 'Windows'/'Linux' in cls(); presumably set at startup from platform.system() -- TODO confirm
##########################################################################################################################
## system and plattform
def cls():
    """Clear the console using the platform's native clear command.

    Does nothing when PLATFORM is neither 'Windows' nor 'Linux'.
    """
    command = {'Windows': 'cls', 'Linux': 'clear'}.get(PLATFORM)
    if command is not None:
        os.system(command)
def runOS(cmd, iVerboselevel=1):  #0silent 1stdout/err 2cmd and stdout7err 3 was verbose
    """Run *cmd* in a subprocess and return the CompletedProcess.

    :param cmd: full command line as one string; split on single spaces, so
        arguments containing spaces are not supported (callers only pass
        simple adb/fastboot invocations).
    :param iVerboselevel: 0 = silent, 1 = print stdout/stderr,
        2 = also print the command, 3 = also print the return code.
    :return: subprocess.CompletedProcess with captured text output.
    :raises RuntimeError: if the command cannot be executed (e.g. adb or
        fastboot is not installed / not on PATH).
    """
    sCmd = cmd
    cmd = cmd.split(' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        if iVerboselevel == 1:
            print(result.stdout, result.stderr)
        elif iVerboselevel == 2:
            print(sCmd)
            print(result.stdout, result.stderr)
        elif iVerboselevel == 3:
            print('Shell Result for:', sCmd)
            print('Returncode:', str(result.returncode))
            print(result.stdout, result.stderr)
        return result
    except subprocess.CalledProcessError as e:
        # Only possible if check=True is ever added to run() above; kept for
        # forward compatibility (without check=True this clause is never hit).
        raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    except OSError as e:
        # run() raises FileNotFoundError/PermissionError when the binary is
        # missing or not executable; previously this escaped uncaught.
        raise RuntimeError("command '{}' could not be executed: {}".format(sCmd, e))
##########################################################################################################################
## adb and fastboot
def testADBDevice(sSN="0"):
    """Wait for `adb devices` output and let the user pick a device.

    :param sSN: unused in this function (kept for symmetry with
        testFastbootDevice) -- TODO confirm
    :return: the 16-character serial of the chosen device, or the
        '0000000000000000' placeholder (choice 0 / plain Enter) for a device
        already in bootloader mode and therefore invisible to adb.
    """
    found = ''
    iDev = 0
    # NOTE(review): `adb devices` prints a header even with no device
    # attached, so result.stdout is normally non-empty on the first pass
    # and this loop exits immediately; filtering happens below.
    while(found == ''):
        #print('adb devices: Waiting for devices...')
        result = runOS("adb devices", 2)
        found = result.stdout
        time.sleep(1)
        #print(result.stdout)
    sDevList = found.split('\n')
    # Index 0 is a placeholder serial for devices not visible to adb.
    devs = ['0000000000000000']
    iDevs = []
    #print(sDevList)
    # Keep lines that mention 'device' but are neither the bare state word
    # nor the header; the serial is the first 16 characters of the line.
    for i in sDevList:
        if not i == 'device':
            if not i == 'List of devices attached':
                if 'device' in i:
                    devs.append(i[:16])
                    #print('INFO: Found device', i[:16] )
    #print(len(devs))
    #print(devs)
    print('INFO: maybe your device isn\'t found by adb because it\'s already in bootloader-mode')
    print('      in this case select \'0\' or simply press Enter')
    print ('Select device: ')
    # Show each candidate together with its manufacturer (queried silently).
    for i in range(0, len(devs)):
        iDevs.append(i)
        result = runOS('adb -s '+devs[i]+' shell getprop ro.product.manufacturer', 0)
        print(i, devs[i], result.stdout[:-1] )
    iDev = input(iDevs)# or '0') doesn't work here...?
    #print('hmmmmmmmmm', iDev)
    # Plain Enter defaults to the placeholder entry at index 0.
    if iDev == '':
        iDev = 0
    return(devs[int(iDev)])
def testFastbootDevice(sSN="0"):
    """Wait (up to ~20 s) for `fastboot devices` to list a device.

    :param sSN: serial previously chosen via testADBDevice; if fastboot
        reports the same serial it is returned directly without prompting.
    :return: the 16-character serial of the matched or user-selected device.
        Exits the program if no device appears within 20 polls.
    """
    found = ''
    #if sSN == 'unknown':
    #    sSN='0'
    iCount = 0
    # Poll once per second; give up (and exit) after 20 attempts.
    while(found == ''):
        cls()
        print('INFO: Waiting for devices...', 20-iCount)
        result = runOS("fastboot devices", 3)
        found = result.stdout
        time.sleep(1)
        if iCount == 20:
            print('INFO: Counldn\'t detect any device with > fastboot devices')
            input('Press Enter to exit...\n')
            exit(-1)
        iCount += 1
        #print(result.stdout)
    sDevList = found.split('\n')
    devs = []
    iDevs = []
    # Lines look like '<serial>\tfastboot'; the serial is the first 16 chars.
    for i in sDevList:
        if not i == 'fastboot':
            if 'fastboot' in i:
                print('fastboot: Found device', i[:16] )
                devs.append(i[:16])
                # Short-circuit when the adb-selected serial reappears.
                if str(sSN) == str(i[:16]):
                    print('INFO: Found your device', i[:16])
                    return(sSN)
    print('INFO: Couldn\'t find your device', sSN)
    #print(len(devs))
    #print(devs)
    print('Select device: ')
    for i in range(0, len(devs)):
        iDevs.append(i)
        print(i, devs[i])
    iDev = input(iDevs or '0')
    # Plain Enter defaults to the first listed device.
    if iDev == '':
        iDev = 0
    #if args.verbose:
    #    cls()
    #    runOS('fastboot oem get-product-model')
    #    input('Press Enter ...')
    return(devs[int(iDev)])
##########################################################################################################################
## init luhn
def initLuhn():
    """Determine the IMEI Luhn checksum used to seed the brute force.

    Sources, in priority order: the command line IMEI argument, a fresh
    interactive prompt (when no IMEI is stored yet), or the values saved in
    the config file by a previous run.

    :return: the Luhn checksum (int) of the selected IMEI.
    """
    # IMEI
    if args.imei:
        # Command-line IMEI takes precedence; nothing is persisted here.
        print('INFO: using', args.imei, 'as IMEI')
        imei = args.imei
        checksum = luhn_checksum(imei)
        print('INFO: Luhn checksum is: '+str(checksum))
        input('wait')  # NOTE(review): looks like a leftover debug pause -- confirm
    elif not config['DEFAULT']['imei']:
        # First run: ask for the IMEI and persist IMEI + checksum.
        print('Enter IMEI:')
        imei = int(input('Type IMEI digit :'))
        checksum = luhn_checksum(imei)
        print('INFO: Luhn checksum is: '+str(checksum))
        config['DEFAULT']['imei'] = str(imei)
        config['DEFAULT']['checksum'] = str(checksum)
        with open(CONF_FILE, 'w') as f:
            config.write(f)
    else:
        print('INFO: found IMEI in', CONF_FILE+":", config['DEFAULT']['imei'])
        print('      checksum in', CONF_FILE+":", config['DEFAULT']['checksum'])
        if args.base:
            print('      new base -b:', args.base)
        else:
            print('      last code was', CONF_FILE+":", config['DEFAULT']['algoOEMcode'])
        # Default to the stored values up front. Previously these were only
        # assigned inside the mismatch branch below, so a clean checksum
        # match fell through to `return checksum` with the name unbound
        # (UnboundLocalError).
        imei = int(config['DEFAULT']['imei'])
        checksum = int(config['DEFAULT']['checksum'])
        if not checksum == int(luhn_checksum(config['DEFAULT']['imei'])):
            # The saved checksum legitimately drifts while the brute force
            # cycles through code ranges, so a mismatch is expected when
            # resuming; the stray '\(' escape sequences are fixed here.
            print('INFO: Luhn checksum('+str(luhn_checksum(config['DEFAULT']['imei']))+
                  ') is not equal to saved one('+str(config['DEFAULT']['checksum'])+')')
            print('INFO: This is ok, if your are continuing from a previous run')
            yn = input('(Y/n) Press Enter to continue with last run or \'n\' to enter new IMEI: ')
            if not yn.lower() == 'n':
                print('INFO: continuing with last IMEI, checksum')
                print('INFO: IMEI', imei)
                print('INFO: checksum', checksum)
            else:
                print('Enter new IMEI:')
                imei = int(input('Type IMEI digit :'))
                checksum = luhn_checksum(imei)
                print('INFO: Luhn checksum is: '+str(checksum))
                config['DEFAULT']['imei'] = str(imei)
                config['DEFAULT']['checksum'] = str(checksum)
                with open(CONF_FILE, 'w') as f:
                    config.write(f)
    return checksum
##########################################################################################################################
## init numeric
def initNumeric():
    """Determine the start value for the plain numeric brute force.

    Prompts the user for a new start value, or resumes from the value
    persisted in the config file by a previous run.

    :return: the starting code as an int.
    """
    def persist(value):
        # Remember the chosen start value for the next run.
        config['DEFAULT']['lastNumeric'] = str(value)
        with open(CONF_FILE, 'w') as f:
            config.write(f)

    saved = config['DEFAULT']['lastNumeric']
    if not saved:
        # Nothing saved yet: ask once, empty input means start at 0.
        entered = input('Enter new number to start from: (Default=0)')
        start = 0 if entered == '' else entered
        persist(start)
    else:
        print('INFO: found a saved number from last run in', CONF_FILE+":", saved)
        # Empty input falls back to 'n', i.e. resume the previous run.
        answer = str(input('Press Enter to continue with last run or enter a new start number: ') or 'n')
        if answer.lower() == 'n':
            print('INFO: continuing with last run')
            start = int(saved)
            print('INFO: lastNumeric', start)
        else:
            print('INFO: continuing with', answer)
            persist(answer)
            start = answer
    return int(start)
##########################################################################################################################
## tryUnlockBootloader numeric
def tryUnlockNumeric(algoOEMcode):
    """Brute force `fastboot oem unlock` by simply counting codes upward.

    Tries every 16-digit (zero-padded) code starting at *algoOEMcode*.
    Progress is checkpointed to the config file every 200 attempts so an
    interrupted run can resume. On success the code is written to
    UNLOCKCODE_FILE and returned; a device that reboots instead is treated
    as protected and the program exits.

    :param algoOEMcode: integer code to start from.
    :return: the successful unlock code (int).
    """
    #algoOEMcode = int(algoOEMcode)
    unlock = False
    save = 0
    cls()
    while(unlock == False):
        cmd = 'fastboot oem unlock '+ str(algoOEMcode).rjust(16,'0')
        if args.verbose:
            print("Bruteforceing... " + cmd)
            print("                 methode: Numeric")
            print("                 algoOEMcode: " + str(algoOEMcode))
            print("                 ... (next save in "+str(200-save)+")")
        else:
            print("Bruteforceing... " + cmd)
        result = runOS(cmd, 0)
        # ToDo:
        # if result.returncode == 0:
        sprout = result.stdout + ' ' + result.stderr
        sdrout = sprout.replace('\n', ' ').split(' ')
        # Return code 1 is the normal "code rejected" case; anything else is
        # inspected word by word for 'success' or a forced 'reboot'.
        if not result.returncode == 1:
            print('INFO: ', sdrout)
            input('Press Enter...\n')
            for i in sdrout:
                if i.lower() == 'success':
                    print('INFO: ', i)
                    # Previously written via bare open()/close(); a crash in
                    # between lost the code. The with-block guarantees flush.
                    with open(UNLOCKCODE_FILE, "w") as bak:
                        bak.write("Your saved bootloader code : "+str(algoOEMcode)+"\nDEBUG sprout was: "+str(sprout))
                    input('Press Enter...\n')
                    return(algoOEMcode)
                elif i.lower() == 'reboot':
                    print('INFO: ', i)
                    print('\n\nSorry, your bootloader has additional protection that other models don\'t have\nI can\'t do anything.. :c\n\n')
                    input('Press Enter to exit..\n')
                    exit()
        else:
            if args.verbose:
                for i in sdrout:
                    if i.lower() == 'waiting':
                        print('INFO: ', i, 'for device...')
        save +=1
        # Checkpoint the current position every 200 attempts.
        if save == 200:
            save = 0
            config['DEFAULT']['lastNumeric'] = str(algoOEMcode)
            with open(CONF_FILE, 'w') as f:
                config.write(f)
        algoOEMcode += 1
##########################################################################################################################
## tryUnlockBootloader luhn
def tryUnlockBootloader(checksum):
    """Brute force `fastboot oem unlock` using the Luhn-checksum stepping.

    Starts from the -b/--base argument, the saved algoOEMcode, or the
    configured base, and advances via algoIncrementChecksum(). Progress is
    checkpointed every 200 attempts (unless IMEI/base were given on the
    command line). When the 16-digit space is exhausted the checksum digit
    is bumped and the range restarts; once every checksum digit has been
    tried the program gives up.

    :param checksum: initial Luhn checksum digit from initLuhn().
    :return: the successful unlock code (int); exits on failure/protection.
    """
    #algoOEMcode = 1000000000000000 #base
    unlock = False
    save = 0
    if args.base:
        algoOEMcode = int(args.base)
    elif config['DEFAULT']['algoOEMcode']:
        algoOEMcode = int(config['DEFAULT']['algoOEMcode'])
    else:
        algoOEMcode = int(config['DEFAULT']['base'])
    cls()
    while(unlock == False):
        cmd = 'fastboot oem unlock '+ str(algoOEMcode).rjust(16,'0')
        if args.verbose:
            print("Bruteforceing... " + cmd)
            print("                 methode: Luhn-Checksum")
            print("                 algoOEMcode: " + str(algoOEMcode))
            print("                 checksum: " + str(checksum))
            print("                 ... (next save in "+str(200-save)+")")
        else:
            print("Bruteforceing... " + cmd + "(checksum=" + str(checksum) +")")
        result = runOS(cmd, 0)
        # ToDo:
        # if result.returncode == 0:
        sprout = result.stdout + ' ' + result.stderr
        sdrout = sprout.replace('\n', ' ').split(' ')
        # Return code 1 is the normal "code rejected" case; anything else is
        # inspected word by word for 'success' or a forced 'reboot'.
        if not result.returncode == 1:
            print('INFO: ', sdrout)
            input('Press Enter...\n')
            for i in sdrout:
                if i.lower() == 'success':
                    print('INFO: ', i)
                    # Previously written via bare open()/close(); the
                    # with-block guarantees the code reaches disk.
                    with open(UNLOCKCODE_FILE, "w") as bak:
                        bak.write("Your saved bootloader code : "+str(algoOEMcode)+"\nDEBUG sprout was: "+str(sprout))
                    input('Press Enter...\n')
                    return(algoOEMcode)
                elif i.lower() == 'reboot':
                    print('INFO: ', i)
                    print('\n\nSorry, your bootloader has additional protection that other models don\'t have\nI can\'t do anything.. :c\n\n')
                    input('Press Enter to exit..\n')
                    exit()
        else:
            if args.verbose:
                for i in sdrout:
                    if i.lower() == 'waiting':
                        print('INFO: ', i, 'for device...')
        save +=1
        # Checkpoint every 200 attempts unless IMEI/base came from the CLI.
        if save == 200:
            save = 0
            if not args.imei and not args.base:
                config['DEFAULT']['algoOEMcode'] = str(algoOEMcode)
                with open(CONF_FILE, 'w') as f:
                    config.write(f)
        algoOEMcode = algoIncrementChecksum(algoOEMcode, checksum)
        if algoOEMcode > 9999999999999999:
            # Range exhausted: restart from the base with the next checksum.
            if args.base:
                algoOEMcode = args.base
            else:
                algoOEMcode = int(config['DEFAULT']['base'])
            checksum += 1
            # Fix: this previously referenced an undefined global `imei`
            # (NameError when reached); use the IMEI stored in the config.
            if str(checksum)[-1:] == str(luhn_checksum(int(config['DEFAULT']['imei']))):
                print('INFO: Giving up.')
                input('Press Enter to exit')
                exit(-1)
            checksum = int(str(checksum)[-1:])
            config['DEFAULT']['checksum'] = str(checksum)[-1:]
            with open(CONF_FILE, 'w') as f:
                config.write(f)
##########################################################################################################################
## algo and luhn checksum
def algoIncrementChecksum(genOEMcode, checksum):
if args.imei:
genOEMcode+=int(checksum+math.sqrt(int(args.imei))*1024)
| |
= setup()
trace('Finding blank node subjects...')
query = conn.prepareTupleQuery(QueryLanguage.SPARQL, 'SELECT * WHERE { ?subject %s _:object . }' %
(URIs.pointBlank))
results = query.evaluate()
# Pick one to use
result = next(results)['subject']
trace(str(result))
trace('Attempting to query using SPARQL with binding...')
query = conn.prepareTupleQuery(QueryLanguage.SPARQL, 'SELECT * WHERE { ?subject %s ?object . }' %
(URIs.pointBlank))
query.setBinding('subject', result)
output = ' '.join([str(result) for result in query.evaluate()])
trace(output)
assert len(output)
def test_setInferencing():
    """prepareTupleQuery/setInferencing usage

    With inference ON the owl:sameAs link merges the children of Bob with
    those of Robert, so the inferred result set must be strictly larger
    than the non-inferred one.
    """
    conn = setup()
    query = conn.prepareTupleQuery(QueryLanguage.SPARQL, 'SELECT * WHERE { %s %s ?child . }' %
                                   (URIs.robert, URIs.fatherOf.toNTriples()))
    ## List the children of Robert with inference ON. The owl:sameAs
    ## link combines the children of Bob with those of Robert.
    trace('Querying children of Robert, inference ON')
    query.setIncludeInferred(True)
    results = query.evaluate()
    on_len = len(results)
    trace(' '.join([str(result) for result in results]))
    ## List the children of Robert with inference OFF.
    query.setIncludeInferred(False)
    trace('Querying children of Robert, inferencing OFF')
    results = query.evaluate()
    off_len = len(results)
    trace(' '.join([str(result) for result in results]))
    assert on_len > off_len
def test_json_xml_response():
    """Test JSON and other result formats from SPARQL

    Exercises evaluate_generic_query with XML and JSON accept headers, for
    both a SPARQL and a Prolog query. Output is only traced; the test
    passes as long as no request raises.
    """
    conn = setup()
    query = conn.prepareQuery(QueryLanguage.SPARQL, 'SELECT * WHERE { %s %s ?child . }' %
                              (URIs.robert, URIs.fatherOf.toNTriples()))
    ## List the children of Robert with inference ON. The owl:sameAs
    ## link combines the children of Bob with those of Robert.
    trace('Querying children of Robert w/ SPARQL, inference ON, application/sparql-results+xml')
    query.setIncludeInferred(True)
    trace(query.evaluate_generic_query(accept='application/sparql-results+xml'))
    trace('Querying children of Robert w/ SPARQL, inference ON, application/sparql-results+json')
    trace(query.evaluate_generic_query(accept='application/sparql-results+json'))
    # Same query expressed in Prolog select syntax.
    query = conn.prepareQuery(QueryLanguage.PROLOG, '(select (?child) (q !%s !%s ?child))' %
                              (URIs.robert, URIs.fatherOf.toNTriples()))
    trace('Querying children of Robert w/ PROLOG, inference ON, application/sparql-results+xml')
    query.setIncludeInferred(True)
    trace(query.evaluate_generic_query(accept='application/sparql-results+xml'))
    trace('Querying children of Robert w/ PROLOG, inference ON, application/sparql-results+json')
    trace(query.evaluate_generic_query(accept='application/sparql-results+json'))
def test_construct_query():
    """CONSTRUCT query with a variable binding, with inference on and off.

    Rewrites any ?relationship triple into a hasChild triple, binding
    ?relationship to fatherOf; both inferred and non-inferred evaluations
    must return at least one statement.
    """
    conn = setup()
    query = conn.prepareGraphQuery("SPARQL",
                                   'CONSTRUCT { ?p %s ?child . } WHERE { ?p ?relationship ?child . }' %
                                   URIs.hasChild)
    # Restrict the pattern to fatherOf triples via a binding.
    query.setBinding('relationship', URIs.fatherOf)
    query.setIncludeInferred(True)
    trace('Trying a CONSTRUCT query with inferred ON')
    results = query.evaluate()
    assert len(results)
    trace('\n'.join([str(result) for result in results]))
    query.setIncludeInferred(False)
    trace('Trying a CONSTRUCT query with inferred OFF')
    results = query.evaluate()
    assert len(results)
    trace('\n'.join([str(result) for result in results]))
def test_session_loadinitfile():
    """
    Test starting a session with loadinitfile True.

    The server initfile defines a Prolog functor (after-after); a session
    opened with loadinitfile=True can use it, one opened with
    loadinitfile=False must fail with RequestError.
    """
    # Basically ripped off from the miniclient tests.
    server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    conn = connect()
    # Build a small chain graph: x -before-> x+1 and x+1 -after-> x.
    for x in range(0, 10):
        conn.mini_repository.addStatement("<http:%d>" % x, "<http:before>", "<http:%d>" % (x + 1))
        conn.mini_repository.addStatement("<http:%d>" % (x + 1), "<http:after>", "<http:%d>" % x)
    eq_([["<http:2>"]], conn.mini_repository.evalPrologQuery("(select (?x) (q- ?x !<http:before> !<http:3>))")["values"])
    # Define after-after (two hops along the 'after' predicate) server-side.
    server.setInitfile("(<-- (after-after ?a ?b) (q- ?a !<http:after> ?x) (q- ?x !<http:after> ?b))")
    print(server.getInitfile())
    eq_([["<http:5>"]], conn.mini_repository.evalPrologQuery("(select (?x) (after-after ?x !<http:3>))")["values"])
    with conn.session(autocommit=True, loadinitfile=True) as session:
        eq_([["<http:5>"]], session.mini_repository.evalPrologQuery("(select (?x) (after-after ?x !<http:3>))")["values"])
    with conn.session(autocommit=True, loadinitfile=False) as session:
        assert_raises(RequestError, session.mini_repository.evalPrologQuery, ("(select (?x) (after-after ?x !<http:3>))",))
    # Clean up so later tests are unaffected by the initfile.
    server.setInitfile(None)
def test_freetext():
    """
    Test registering a free text predicate, then doing a SPARQL query on it.

    Covers index creation/modification, configuration readback, plain and
    limit/offset searches, minimum word size, predicate indexing, and an
    fti:match SPARQL query.
    """
    conn = connect()
    pred = URI('http://www.franz.com/has_name')
    # index1 indexes only the has_name predicate; index2 indexes predicates
    # and objects, and is then relaxed to a minimum word size of 2.
    conn.createFreeTextIndex("index1", predicates=[pred])
    conn.createFreeTextIndex("index2", indexFields=["predicate", "object"], indexResources="short", minimumWordSize=4,
                             innerChars='alpha', tokenizer='default')
    conn.modifyFreeTextIndex("index2", indexFields=["predicate", "object"], indexResources="short", minimumWordSize=2,
                             innerChars='alphanumeric', tokenizer='default')
    # config parameter fetching
    preds = conn.getFreeTextIndexConfiguration("index1")["predicates"]
    eq_(1, len(preds))
    eq_(str(pred), str(preds[0]))
    config = conn.getFreeTextIndexConfiguration("index2")
    eq_(["object", "predicate"], sorted(config["indexFields"]))
    eq_(2, config["minimumWordSize"])
    eq_("short", config["indexResources"])
    eq_(['alphanumeric'], config["innerChars"])
    eq_([], config["borderChars"])
    eq_("default", config["tokenizer"])
    assert len(config["stopWords"])
    def contractor(i):
        # Helper: URI for the i-th contractor test resource.
        return URI("http://www.franz.com/contractor#" + str(i))
    conn.addTriple(contractor(0), pred, '<NAME>')
    conn.addTriple(contractor(1), pred, '<NAME>')
    conn.addTriple(contractor(2), pred, '<NAME>')
    conn.addTriple(contractor(2), URI('http://www.franz.com/lives_in'), 'Berlin')
    conn.addTriple(contractor(3), pred, 'Ed')
    search1 = conn.evalFreeTextSearch('Ross', index="index1")
    eq_(2, len(search1))
    eq_(set([str(contractor(1)),str(contractor(0))]), set([str(search1[0][0]), str(search1[1][0])]))
    # Omitting the index name searches all indices.
    eq_(2, len(conn.evalFreeTextSearch('Ross')))
    # Test with limit/offset
    search1 = conn.evalFreeTextSearch('Ross', index="index1", limit=1, offset=0)
    search2 = conn.evalFreeTextSearch('Ross', index="index1", limit=1, offset=1)
    assert len(search1) == 1
    assert len(search2) == 1
    assert search1[0][0] != search2[0][0]
    # min word size
    eq_(0, len(conn.evalFreeTextSearch('Ed', index="index1")))
    eq_(1, len(conn.evalFreeTextSearch('Ed', index="index2")))
    # indexing of predicates
    eq_(0, len(conn.evalFreeTextSearch('has_name', index="index1")))
    eq_(4, len(conn.evalFreeTextSearch('has_name', index="index2")))
    # sparql
    results = conn.prepareTupleQuery(QueryLanguage.SPARQL,
                                     'SELECT ?something WHERE { ?something fti:match "<NAME>". }').evaluate()
    assert len(results)
def test_javascript():
    """Exercise server-side JavaScript evaluation via evalJavaScript.

    Covers arithmetic, the store/triple API, text indices, the namespaces
    API, and that variables persist across calls inside a session.
    """
    conn = connect()
    assert conn.evalJavaScript("1+1") == 2
    assert conn.evalJavaScript("store.addTriple('<a>', '<b>', '\"blah blah\"'); store.size") == 1
    assert conn.evalJavaScript("store.getTriplesArray()[0].subject.value") == "a"
    assert conn.evalJavaScript("store.getTriplesArray()[1]") is None
    assert conn.evalJavaScript("store.getTriples().next().object.toString()") == '"blah blah"'
    assert conn.evalJavaScript("store.indices.length > 2") is True
    assert conn.evalJavaScript("store.createTextIndex('foo'); store.textSearch('blah', 'foo').next().predicate.value") == "b"
    assert conn.evalJavaScript("namespaces.collect().length > 1") is True
    # Looking up an unregistered namespace yields null, hence the trailing 'null'.
    assert conn.evalJavaScript("namespaces.register('blah', 'http://blah.com'); x = namespaces.lookup('blah'); " \
                               "namespaces.unregister('blah'); x + namespaces.lookup('blah')") == "http://blah.comnull"
    # Within a session, state set by one call is visible to the next.
    with conn.session():
        conn.evalJavaScript("var x = 100;")
        assert conn.evalJavaScript("x") == 100
def test_roundtrips():
    """
    Test round-tripping of Python values.

    Stores bool/str/int/long/date/datetime/time literals, then reads each
    back via toPython() and checks type and value.
    """
    conn = connect()
    now = datetime.datetime.now()
    conn.addTriple('<http:object>', '<http:bool>', True)
    conn.addTriple('<http:object>', '<http:str>', 'Me')
    conn.addTriple('<http:object>', '<http:int>', 1234)
    conn.addTriple('<http:object>', '<http:long>', long(1234))
    conn.addTriple('<http:object>', '<http:date>', now.date())
    conn.addTriple('<http:object>', '<http:datetime>', now)
    conn.addTriple('<http:object>', '<http:time>', now.time())
    then = datetime.time(11, 34, 16, 386672)
    # NOTE(review): subject '<http:objecT>' (capital T) looks unintentional,
    # but lookups below match by predicate only, so it is harmless -- confirm.
    conn.addTriple('<http:objecT>', '<http:time2>', then)
    def checkit(name, the_type, value):
        # Fetch the first statement with the given predicate and compare the
        # converted Python value exactly.
        obj = next(conn.getStatements(None, '<http:%s>' % name, None)).\
            getObject().toPython()
        assert isinstance(obj, the_type)
        assert obj == value
    def time_check(name, the_type, value):
        # Like checkit, but tolerant of timezone info and sub-millisecond
        # microsecond roundoff introduced by the store.
        obj = next(conn.getStatements(None, '<http:%s>' % name, None)).\
            getObject().toPython()
        assert isinstance(obj, the_type)
        # Ignore time zone - 'value' is going to be naive
        obj = obj.replace(tzinfo=None)
        # Microseconds can have floating point roundoff...
        print('Original:', value, 'Store:', obj)
        assert obj == value or abs(obj.microsecond - value.microsecond) < 300
    checkit("bool", bool, True)
    checkit("str", str, 'Me')
    checkit("int", int, 1234)
    checkit("long", long, long(1234))
    checkit("date", datetime.date, now.date())
    time_check("datetime", datetime.datetime, now)
    time_check("time", datetime.time, now.time())
    time_check("time2", datetime.time, then)
def test_add_commit_size():
    """
    Test the add_commit_size setting.
    """
    conn = connect()
    nt_path = os.path.join(CURRENT_DIRECTORY, "kennedy-error.nt")
    base_uri = "http://example.org/example/local"

    def load_bad_file():
        # The file contains an error, so the add is expected to fail part-way.
        try:
            conn.add(nt_path, base=base_uri, format=RDFFormat.NTRIPLES)
        except RequestError:
            pass

    assert conn.add_commit_size is None
    conn.add_commit_size = 10
    assert conn.add_commit_size == 10
    assert conn.getAddCommitSize() == 10
    # With a commit size of 10, complete batches survive the failed load.
    load_bad_file()
    assert conn.size() == 20
    conn.clear()
    # A commit size of 0 disables batching, so a failed load commits nothing.
    conn.setAddCommitSize(0)
    assert conn.getAddCommitSize() is None
    load_bad_file()
    assert conn.size() == 0
def test_script_management():
    """Upload, list, invoke and delete a server-side stored-procedure script."""
    server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    scripts = len(server.listScripts())
    server.addScript("script.cl", test_script_management.code)
    assert len(server.listScripts()) == (scripts + 1)
    assert server.getScript("script.cl") == test_script_management.code
    conn = connect()
    # Invoke the stored procedure defined by the uploaded Lisp script.
    result = conn.callStoredProc("add-two-ints", "script.cl", 1, 2)
    print(result)
    assert int(result) == 3
    server.deleteScript("script.cl")
    assert len(server.listScripts()) == scripts
# The Lisp source uploaded above, attached as a function attribute so it
# lives next to the test without polluting the module namespace.
test_script_management.code = """
;; ag 4.x style where we let the def-stored-proc code generate code
;; to check if the correct number of arguments were passed in the
;; argument vector and if not to return an error indication
(def-stored-proc add-two-ints (a b)
;; takes two values and adds them
(+ a b))
"""
def test_namespace_management():
    """
    Test namespace management features
    """
    conn = connect()
    conn.clearNamespaces()
    namespaces = conn.getNamespaces()  # server defaults after a clear
    count = len(namespaces)
    # assert that all namespaces returned by getNamespaces can be gotten individually
    print(namespaces)
    for namespace, value in iteritems(namespaces):
        assert value == conn.getNamespace(namespace)
    # Note: 'kdy' and 'rltv' deliberately map to the same URI.
    test_spaces = {'kdy': 'http://www.franz.com/simple#',
                   'vcd': 'http://www.w3.org/2001/vcard-rdf/3.0#',
                   'ex': 'http://example.org/people/',
                   'ont': 'http://example.org/ontology/',
                   'rltv': 'http://www.franz.com/simple#'}
    for namespace, value in iteritems(test_spaces):
        print('calling setNamespace', namespace, value)
        conn.setNamespace(namespace, value)
    assert count + len(test_spaces) == len(conn.getNamespaces())
    for namespace, value in iteritems(test_spaces):
        assert value == conn.getNamespace(namespace)
    # Try adding a namespace that already exists
    for namespace, value in iteritems(test_spaces):
        conn.setNamespace(namespace, value)
    # Re-registering must not create duplicates.
    assert count + len(test_spaces) == len(conn.getNamespaces())
    # Remove the original namespaces
    for namespace in iterkeys(namespaces):
        conn.removeNamespace(namespace)
    # Assert they are gone
    assert len(test_spaces) == len(conn.getNamespaces())
    for namespace in iterkeys(namespaces):
        assert_raises(RequestError, conn.getNamespace, namespace)
    # Test clearing all namespaces
    conn.clearNamespaces(reset=False)
    assert len(conn.getNamespaces()) == 0
    # Add a bunch back and clear with reset
    for namespace, value in iteritems(test_spaces):
        conn.setNamespace(namespace, value)
    assert len(test_spaces) == len(conn.getNamespaces())
    # reset=True restores the server's default namespaces.
    conn.clearNamespaces(reset=True)
    assert namespaces == conn.getNamespaces()
def test_indices():
    """
    Test creating and deleting triple indices.
    """
    conn = connect()
    valid = conn.listValidIndices()
    current = conn.listIndices()
    assert "spogi" in valid
    assert "spogi" in current
    # Not every valid flavor is enabled by default.
    assert len(valid) > len(current)
    try:
        conn.addIndex("i")
        assert "i" in conn.listIndices()
    finally:
        conn.dropIndex("i")
    assert "i" not in conn.listIndices()
def test_indices_on_create():
    """
    Test passing an explicit index list to createRepository.
    """
    server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    catalog = server.openCatalog(CATALOG)
    # Start from a clean slate if a previous run left the store behind.
    if "optimal" in catalog.listRepositories():
        catalog.deleteRepository("optimal")
    wanted = ["posgi", "gspoi"]
    repository = catalog.createRepository("optimal", indices=wanted)
    repository.initialize()
    conn = repository.getConnection()
    assert set(conn.listIndices()) == set(wanted)
def test_optimize_indices():
    """Smoke-test optimizeIndices; a bigger store would be needed for a real check."""
    conn = test6()
    conn.optimizeIndices(wait=True)
def test_delete_duplicates():
    """Duplicate detection/removal under 'spog' and 'spo' matching modes."""
    graph1 = URI("http://www.example.com/graph#1")
    graph2 = URI("http://www.example.com/graph#2")
    with connect().session() as conn:
        # Each parent/child statement goes into the default graph, graph1,
        # and twice into graph2 (only the latter are spog-level duplicates).
        for parent, child in ((URIs.robert, URIs.roberta), (URIs.bob, URIs.bobby)):
            conn.add(parent, URIs.hasChild, child)
            for context in (graph1, graph2, graph2):
                conn.add(parent, URIs.hasChild, child, context)
        conn.commit()
        assert conn.size() == 8
        # Exact (s, p, o, g) duplicates: the repeated graph2 statements.
        assert conn.getDuplicateStatements("spog").rowCount() == 2
        conn.deleteDuplicates("spog")
        conn.commit()
        assert conn.size() == 6
        # Ignoring graphs, each statement still exists three times.
        assert conn.getDuplicateStatements("spo").rowCount() == 4
        conn.deleteDuplicates("spo")
        conn.commit()
        assert conn.size() == 2
class URIs(object):
## Create URIs for Bob and Robert (and kids)
robert = URI('http://example.org/people/robert')
| |
each image
_data = []
data_sizes = []
data_types = []
model_opts_3d = model_opts.copy()
for d_type in model_opts['obs_input_type']:
if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
if self._backbone == 'c3d':
model_opts_3d['target_dim'] = (112, 112)
model_opts_3d['process'] = False
features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
elif 'pose' in d_type:
path_to_pose, _ = get_path(save_folder='poses',
dataset=dataset,
save_root_folder='data/features')
features = get_pose(data['image'],
data['ped_id'],
data_type=data_type,
file_path=path_to_pose,
dataset=model_opts['dataset'])
feat_shape = features.shape[1:]
else:
features = data[d_type]
feat_shape = features.shape[1:]
_data.append(features)
data_sizes.append(feat_shape)
data_types.append(d_type)
# create the final data file to be returned
if self._generator:
_data = (DataGenerator(data=_data,
labels=data['crossing'],
data_sizes=data_sizes,
process=process,
global_pooling=self._global_pooling,
input_type_list=model_opts['obs_input_type'],
batch_size=model_opts['batch_size'],
shuffle=data_type != 'test',
to_fit=data_type != 'test'), data['crossing']) # set y to None
else:
_data = (_data, data['crossing'])
return {'data': _data,
'ped_id': data['ped_id'],
'tte': data['tte'],
'image': data['image'],
'data_params': {'data_types': data_types, 'data_sizes': data_sizes},
'count': {'neg_count': neg_count, 'pos_count': pos_count}}
def get_model(self, data_params):
    """Build the fusion network: a 3D-conv branch (C3D or I3D backbone) plus
    one RNN encoder per remaining modality, fused with temporal attention per
    branch and modality attention across branches, ending in a single sigmoid
    crossing/not-crossing output.

    Args:
        data_params: Dict with 'data_sizes' and 'data_types' as produced by
            get_data().
    Returns:
        A compiled-ready Keras Model (uncompiled).
    """
    return_sequence = True
    data_sizes = data_params['data_sizes']
    data_types = data_params['data_types']
    network_inputs = []
    encoder_outputs = []
    core_size = len(data_sizes)
    # First branch: the 3D convolutional backbone over the image modality.
    conv3d_model = self._3dconv()
    network_inputs.append(conv3d_model.input)
    attention_size = self._num_hidden_units
    if self._backbone == 'i3d':
        # I3D output keeps spatial dims; flatten before the embedding layer.
        x = Flatten(name='flatten_output')(conv3d_model.output)
        x = Dense(name='emb_' + self._backbone,
                  units=attention_size,
                  activation='sigmoid')(x)
    else:
        x = conv3d_model.output
        x = Dense(name='emb_' + self._backbone,
                  units=attention_size,
                  activation='sigmoid')(x)
    encoder_outputs.append(x)
    # Remaining branches: one RNN encoder per non-image modality.
    for i in range(1, core_size):
        network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
        encoder_outputs.append(
            self._rnn(name='enc_' + data_types[i], r_sequence=return_sequence)(network_inputs[i]))
    if len(encoder_outputs) > 1:
        att_enc_out = []
        x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[0])
        att_enc_out.append(x)  # first output is from the 3D conv network
        # for recurrent branches apply many-to-one attention block
        for i, enc_out in enumerate(encoder_outputs[1:]):
            x = attention_3d_block(enc_out, dense_size=attention_size, modality='_' + data_types[i])
            x = Dropout(0.5)(x)
            x = Lambda(lambda x: K.expand_dims(x, axis=1))(x)
            att_enc_out.append(x)
        # apply many-to-one attention block to the attended modalities
        x = Concatenate(name='concat_modalities', axis=1)(att_enc_out)
        encodings = attention_3d_block(x, dense_size=attention_size, modality='_modality')
    else:
        encodings = encoder_outputs[0]
    model_output = Dense(1, activation='sigmoid',
                         name='output_dense',
                         activity_regularizer=regularizers.l2(0.001))(encodings)
    net_model = Model(inputs=network_inputs,
                      outputs=model_output)
    net_model.summary()
    # plot_model(net_model, to_file='MultiRNN3D_ATT.png')
    return net_model
class PCPA_2D(ActionPredict):
    """PCPA variant in which every modality (including the visual one, fed as
    precomputed features) is encoded by a recurrent branch instead of a 3D
    convolutional backbone; branches are fused with temporal attention per
    branch followed by modality attention.
    """

    def __init__(self,
                 num_hidden_units=256,
                 cell_type='gru',
                 **kwargs):
        """
        Args:
            num_hidden_units: Number of recurrent hidden units
            cell_type: Type of RNN cell ('gru' or 'lstm')
            **kwargs: Forwarded to ActionPredict
        """
        super().__init__(**kwargs)
        # Network parameters
        self._num_hidden_units = num_hidden_units
        self._rnn = self._gru if cell_type == 'gru' else self._lstm
        self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
        # Kept for interface compatibility with the 3D PCPA variants even
        # though get_model() below builds a purely recurrent network.
        self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet

    def get_data(self, data_type, data_raw, model_opts):
        """Assemble model inputs for the given split ('train'/'val'/'test').

        Returns a dict holding the inputs (or a DataGenerator when
        model_opts['generator'] is set), pedestrian ids, time-to-event,
        image paths, per-branch shapes/types and class counts.
        """
        # The backbones consume fixed 16-frame observation clips.
        assert model_opts['obs_length'] == 16
        model_opts['normalize_boxes'] = False
        self._generator = model_opts.get('generator', False)
        process = model_opts.get('process', True)
        dataset = model_opts['dataset']
        data, neg_count, pos_count = self.get_data_sequence(
            data_type, data_raw, model_opts)
        data_type_sizes_dict = {'box': data['box'].shape[1:]}
        if 'speed' in data:
            data_type_sizes_dict['speed'] = data['speed'].shape[1:]
        # Collected features, their shapes and their type names, one entry
        # per requested observation input.
        features_list = []
        shapes = []
        types = []
        opts_3d = model_opts.copy()
        for input_type in model_opts['obs_input_type']:
            image_like = any(
                token in input_type for token in ('local', 'context', 'mask'))
            if image_like:
                if self._backbone == 'c3d':
                    # C3D expects 112x112 inputs.
                    opts_3d['target_dim'] = (112, 112)
                opts_3d['process'] = False
                features, shape = self.get_context_data(
                    opts_3d, data, data_type, input_type)
            elif 'pose' in input_type:
                # Pose features are precomputed and loaded from disk.
                pose_path, _ = get_path(save_folder='poses',
                                        dataset=dataset,
                                        save_root_folder='data/features')
                features = get_pose(data['image'],
                                    data['ped_id'],
                                    data_type=data_type,
                                    file_path=pose_path,
                                    dataset=model_opts['dataset'])
                shape = features.shape[1:]
            else:
                # Non-image modalities (e.g. box, speed) are used as-is.
                features = data[input_type]
                shape = features.shape[1:]
            features_list.append(features)
            shapes.append(shape)
            types.append(input_type)
        if self._generator:
            final_data = (DataGenerator(data=features_list,
                                        labels=data['crossing'],
                                        data_sizes=shapes,
                                        process=process,
                                        global_pooling=None,
                                        input_type_list=model_opts['obs_input_type'],
                                        batch_size=model_opts['batch_size'],
                                        shuffle=data_type != 'test',
                                        to_fit=data_type != 'test'),
                          data['crossing'])
        else:
            final_data = (features_list, data['crossing'])
        return {'data': final_data,
                'ped_id': data['ped_id'],
                'tte': data['tte'],
                'image': data['image'],
                'data_params': {'data_types': types, 'data_sizes': shapes},
                'count': {'neg_count': neg_count, 'pos_count': pos_count}}

    def get_model(self, data_params):
        """Create the Keras model: one RNN encoder per input modality,
        temporal attention on each branch, then modality attention."""
        return_sequence = True
        data_sizes = data_params['data_sizes']
        data_types = data_params['data_types']
        core_size = len(data_sizes)
        attention_size = self._num_hidden_units
        network_inputs = []
        encoder_outputs = []
        # First branch: precomputed CNN features, encoded recurrently.
        network_inputs.append(
            Input(shape=data_sizes[0], name='input_cnn_' + data_types[0]))
        encoder_outputs.append(
            self._rnn(name='enc_' + data_types[0],
                      r_sequence=return_sequence)(network_inputs[0]))
        # Remaining branches: one RNN encoder per modality.
        for i in range(1, core_size):
            network_inputs.append(
                Input(shape=data_sizes[i], name='input_' + data_types[i]))
            encoder_outputs.append(
                self._rnn(name='enc_' + data_types[i],
                          r_sequence=return_sequence)(network_inputs[i]))
        if len(encoder_outputs) > 1:
            att_enc_out = []
            # Many-to-one temporal attention on every branch.
            for i, enc_out in enumerate(encoder_outputs):
                x = attention_3d_block(enc_out, dense_size=attention_size,
                                       modality='_' + data_types[i])
                x = Dropout(0.5)(x)
                x = Lambda(lambda t: K.expand_dims(t, axis=1))(x)
                att_enc_out.append(x)
            # Attention across the attended modalities.
            x = Concatenate(name='concat_modalities', axis=1)(att_enc_out)
            encodings = attention_3d_block(x, dense_size=attention_size,
                                           modality='_modality')
        else:
            encodings = encoder_outputs[0]
        model_output = Dense(1, activation='sigmoid',
                             name='output_dense',
                             activity_regularizer=regularizers.l2(0.001))(encodings)
        net_model = Model(inputs=network_inputs, outputs=model_output)
        net_model.summary()
        plot_model(net_model, to_file='PCPA_2D.png')
        return net_model
class MASK_PCPA_2D(ActionPredict):
"""
Class init function
Args:
num_hidden_units: Number of recurrent hidden layers
cell_type: Type of RNN cell
**kwargs: Description
"""
def __init__(self,
             num_hidden_units=256,
             cell_type='gru',
             **kwargs):
    """
    Args:
        num_hidden_units: Number of recurrent hidden units
        cell_type: Type of RNN cell ('gru' or 'lstm')
        **kwargs: Forwarded to ActionPredict
    """
    super().__init__(**kwargs)
    # Network parameters
    self._num_hidden_units = num_hidden_units
    self._rnn = self._gru if cell_type == 'gru' else self._lstm
    self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
    # Kept for interface compatibility with the 3D PCPA variants.
    self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
def get_data(self, data_type, data_raw, model_opts):
    """Prepare model inputs for the given split ('train'/'val'/'test').

    Returns a dict with the data (or a DataGenerator when
    model_opts['generator'] is set), pedestrian ids, time-to-event,
    image paths, per-branch shapes/types and class counts.
    """
    # The backbones consume fixed 16-frame observation clips.
    assert model_opts['obs_length'] == 16
    model_opts['normalize_boxes'] = False
    self._generator = model_opts.get('generator', False)
    data_type_sizes_dict = {}
    process = model_opts.get('process', True)
    dataset = model_opts['dataset']
    data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
    data_type_sizes_dict['box'] = data['box'].shape[1:]
    if 'speed' in data.keys():
        data_type_sizes_dict['speed'] = data['speed'].shape[1:]
    # Store the type and size of each input branch.
    _data = []
    data_sizes = []
    data_types = []
    model_opts_3d = model_opts.copy()
    for d_type in model_opts['obs_input_type']:
        if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
            # Image-like modalities go through the context-feature pipeline.
            if self._backbone == 'c3d':
                # C3D expects 112x112 inputs.
                model_opts_3d['target_dim'] = (112, 112)
            model_opts_3d['process'] = False
            features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
        elif 'pose' in d_type:
            # Pose features are precomputed and loaded from disk.
            path_to_pose, _ = get_path(save_folder='poses',
                                       dataset=dataset,
                                       save_root_folder='data/features')
            features = get_pose(data['image'],
                                data['ped_id'],
                                data_type=data_type,
                                file_path=path_to_pose,
                                dataset=model_opts['dataset'])
            feat_shape = features.shape[1:]
        else:
            # Non-image modalities (e.g. box, speed) are used as-is.
            features = data[d_type]
            feat_shape = features.shape[1:]
        _data.append(features)
        data_sizes.append(feat_shape)
        data_types.append(d_type)
    # create the final data file to be returned
    if self._generator:
        _data = (DataGenerator(data=_data,
                               labels=data['crossing'],
                               data_sizes=data_sizes,
                               process=process,
                               global_pooling=None,
                               input_type_list=model_opts['obs_input_type'],
                               batch_size=model_opts['batch_size'],
                               shuffle=data_type != 'test',
                               to_fit=data_type != 'test'), data['crossing'])  # set y to None
    else:
        _data = (_data, data['crossing'])
    return {'data': _data,
            'ped_id': data['ped_id'],
            'tte': data['tte'],
            'image': data['image'],
            'data_params': {'data_types': data_types, 'data_sizes': data_sizes},
            'count': {'neg_count': neg_count, 'pos_count': pos_count}}
def get_model(self, data_params):
return_sequence = True
data_sizes = data_params['data_sizes']
data_types = data_params['data_types']
network_inputs = []
encoder_outputs = []
core_size = len(data_sizes)
# conv3d_model | |
#########################################################################
# Dicomifier - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#########################################################################
import json
import itertools
import pickle
import re
import numpy
import odil
from .. import logger
from . import siemens
def get_stacks(data_sets, extra_splitters=None):
    """ Return the stacks contained in the data sets. The result is a dictionary
        in which the values are pairs of (data_set, frame_index) (in the case
        of single-frame data sets, frame_index is None), and in which the keys
        are tuples of selectors. In this context, a selector is defined as
        a pair of (group sequence, group, tag) (group sequence and group being
        None for single-frame data sets), and a value.

        :param data_sets: list of dicom data sets
        :param extra_splitters: additional splitters to be used when building
            stacks
    """
    splitters = _get_splitters(data_sets)
    if extra_splitters:
        splitters.extend(extra_splitters)
    stacks = {}
    def build_selector(
            data_set, getter, group_sequence, group, tag, in_stack_position):
        # Build one ((group_sequence, group, tag), value) selector, or None
        # when the tag yields no value for this data set / frame.
        selector = None
        if getter is get_dimension_index:
            # get_dimension_index needs the extra in_stack_position argument;
            # adapt it to the common (data_set, tag) getter signature.
            original_getter = getter
            getter = lambda d,t: original_getter(d, t, in_stack_position)
        if group is not None and group in data_set:
            value = getter(data_set[group][0], tag)
        else:
            value = getter(data_set, tag)
        if value is not None:
            selector = ((group_sequence, group, tag), tuple(value))
        return selector
    for data_set in data_sets:
        # Each entry of `frames` is [containers, frame_index, group_sequences]:
        # single-frame data sets contribute one entry with frame_index None,
        # multi-frame data sets one entry per frame.
        frames = []
        in_stack_position = None
        if odil.registry.SharedFunctionalGroupsSequence not in data_set:
            frames.append([(data_set,), None, [None]])
        else:
            in_stack_position = get_in_stack_position_index(data_set)
            shared_groups = data_set[odil.registry.SharedFunctionalGroupsSequence][0]
            frames_groups = data_set[odil.registry.PerFrameFunctionalGroupsSequence]
            group_sequences = [
                odil.registry.SharedFunctionalGroupsSequence,
                odil.registry.PerFrameFunctionalGroupsSequence
            ]
            frames.extend([
                [(shared_groups, frame_groups), i, group_sequences]
                for i, frame_groups in enumerate(frames_groups)])
        for frame_infos, frame_index, group_sequences in frames:
            key = []
            for (group, tag), getter in splitters:
                if frame_index is None and group is not None:
                    # Use top-level tags only for single-frame data sets
                    continue
                elif frame_index is not None and group is None:
                    # Use frame group tags only for multi-frame data sets
                    continue
                for frame_info, group_sequence in zip(frame_infos, group_sequences):
                    selector = build_selector(
                        frame_info, getter, group_sequence, group, tag,
                        in_stack_position)
                    if selector is not None:
                        key.append(selector)
            stacks.setdefault(tuple(key), []).append((data_set, frame_index))
    # Normalize the keys so that all stacks have the same key fields:
    # selectors missing from a stack are padded with a None value.
    key_items = set()
    for key in stacks.keys():
        key_items.update(x[0] for x in key)
    normalized_keys = {}
    for key in stacks.keys():
        normalized_keys[key] = list(key)
        for key_item in key_items:
            if key_item not in [x[0] for x in key]:
                normalized_keys[key].append((key_item, None))
    for key, normalized_key in normalized_keys.items():
        normalized_keys[key] = tuple(normalized_key)
    stacks = { normalized_keys[key]: value for key, value in stacks.items() }
    # Simplify keys: remove those that have the same value for all stacks
    keys = numpy.asarray(list(stacks.keys()), dtype=object)
    to_keep = []
    for index in range(keys.shape[1]):
        unique_values = set(keys[:,index,:][:,1])
        # We need to keep these keys as they will be used in the sort() function
        is_sorting_key = keys[:,index,:][0][0][2] in [
            odil.registry.ImageOrientationPatient,
            odil.registry.DimensionIndexValues
        ]
        if len(unique_values) > 1 or is_sorting_key:
            to_keep.append(index)
    stacks = {
        tuple(v for (i, v) in enumerate(stack_key) if i in to_keep): stack_value
        for stack_key, stack_value in stacks.items()
    }
    return stacks
def sort(key, frames):
    """ Sort the frames of a stack according to the items present in the
        stack key. Sorting happens in place; when no usable sort tag is
        found, the frames are left untouched and a warning is logged.
    """
    if len(frames) <= 1:
        return
    ordering = None
    for (_, _, tag), value in key:
        if tag == odil.registry.DimensionIndexValues:
            # sort by In-Stack Position
            position = []
            for data_set, frame_index in frames:
                position_index = get_in_stack_position_index(data_set)
                frame = data_set[
                    odil.registry.PerFrameFunctionalGroupsSequence][frame_index]
                frame_content = frame[odil.registry.FrameContentSequence][0]
                position.append(
                    frame_content[odil.registry.DimensionIndexValues][position_index])
            # Map each frame (by identity) to its rank in stack-position order.
            keydict = dict(zip((id(x) for x in frames), numpy.argsort(position)))
            ordering = lambda x: keydict[id(x)]
            break
        if tag == odil.registry.ImageOrientationPatient:
            data_set, frame_idx = frames[0]
            if get_frame_position(data_set, frame_idx) is not None:
                # Sort by position projected on the slice normal.
                normal = numpy.cross(value[:3], value[3:])
                ordering = lambda x: numpy.dot(get_frame_position(*x), normal)
                break
            else:
                logger.warning(
                    "Orientation found but no position available to sort frames")
    if ordering is not None:
        frames.sort(key=ordering)
    else:
        logger.warning(
            "Cannot sort frames for the moment, available tags : {}".format(
                [x[0][2].get_name() for x in key]))
def get_frame_position(data_set, frame_index):
    """ Get the position (Image Position (Patient)) of the specified frame,
        or None when it is absent. Multi-frame data sets are looked up
        through the per-frame functional groups; single-frame data sets
        carry the position at top level.
    """
    container = data_set
    if odil.registry.PerFrameFunctionalGroupsSequence in data_set:
        frame = data_set[odil.registry.PerFrameFunctionalGroupsSequence][frame_index]
        if odil.registry.PlanePositionSequence not in frame:
            return None
        container = frame[odil.registry.PlanePositionSequence][0]
    if odil.registry.ImagePositionPatient in container:
        return container[odil.registry.ImagePositionPatient]
    return None
def get_in_stack_position_index(data_set):
    """ Return the position of the In Stack Position element inside the
        Dimension Index, or None when it is absent or ambiguous.
    """
    if (odil.registry.DimensionIndexSequence not in data_set
            or data_set.empty(odil.registry.DimensionIndexSequence)):
        return None
    matches = set()
    for index, item in enumerate(data_set[odil.registry.DimensionIndexSequence]):
        if odil.registry.DimensionIndexPointer not in item:
            continue
        pointer = item[odil.registry.DimensionIndexPointer][0]
        if odil.Tag(pointer) == odil.registry.InStackPositionNumber:
            matches.add(index)
    # Exactly one unambiguous match is required.
    return next(iter(matches)) if len(matches) == 1 else None
class OrientationGetter(object):
    """ Return the ideal orientation of a data set: orientations whose plane
        normals differ only by a small numerical wobble are mapped to the
        first orientation seen, so they do not split stacks.
    """
    def __init__(self):
        # Maps plane normal (as a tuple) -> first orientation seen with it.
        self._orientations = {}

    def __call__(self, data_set, tag):
        value = data_set.get(tag)
        if value is None:
            return None
        # WARNING: a rotating plane will yield the same normal
        normal = numpy.cross(value[:3], value[3:])
        for known_normal, known_value in self._orientations.items():
            if OrientationGetter._comparator(normal, known_normal):
                # Close enough to an orientation already seen: reuse it.
                return tuple(known_value)
        self._orientations[tuple(normal)] = value
        return tuple(value)

    @property
    def orientations(self):
        return self._orientations

    @staticmethod
    def _comparator(o1, o2, epsilon=0.05):
        # Two empty orientations match; empty never matches non-empty;
        # otherwise compare with an infinity-norm tolerance.
        empty_1 = numpy.shape(o1) == (0,)
        empty_2 = numpy.shape(o2) == (0,)
        if empty_1 or empty_2:
            return empty_1 and empty_2
        return (
            numpy.linalg.norm(numpy.subtract(o1, o2), numpy.inf) <= epsilon)
def get_dimension_index(data_set, tag, in_stack_position_index):
    """ Return the Dimension Index Values with the In-Stack Position entry
        removed, so that frames belonging to different volumes can be told
        apart.

        :param in_stack_position_index: index of the In Stack Position element
            within the Dimension Index tuple
    """
    value = data_set.get(tag)
    if value is None:
        return None
    if in_stack_position_index is None:
        raise Exception(
            "Dimension Index Values found but InStackPosition is missing")
    remaining = list(value)
    del remaining[in_stack_position_index]
    return tuple(remaining)
def get_diffusion(data_set, tag):
    """ Get b-value and gradient diffusion from the data_set.

        Returns (b_value, sensitization) where sensitization is the gradient
        orientation (DIRECTIONAL) or the six b-matrix coefficients (BMATRIX);
        returns None when the tag is absent or the directionality is
        ISOTROPIC/NONE. Raises for unknown directionality values.
    """
    value = data_set.get(tag)
    if value is not None:
        b_value = value[0][odil.registry.DiffusionBValue][0]
        directionality = value[0][odil.registry.DiffusionDirectionality][0]
        sensitization = None
        if directionality == b"DIRECTIONAL":
            item = value[0][odil.registry.DiffusionGradientDirectionSequence][0]
            sensitization = tuple(item[odil.registry.DiffusionGradientOrientation])
        elif directionality == b"BMATRIX":
            # The six upper-triangle coefficients of the symmetric b-matrix.
            item = value[0][odil.registry.DiffusionBMatrixSequence][0]
            sensitization = tuple(
                item[getattr(odil.registry, "DiffusionBValue{}".format(x))][0]
                for x in ["XX", "XY", "XZ", "YY", "YZ", "ZZ"])
        elif directionality == b"ISOTROPIC" or directionality == b"NONE":
            return None
        else:
            raise Exception(
                "Unknown directionality: {}".format(directionality))
        value = (b_value, sensitization)
    return value
def frame_group_index_getter(data_set, tag):
    """ Return bruker_to_dicom-specific frame group information, stored by
        Dicomifier in the Contribution Description of a dedicated
        contributing-equipment item.
    """
    value = data_set.get(tag)
    if value is None:
        return value
    matches = [
        item for item in value
        if (
            item[odil.registry.Manufacturer][0] == b"Dicomifier"
            and item[odil.registry.ManufacturerModelName][0] == b"Bruker Frame Group index")]
    if not matches:
        return None
    if len(matches) > 1:
        raise Exception("Multiple Frame Group index entries found")
    description = json.loads(
        matches[0][odil.registry.ContributionDescription][0].decode())
    return tuple(tuple(entry) for entry in description)
def ge_diffusion_getter(data_set, tag):
    """ Return GE-specific diffusion data as (direction, b_value), or None
        when the data set is not from GE or lacks usable diffusion
        information.
    """
    if data_set[odil.registry.Manufacturer][0] != b"GE MEDICAL SYSTEMS":
        return None
    # GEMS_ACQU_01 contains directions, GEMS_PARM_01 contains b-value
    gems_acq = _find_private_creator(data_set, b"GEMS_ACQU_01", 0x0019)
    gems_parm = _find_private_creator(data_set, b"GEMS_PARM_01", 0x0043)
    if gems_acq is None or gems_parm is None:
        # BUG FIX: previously a missing private creator led to a TypeError
        # (None used in tag arithmetic / numpy.linalg.norm) instead of a
        # clean "no diffusion data" result.
        return None
    direction = tuple(
        data_set.get(odil.Tag(gems_acq+x), [None])[0]
        for x in [0xbb, 0xbc, 0xbd])
    if not all(isinstance(x, (int, float)) for x in direction):
        return None
    # WARNING: this is the *maximal* b-value. The real b-value is determined
    # by the square of the norm of the gradient direction (at on RX29.0).
    # This is still not enough for multiple b=0 in the same series
    maximal_b_value = data_set.get(odil.Tag(gems_parm+0x39), [None])[0]
    if maximal_b_value is None:
        maximal_b_value = data_set.get(odil.registry.DiffusionBValue, [None])[0]
    if not isinstance(maximal_b_value, (int, float)):
        # Covers both "no b-value found" (None) and non-numeric values.
        return None
    # b-value, rounded to nearest multiple of 5
    b_value = maximal_b_value * numpy.linalg.norm(direction)**2
    b_value = 5 * round(b_value/5)
    return direction, b_value
def ge_complex_image_component_getter(data_set, tag):
    """ Return GE-specific Complex Image Component data, or None when the
        data set is not from GE or lacks the GEMS_PARM_01 private group.
    """
    if data_set[odil.registry.Manufacturer][0] != b"GE MEDICAL SYSTEMS":
        return None
    private_group = _find_private_creator(data_set, b"GEMS_PARM_01", 0x0043)
    if private_group is None:
        return None
    component = data_set.get(odil.Tag(private_group + 0x2f), [None])[0]
    return (component,)
def siemens_coil_getter(data_set, tag):
""" Return Siemens-specific coil identifier.
"""
if data_set[odil.registry.Manufacturer][0] != b"SIEMENS":
return None
if data_set[odil.registry.Modality][0] != b"MR":
return None
csa_header = _find_private_creator(data_set, b"SIEMENS CSA HEADER", 0x0029)
| |
"""HTTP :mimetype:`multipart/*`-encoded file streaming.
"""
import abc
import collections.abc
import io
import os
import stat
import typing as ty
import urllib.parse
import uuid
from . import filescanner
from . import utils
if ty.TYPE_CHECKING: #COMPAT: Py3.7-
import typing_extensions as ty_ext
else:
from . import utils as ty_ext
gen_bytes_t = ty.Generator[bytes, ty.Any, ty.Any]
match_spec_t = ty.Optional[filescanner.match_spec_t[ty.AnyStr]]
default_chunk_size = io.DEFAULT_BUFFER_SIZE
def content_disposition_headers(filename: str, disptype: str = "form-data; name=\"file\"") \
        -> ty.Dict[str, str]:
    """Returns a dict containing the MIME content-disposition header for a file.

    .. code-block:: python

        >>> content_disposition_headers('example.txt')
        {'Content-Disposition': 'form-data; name="file"; filename="example.txt"'}

        >>> content_disposition_headers('example.txt', 'attachment')
        {'Content-Disposition': 'attachment; filename="example.txt"'}

    Parameters
    ----------
    filename : str
        Filename to retrieve the MIME content-disposition for; it is
        percent-encoded so that any character is safe to embed
    disptype : str
        The disposition type to use for the file
    """
    quoted_name = urllib.parse.quote(filename, safe='')
    return {'Content-Disposition': f'{disptype}; filename="{quoted_name}"'}
def content_type_headers(filename: str, content_type: ty.Optional[str] = None) \
        -> ty.Dict[str, str]:
    """Returns a dict with the content-type header for a file.

    Guesses the mimetype of a filename and returns a dict containing the
    ``Content-Type`` header.

    .. code-block:: python

        >>> content_type_headers('example.txt')
        {'Content-Type': 'text/plain'}

        >>> content_type_headers('example.jpeg')
        {'Content-Type': 'image/jpeg'}

        >>> content_type_headers('example')
        {'Content-Type': 'application/octet-stream'}

    Parameters
    ----------
    filename
        Filename to guess the content-type for
    content_type
        The Content-Type to use; if not set a content type will be guessed
    """
    # Fall back to guessing from the filename when no (truthy) type is given
    return {'Content-Type': content_type or utils.guess_mimetype(filename)}
def multipart_content_type_headers(boundary: str, subtype: str = 'mixed') -> ty.Dict[str, str]:
    """Creates a MIME multipart header with the given configuration.

    Returns a dict containing a MIME multipart ``Content-Type`` header
    using the given boundary.

    .. code-block:: python

        >>> multipart_content_type_headers('8K5rNKlLQVyreRNncxOTeg')
        {'Content-Type': 'multipart/mixed; boundary="8K5rNKlLQVyreRNncxOTeg"'}

        >>> multipart_content_type_headers('8K5rNKlLQVyreRNncxOTeg', 'alt')
        {'Content-Type': 'multipart/alt; boundary="8K5rNKlLQVyreRNncxOTeg"'}

    Parameters
    ----------
    boundary
        The content delimiter to put into the header
    subtype
        The subtype in :mimetype:`multipart/*`-domain to put into the header
    """
    return {'Content-Type': 'multipart/{0}; boundary="{1}"'.format(subtype, boundary)}
class StreamBase(metaclass=abc.ABCMeta):
    """Generator that encodes multipart/form-data.

    An abstract buffered generator class which encodes
    :mimetype:`multipart/form-data`.

    Parameters
    ----------
    name
        The name of the file to encode
    chunk_size
        The maximum size that any single file chunk may have in bytes
    """
    __slots__ = ("chunk_size", "name", "_boundary", "_headers")
    #chunk_size: int
    #name: str
    #_boundary: str
    #_headers: ty.Dict[str, str]

    def __init__(self, name: str, chunk_size: int = default_chunk_size) -> None:
        self.chunk_size = chunk_size
        self.name = name
        # A random hex string is effectively guaranteed not to collide with
        # the payload, so it is safe to use as the multipart boundary.
        self._boundary = uuid.uuid4().hex

        headers = content_disposition_headers(name)
        headers.update(multipart_content_type_headers(self._boundary, subtype='form-data'))
        self._headers = headers

        super().__init__()

    def headers(self) -> ty.Dict[str, str]:
        """Return a copy of the envelope's HTTP headers."""
        return self._headers.copy()

    @abc.abstractmethod
    def _body(self) -> gen_bytes_t:
        """Yields the body of this stream with chunks of undefined size"""

    def body(self) -> gen_bytes_t:
        """Yields the body of this stream, re-chunked to ``chunk_size``."""
        yield from self._gen_chunks(self._body())

    def _gen_headers(self, headers: ty.Dict[str, str]) -> gen_bytes_t:
        """Yields the HTTP header text for some content

        Parameters
        ----------
        headers
            The headers to yield
        """
        # Emit headers in a deterministic (sorted) order
        for header_name in sorted(headers):
            header_value = headers[header_name]
            yield header_name.encode("ascii") + b": " + header_value.encode("utf-8") + b"\r\n"
        yield b"\r\n"

    def _gen_chunks(self, gen: ty.Iterable[bytes]) -> gen_bytes_t:
        """Generates byte chunks of a given size.

        Takes a bytes generator and yields chunks of a maximum of
        ``chunk_size`` bytes.

        Parameters
        ----------
        gen
            The bytes generator that produces the bytes
        """
        size = self.chunk_size
        for data in gen:
            #PERF: This is zero-copy if `len(data) <= size`
            offset = 0
            length = len(data)
            while offset < length:
                yield data[offset:(offset + size)]
                offset += size

    def _gen_item_start(self) -> gen_bytes_t:
        """Yields the opening delimiter line of a multipart section."""
        yield b"--" + self._boundary.encode("ascii") + b"\r\n"

    def _gen_item_end(self) -> gen_bytes_t:
        """Yields the terminating CRLF of a multipart section."""
        yield b"\r\n"

    def _gen_end(self) -> gen_bytes_t:
        """Yields the closing text of a multipart envelope."""
        yield b"--" + self._boundary.encode("ascii") + b"--\r\n"
# mypy sucks… :-(gh/python/mypy#8705)
class _StreamFileMixinProto(ty_ext.Protocol):
    """Structural type describing what `StreamFileMixin` expects of its host.

    The mixin's methods annotate ``self`` with this protocol so that the
    type checker can verify the attributes/methods used without an actual
    inheritance relationship (workaround — see gh/python/mypy#8705).
    """
    @property
    def chunk_size(self) -> int:
        ...

    def _gen_headers(self, headers: ty.Dict[str, str]) -> gen_bytes_t:
        ...

    def _gen_item_start(self) -> gen_bytes_t:
        ...

    def _gen_item_end(self) -> gen_bytes_t:
        ...

    def _gen_file_start(self: "_StreamFileMixinProto", filename: str,
                        file_location: ty.Optional[str] = None,
                        content_type: ty.Optional[str] = None) -> gen_bytes_t:
        ...

    def _gen_file_chunks(self: "_StreamFileMixinProto", file: ty.IO[bytes]) -> gen_bytes_t:
        ...

    def _gen_file_end(self: "_StreamFileMixinProto") -> gen_bytes_t:
        ...
class StreamFileMixin:
    """Mixin that adds per-file multipart section generation to a stream.

    Host classes must satisfy `_StreamFileMixinProto`.
    """
    __slots__ = ()

    def _gen_file(self: _StreamFileMixinProto, filename: str, file_location: ty.Optional[str] = None,
                  file: ty.Optional[ty.IO[bytes]] = None,
                  content_type: ty.Optional[str] = None) -> gen_bytes_t:
        """Yields the entire contents of a file.

        Parameters
        ----------
        filename
            Filename of the file being opened and added to the HTTP body
        file_location
            Full path to the file being added, including the filename
        file
            The binary file-like object whose contents should be streamed

            No contents will be streamed if this is ``None``.
        content_type
            The Content-Type of the file; if not set a value will be guessed
        """
        yield from self._gen_file_start(filename, file_location, content_type)
        if file:
            yield from self._gen_file_chunks(file)
        yield from self._gen_file_end()

    def _gen_file_start(self: _StreamFileMixinProto, filename: str,
                        file_location: ty.Optional[str] = None,
                        content_type: ty.Optional[str] = None) -> gen_bytes_t:
        """Yields the opening text of a file section in multipart HTTP.

        Parameters
        ----------
        filename
            Filename of the file being opened and added to the HTTP body
        file_location
            Full path to the file being added, including the filename
        content_type
            The Content-Type of the file; if not set a value will be guessed
        """
        yield from self._gen_item_start()

        # Use forward slashes in the advertised name regardless of platform
        headers = content_disposition_headers(filename.replace(os.sep, "/"))
        headers.update(content_type_headers(filename, content_type))
        if file_location and os.path.isabs(file_location):
            # An absolute source path enables the daemon's no-copy filestore
            headers["Abspath"] = file_location
        yield from self._gen_headers(headers)

    def _gen_file_chunks(self: _StreamFileMixinProto, file: ty.IO[bytes]) -> gen_bytes_t:
        """Yields chunks of a file.

        Parameters
        ----------
        file
            The file to break into chunks
            (must be an open file or have the ``readinto`` method)
        """
        # Read until EOF, i.e. until `read` returns an empty buffer
        for buf in iter(lambda: file.read(self.chunk_size), b""):
            yield buf

    def _gen_file_end(self: _StreamFileMixinProto) -> gen_bytes_t:
        """Yields the end text of a file section in HTTP multipart encoding."""
        return self._gen_item_end()
class FilesStream(StreamBase, StreamFileMixin):
    """Generator that encodes multiples files into HTTP multipart.

    A buffered generator that encodes an array of files as
    :mimetype:`multipart/form-data`. This is a concrete implementation of
    :class:`~ipfsapi.multipart.StreamBase`.

    Parameters
    ----------
    files
        The name, file object or file descriptor of the file to encode; may also
        be a list of several items to allow for more efficient batch processing
    chunk_size
        The maximum size that any single file chunk may have in bytes
    """
    __slots__ = ("files",)
    #files: ty.Union[utils.clean_file_t, ty.Iterable[utils.clean_file_t]]

    def __init__(self, files: ty.Union[utils.clean_file_t, ty.Iterable[utils.clean_file_t]],
                 name: str = "files", chunk_size: int = default_chunk_size) -> None:
        self.files = utils.clean_files(files)

        super().__init__(name, chunk_size=chunk_size)

    def _body(self) -> gen_bytes_t:
        """Yields the body of the buffered file."""
        for file, need_close in self.files:
            try:
                # File objects opened from a path expose their origin via
                # `.name`; anonymous buffers (BytesIO, sockets, …) do not.
                if hasattr(file, "name"):
                    file_location = file.name  # type: ty.Optional[str]
                    filename = os.path.basename(file.name)  # type: str
                else:
                    file_location = None
                    filename = ''

                yield from self._gen_file(filename, file_location, file)
            finally:
                if need_close:
                    file.close()

        yield from self._gen_end()
class DirectoryStream(StreamBase, StreamFileMixin, ty.Generic[ty.AnyStr]):
    """Generator that encodes a directory into HTTP multipart.

    A buffered generator that encodes an array of files as
    :mimetype:`multipart/form-data`. This is a concrete implementation of
    :class:`~ipfshttpclient4ipwb4ipwb.multipart.StreamBase`.

    Parameters
    ----------
    directory
        The filepath or file descriptor of the directory to encode

        File descriptors are only supported on Unix.
    dirpath
        The path to the directory being uploaded, if this is absolute it will be
        included in a header for each emitted file and enables use of the no-copy
        filestore facilities

        If the *wrap_with_directory* attribute is ``True`` during upload the
        string ``dirpath.name if dirpath else '_'`` will be visible as the name
        of the uploaded directory within its wrapper.
    chunk_size
        The maximum size that any single file chunk may have in bytes
    patterns
        One or several glob patterns or compiled regular expression objects used
        to determine which files to upload

        Only files or directories matched by any of these patterns will be
        uploaded. If a directory is not matched directly but contains at least
        one file or directory below it that is, it will be included in the upload
        as well but will not include other items. If a directory matches any
        of the given patterns and *recursive* is then it, as well as all other
        files and directories below it, will be included as well.
    period_special
        Whether a leading period in file/directory names should be matchable by
        ``*``, ``?`` and ``[…]`` – traditionally they are not, but many modern
        shells allow one to disable this behaviour
    """
    __slots__ = ("abspath", "follow_symlinks", "scanner")
    #abspath: ty.Optional[ty.AnyStr]
    #follow_symlinks: bool
    #scanner: filescanner.walk[ty.AnyStr]

    def __init__(self, directory: ty.Union[ty.AnyStr, utils.PathLike[ty.AnyStr], int], *,
                 chunk_size: int = default_chunk_size,
                 follow_symlinks: bool = False,
                 patterns: match_spec_t[ty.AnyStr] = None,
                 period_special: bool = True,
                 recursive: bool = False) -> None:
        self.follow_symlinks = follow_symlinks

        # Normalize path-like values; ints are raw directory file descriptors
        if not isinstance(directory, int):
            directory = utils.convert_path(directory)

        # Create file scanner from parameters
        self.scanner = filescanner.walk(
            directory, patterns, follow_symlinks=follow_symlinks,
            period_special=period_special, recursive=recursive
        )  # type: filescanner.walk[ty.AnyStr]

        # Figure out the absolute path of the directory added
        # (a file descriptor has no path, so Abspath headers stay disabled)
        self.abspath = None  # type: ty.Optional[ty.AnyStr]
        if not isinstance(directory, int):
            self.abspath = os.path.abspath(utils.convert_path(directory))

        # Figure out basename of the containing directory
        # (normpath is an acceptable approximation here)
        basename = "_"  # type: ty.Union[str, bytes]
        if not isinstance(directory, int):
            basename = os.fsdecode(os.path.basename(os.path.normpath(directory)))
        super().__init__(os.fsdecode(basename), chunk_size=chunk_size)

    def _body(self) -> gen_bytes_t:
        """Streams the contents of the selected directory as binary chunks."""
        try:
            # NOTE: renamed loop variable from `type` to `node_type` so the
            #       builtin is not shadowed
            for node_type, path, relpath, name, parentfd in self.scanner:
                relpath_unicode = os.fsdecode(relpath).replace(os.path.sep, "/")
                short_path = self.name + (("/" + relpath_unicode) if relpath_unicode != "." else "")
                if node_type is filescanner.FSNodeType.FILE:
                    try:
                        # Only regular files and directories can be uploaded
                        if parentfd is not None:
                            stat_data = os.stat(name, dir_fd=parentfd, follow_symlinks=self.follow_symlinks)
                        else:
                            stat_data = os.stat(path, follow_symlinks=self.follow_symlinks)
                        if not stat.S_ISREG(stat_data.st_mode):
                            continue

                        absolute_path = None  # type: ty.Optional[str]
                        if self.abspath is not None:
                            absolute_path = os.fsdecode(os.path.join(self.abspath, relpath))

                        if parentfd is None:
                            f_path_or_desc = path  # type: ty.Union[ty.AnyStr, int]
                        else:
                            f_path_or_desc = os.open(name, os.O_RDONLY | os.O_CLOEXEC, dir_fd=parentfd)
                        # Stream file to client
                        with open(f_path_or_desc, "rb") as file:
                            yield from self._gen_file(short_path, absolute_path, file)
                    except OSError:
                        # BUGFIX: removed stray debug `print(e)` that leaked to
                        # stdout for every skipped file.
                        # File might have disappeared between `os.walk()` and `open()`
                        pass
                elif node_type is filescanner.FSNodeType.DIRECTORY:
                    # Generate directory as special empty file
                    yield from self._gen_file(short_path, content_type="application/x-directory")

            yield from self._gen_end()
        finally:
            self.scanner.close()
class BytesFileStream(FilesStream):
"""A buffered generator that encodes bytes as file in
:mimetype:`multipart/form-data`.
Parameters
----------
data
The binary data to stream to the daemon
name
The filename to report to the daemon for this upload
chunk_size
The maximum size of a single data chunk
"""
__slots__ = ("data",)
#data: ty.Iterable[bytes]
def __init__(self, data: ty.Union[bytes, gen_bytes_t], name: str = "bytes", *,
chunk_size: int = default_chunk_size) -> None:
super().__init__([], name=name, chunk_size=chunk_size)
if not isinstance(data, bytes):
self.data = data # type: ty.Iterable[bytes]
else:
self.data = (data,)
def body(self) -> gen_bytes_t:
"""Yields the encoded body."""
yield from self._gen_file_start(self.name)
yield from self._gen_chunks(self.data)
yield from | |
== 'Failed':
warning_checks['#13:Check:vManage:Network Card type'] = [ check_analysis, check_action]
log_file_logger.error('#13: Check result: {}'.format(check_result))
log_file_logger.error('#13: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#13: Ethercardswith e1000 card types: {}\n'.format(eth_drivers))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#13: Check result: {}'.format(check_result))
log_file_logger.info('#13: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #13:Check:vManage:Network Card type. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#14:Check:vManage:Backup status
print(' #14:Checking:vManage:Backup status')
log_file_logger.info('#14:Check:vManage:Backup status')
writeFile(report_file, '#14:Check:vManage:Backup status\n\n')
try:
date_time_obj, check_result, check_analysis, check_action = warningCheckthree()
if check_result == 'Failed':
warning_checks['#14:Check:vManage:Backup status'] = [ check_analysis, check_action]
log_file_logger.error('#14: Check result: {}'.format(check_result))
log_file_logger.error('#14: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#14: Last Backup was performed on: {}\n'.format(date_time_obj))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#14: Check result: {}'.format(check_result))
log_file_logger.info('#14: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #14:Check:vManage:Backup status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#15:Check:vManage:Evaluate Neo4j performance
print(' #15:Checking:vManage:Evaluate Neo4j performance')
log_file_logger.info('#15:Check:vManage:Evaluate Neo4j performance')
writeFile(report_file, '#15:Check:vManage:Evaluate Neo4j performance\n\n')
try:
check_result, check_analysis, check_action = warningCheckfour()
if check_result == 'Failed':
warning_checks['#15:Check:vManage:Backup status'] = [ check_analysis, check_action]
log_file_logger.error('#15: Check result: {}'.format(check_result))
log_file_logger.error('#15: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#15: Check result: {}'.format(check_result))
log_file_logger.info('#15: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #15:Check:vManage:Evaluate Neo4j performance. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#16:Check:vManage:Confirm there are no pending tasks
print(' #16:Checking:vManage:Confirm there are no pending tasks')
log_file_logger.info('#16:Check:vManage:Confirm there are no pending tasks')
writeFile(report_file, '#16:Check:vManage:Confirm there are no pending tasks\n\n')
try:
tasks = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid, 'device/action/status/tasks', args.vmanage_port))
tasks_running, check_result, check_analysis, check_action = warningCheckfive(tasks)
if check_result == 'Failed':
warning_checks['#16:Check:vManage:Confirm there are no pending tasks'] = [ check_analysis, check_action]
log_file_logger.error('#16:Check result: {}'.format(check_result))
log_file_logger.error('#16: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#16: Tasks still running: {}\n'.format(tasks_running))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#16:Check result: {}'.format(check_result))
log_file_logger.info('#16: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #16:Check:vManage:Confirm there are no pending tasks. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#17:Check:vManage:Validate there are no empty password users
print(' #17:Checking:vManage:Validate there are no empty password users')
log_file_logger.info('#17:Check:vManage:Validate there are no empty password users')
writeFile(report_file, '#17:Check:vManage:Validate there are no empty password users\n\n')
try:
users_emptypass, check_result, check_analysis, check_action = warningChecksix(version_tuple)
if check_result == 'Failed':
warning_checks['#17:Check:vManage:Validate there are no empty password users'] = [ check_analysis, check_action]
log_file_logger.error('#17: Check result: {}'.format(check_result))
log_file_logger.error('#17: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#17: Users with empty passwords: {}\n'.format(users_emptypass))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#17: Check result: {}'.format(check_result))
log_file_logger.info('#17: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #17:Check:vManage:Validate there are no empty password users. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#18:Check:Controllers:Controller versions
print(' #18:Checking:Controllers:Controller versions')
log_file_logger.info('#18:Check:Controllers:Controller versions')
writeFile(report_file, '#18:Check:Controllers:Controller versions\n\n')
try:
check_result, check_analysis, check_action = warningCheckseven(controllers_info)
if check_result == 'Failed':
warning_checks['18:Check:Controllers:Controller versions'] = [ check_analysis, check_action]
log_file_logger.error('#18: Check result: {}'.format(check_result))
log_file_logger.error('#18: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#18: Check result: {}'.format(check_result))
log_file_logger.info('#18: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #18:Check:Controllers:Controller versions. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#19:Check:Controllers:Confirm Certificate Expiration Dates
print(' #19:Checking:Controllers:Confirm Certificate Expiration Dates')
log_file_logger.info('#19:Check:Controllers:Confirm Certificate Expiration Dates')
writeFile(report_file, '#19:Check:Controllers:Confirm Certificate Expiration Dates\n\n')
try:
controllers_exp, controllers_notexp, check_result, check_analysis, check_action = warningCheckeight(controllers_info)
if check_result == 'Failed':
warning_checks['#19:Check:Controllers:Confirm Certificate Expiration Dates'] = [ check_analysis, check_action]
log_file_logger.error('#19: Check result: {}'.format(check_result))
log_file_logger.error('#19: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#19: Controllers with certificates close to expiration: \n{}\n'.format(controllers_exp))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#19: Check result: {}'.format(check_result))
log_file_logger.info('#19: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #19:Check:Controllers:Confirm Certificate Expiration Dates. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#20:Check:Controllers:vEdge list sync
print(' #20:Checking:Controllers:vEdge list sync')
log_file_logger.info('#20:Check:Controllers:vEdge list sync')
writeFile(report_file, '#20:Check:Controllers:vEdge list sync\n\n')
try:
state_vedgeList,check_result, check_analysis, check_action = warningChecknine(controllers_info)
if check_result == 'Failed':
warning_checks['#20:Check:Controllers:Controller versions'] = [ check_analysis, check_action]
log_file_logger.error('#20: Check result: {}'.format(check_result))
log_file_logger.error('#20: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#20: Controllers with inconsistent state_vedgeList: {}\n'.format(state_vedgeList))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#20: Check result: {}'.format(check_result))
log_file_logger.info('#20: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #20:Check:Controllers:vEdge list sync. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#21:Check:Controllers: Confirm control connections
print(' #21:Checking:Controllers: Confirm control connections')
log_file_logger.info('#21:Check:Controllers: Confirm control connections')
writeFile(report_file, '#21:Check:Controllers: Confirm control connections\n\n')
try:
control_sum_tab, discrepancy,check_result, check_analysis, check_action = warningCheckten(vsmart_count, vbond_count)
if check_result == 'Failed':
warning_checks['#21:Check:Controllers: Confirm control connections'] = [ check_analysis, check_action]
log_file_logger.error('#21: Check result: {}'.format(check_result))
log_file_logger.error('#21: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#21: Control Connections Summary: \n{}\n'.format(control_sum_tab))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#21: Check result: {}'.format(check_result))
log_file_logger.info('#21: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #21:Check:Controllers: Confirm control connections. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#Informational Checks
print('\n**** Performing Informational checks\n')
log_file_logger.info('*** Performing Informational Checks')
#22:Check:vManage:Disk controller type
print(' #22:Check:vManage:Disk controller type')
log_file_logger.info('#22:Check:vManage:Disk controller type')
writeFile(report_file, '#22:Check:vManage:Disk controller type\n\n')
try:
check_result, check_analysis, check_action = infoCheckone(server_type, disk_controller)
if check_result == 'Failed':
warning_checks['#22:Check:vManage:Disk controller type'] = [ check_analysis, check_action]
log_file_logger.error('#22: Check result: {}'.format(check_result))
log_file_logger.error('#22: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#22: Disk Controller type: {}\n'.format(disk_controller))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#22: Check result: {}'.format(check_result))
log_file_logger.info('#22: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#22: Disk Controller type: {}\n'.format(disk_controller))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #22:Check:vManage:Disk controller type. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#23:Check:Controllers:Validate there is at minimum vBond, vSmart present
print(' #23:Check:Controllers:Validate there is at minimum vBond, vSmart present')
log_file_logger.info('#23:Check:Controllers:Validate there is at minimum vBond, vSmart present')
writeFile(report_file, '#23:Check:Controllers:Validate there is at minimum vBond, vSmart present\n\n')
try:
check_result, check_analysis, check_action = infoChecktwo(vsmart_count,vbond_count)
if check_result == 'Failed':
warning_checks['#23:Check:Controllers:Validate there is at minimum vBond, vSmart present'] = [ check_analysis, check_action]
log_file_logger.error('#23: Check result: {}'.format(check_result))
log_file_logger.error('#23: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#23: vSmart Count: {}'.format(vsmart_count))
log_file_logger.error('#23: vBond Count: {}\n'.format(vbond_count))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#23: Check result: {}'.format(check_result))
log_file_logger.info('#23: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#23: vSmart Count: {}'.format(vsmart_count))
log_file_logger.info('#23: vBond Count: {}\n'.format(vbond_count))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #23:Check:Controllers:Validate there is at minimum vBond, vSmart present. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#24:Check:Controllers:Validate all controllers are reachable
print(' #24:Check:Controllers:Validate all controllers are reachable')
log_file_logger.info('#24:Check:Controllers:Validate all controllers are reachable')
writeFile(report_file, '#24:Check:Controllers:Validate all controllers are reachable\n\n')
try:
unreach_controllers,check_result, check_analysis, check_action = infoChecktthree(controllers_info)
if check_result == 'Failed':
warning_checks['#24:Check:Controllers:Validate all controllers are reachable'] = [ check_analysis, check_action]
log_file_logger.error('#24: Check result: {}'.format(check_result))
log_file_logger.error('#24: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#24: Unreachable Controllers: {}\n'.format(unreach_controllers))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#24: Check result: {}'.format(check_result))
log_file_logger.info('#24: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #24:Check:Controllers:Validate all controllers are reachable. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
if cluster_size>1:
cluster_checks = {}
log_file_logger.info('*** Performing Cluster Checks')
print('\n**** Performing Cluster checks\n')
#25:Check:Cluster:Version consistency
print(' #25:Checking:Cluster:Version consistency')
log_file_logger.info('#25:Check:Cluster:Version consistency')
writeFile(report_file, '#25:Check:Cluster:Version consistency\n\n')
try:
check_result,check_analysis, check_action = criticalChecktwelve(vmanage_info)
if check_result == 'Failed':
cluster_checks['#25:Check:Cluster:Version consistency'] = [ check_analysis, check_action]
log_file_logger.error('#25: Check result: {}'.format(check_result))
log_file_logger.error('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#25: Check result: {}'.format(check_result))
log_file_logger.info('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #25:Check:Cluster:Version consistency. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#26:Check:Cluster:Cluster health
print(' #26:Checking:Cluster:Cluster health')
log_file_logger.info('#26:Check:Cluster:Cluster health')
writeFile(report_file, '#26:Check:Cluster:Cluster health\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid, 'clusterManagement/list', args.vmanage_port))
services_down, check_result, check_analysis, check_action = criticalCheckthirteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#26:Check:Cluster:Cluster health'] = [ check_analysis, check_action]
log_file_logger.error('#26: Check result: {}'.format(check_result))
log_file_logger.error('#26: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#26: Relevant cluster services that are down: {}\n'.format(services_down))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#26: Check result: {}'.format(check_result))
log_file_logger.info('#26: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #26:Check:Cluster:Cluster health. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and | |
'mò',
0x25C7A: 'qiān',
0x25C7B: 'chì,tú',
0x25C7C: 'pái,pì',
0x25C7D: 'juàn',
0x25C80: 'cháo',
0x25C81: 'liè',
0x25C82: 'bīng',
0x25C83: 'kòu',
0x25C84: 'dàn',
0x25C85: 'chóu',
0x25C86: 'tōng',
0x25C87: 'dàn',
0x25C88: 'mǎn',
0x25C89: 'hù',
0x25C8A: 'liáo',
0x25C8B: 'xián',
0x25C8D: 'cáo',
0x25C8E: 'lù',
0x25C8F: 'chuàn',
0x25C90: 'wú',
0x25C91: 'mán',
0x25C95: 'zǐ',
0x25C97: 'dù',
0x25C9A: 'shuàng',
0x25C9B: 'fù',
0x25C9C: 'jù',
0x25C9D: 'zhòu',
0x25C9F: 'diào',
0x25CA0: 'wàng',
0x25CA1: 'chuāng',
0x25CA2: 'qiān',
0x25CA3: 'tuì',
0x25CA5: 'lián',
0x25CA6: 'biāo',
0x25CA7: 'lí',
0x25CAA: 'lí',
0x25CBD: 'cuó',
0x25CC6: 'bì',
0x25CC7: 'fù',
0x25CC8: 'cuì',
0x25CC9: 'dū',
0x25CCB: 'zàn,zān',
0x25CCC: 'lóng',
0x25CCD: 'xún',
0x25CCE: 'qióng',
0x25CCF: 'jī',
0x25CD0: 'qiǎn',
0x25CD2: 'jiǎn',
0x25CD3: 'shāo',
0x25CD4: 'duò',
0x25CD5: 'shū',
0x25CD6: 'bù',
0x25CD7: 'xū',
0x25CD8: 'dǒng',
0x25CDA: 'rán',
0x25CDC: 'yáng',
0x25CDD: 'ruǐ',
0x25CDE: 'lìn',
0x25CDF: 'jiǎn',
0x25CE0: 'dì',
0x25CE1: 'fén',
0x25CE2: 'diàn',
0x25CE3: 'zuì',
0x25CE5: 'nǐng',
0x25CEA: 'suàn',
0x25CEB: 'tiǎn',
0x25CEC: 'àn',
0x25CEF: 'cè',
0x25CF0: 'dìng',
0x25CF1: 'shēn',
0x25CF2: 'dù',
0x25CF3: 'tí',
0x25CF4: 'jiǎo',
0x25CF5: 'zuì',
0x25CF6: 'zhǎng',
0x25CF7: 'jiǎn',
0x25CF8: 'dàn',
0x25CF9: 'dǎn',
0x25CFA: 'sǒng',
0x25CFD: 'qī',
0x25D10: 'zhǎn',
0x25D11: 'tíng',
0x25D12: 'zhì',
0x25D15: 'yóu',
0x25D16: 'pái',
0x25D21: 'lǐ',
0x25D24: 'qián',
0x25D26: 'suì,dí',
0x25D27: 'jǔ',
0x25D28: 'ài',
0x25D29: 'gé',
0x25D2A: 'jù',
0x25D2B: 'tún,diàn',
0x25D2C: 'bì',
0x25D2D: 'qià',
0x25D2E: 'bó',
0x25D2F: 'huì',
0x25D31: 'jiàn',
0x25D34: 'gōu',
0x25D35: 'suàn',
0x25D3A: 'cí',
0x25D3B: 'qiàng',
0x25D3F: 'yán',
0x25D4F: 'diàn',
0x25D52: 'miè',
0x25D5C: 'pò',
0x25D5D: 'lǐng',
0x25D5E: 'jié',
0x25D5F: 'zhù',
0x25D60: 'gǔ',
0x25D63: 'duān',
0x25D64: 'zhào',
0x25D66: 'shǎo',
0x25D67: 'qǐn',
0x25D68: 'mí',
0x25D6A: 'píng',
0x25D6B: 'cóng',
0x25D6C: 'chōu',
0x25D6F: 'sà',
0x25D76: 'tiǎn',
0x25D78: 'mí',
0x25D85: 'liú',
0x25D86: 'lǘ',
0x25D87: 'lǔ',
0x25D88: 'zōu',
0x25D8C: 'lǜ',
0x25D8D: 'huǎn',
0x25D8F: 'tiáo',
0x25D90: 'tuí',
0x25D91: 'qiǎng',
0x25D92: 'lìn',
0x25D93: 'bēi',
0x25D94: 'páo',
0x25D95: 'zhān',
0x25D97: 'lì',
0x25D9B: 'tí',
0x25D9C: 'hú',
0x25DA2: 'liè',
0x25DB5: 'huǐ',
0x25DB6: 'qū',
0x25DB7: 'xuǎn',
0x25DB9: 'jìng',
0x25DBA: 'dié',
0x25DBB: 'suí',
0x25DBD: 'wèi',
0x25DBF: 'yán',
0x25DC0: 'yān',
0x25DC1: 'bàn',
0x25DC3: 'jiǎng',
0x25DC4: 'nǐ',
0x25DC5: 'lì',
0x25DC6: 'hú',
0x25DC7: 'qì',
0x25DC8: 'zhōng',
0x25DD4: 'yú',
0x25DD5: 'dié',
0x25DD6: 'lìn',
0x25DD7: 'lì',
0x25DD8: 'zhuó',
0x25DD9: 'jì',
0x25DDA: 'jū',
0x25DDC: 'fēng',
0x25DDE: 'yù',
0x25DE8: 'liè',
0x25DE9: 'zá',
0x25DEA: 'qián',
0x25DEB: 'jiē',
0x25DEC: 'guān',
0x25DEE: 'zhuó,zhāo',
0x25DF1: 'fù',
0x25DF9: 'sè',
0x25DFC: 'cù',
0x25E03: 'huǐ',
0x25E05: 'biān',
0x25E08: 'dàng',
0x25E09: 'lóng',
0x25E0A: 'yì',
0x25E17: 'sǎ',
0x25E18: 'yuè',
0x25E1A: 'dí',
0x25E21: 'gǎn',
0x25E22: 'zān',
0x25E23: 'shàn',
0x25E24: 'yù',
0x25E25: 'bǒ',
0x25E27: 'dìng',
0x25E28: 'fán,bǒ,bǔ',
0x25E2A: 'yù',
0x25E2C: 'shēn',
0x25E32: 'gōng',
0x25E34: 'miè',
0x25E35: 'tún',
0x25E38: 'liè',
0x25E41: 'zhā,zuò',
0x25E42: 'pēi',
0x25E44: 'mí',
0x25E46: 'míng',
0x25E47: 'fàn',
0x25E48: 'tuó',
0x25E49: 'nà',
0x25E4A: 'sì',
0x25E4B: 'yí',
0x25E4C: 'jiā',
0x25E4D: 'zhù',
0x25E53: 'bān',
0x25E54: 'yù',
0x25E56: 'pǒ',
0x25E5A: 'huān',
0x25E5B: 'càn',
0x25E5C: 'jiāo',
0x25E60: 'tán',
0x25E69: 'zhì',
0x25E6B: 'mǐ',
0x25E6C: 'kǎo',
0x25E71: 'yāo',
0x25E72: 'duì',
0x25E73: 'quǎn,huán',
0x25E74: 'bù',
0x25E75: 'chù',
0x25E76: 'qiǎo',
0x25E77: 'liú',
0x25E78: 'bó',
0x25E7A: 'kāng',
0x25E7B: 'fèn',
0x25E89: 'dòu',
0x25E8A: 'gé',
0x25E99: 'líng',
0x25E9A: 'xí',
0x25E9C: 'nì',
0x25E9D: 'zhōu',
0x25E9E: 'zhōu,yù',
0x25EA1: 'chī',
0x25EA3: 'chōu',
0x25EB4: 'niān',
0x25EB5: 'jī',
0x25EB7: 'qū',
0x25EC4: 'kāi',
0x25EC6: 'sù',
0x25EC7: 'xiàn',
0x25EC9: 'hé',
0x25ECB: 'lín',
0x25ECD: 'zī',
0x25ED1: 'ǒu,lì',
0x25ED2: 'cù,mì',
0x25ED7: 'chá',
0x25EDD: 'zhòng',
0x25EDE: 'bú',
0x25EE4: 'chōu',
0x25EE5: 'xì',
0x25EE6: 'sà',
0x25EE7: 'xián,jiān',
0x25EE8: 'sè',
0x25EE9: 'miàn',
0x25EEB: 'fán',
0x25EEC: 'zhī',
0x25EEE: 'cuì',
0x25EF4: 'xià',
0x25EF5: 'cí',
0x25EFE: 'nuò',
0x25EFF: 'lí',
0x25F00: 'zú',
0x25F02: 'cuī',
0x25F03: 'zé',
0x25F05: 'lí',
0x25F0B: 'lí',
0x25F13: 'bèi',
0x25F18: 'qí',
0x25F1A: 'zhuō',
0x25F1B: 'cuì',
0x25F1C: 'pū',
0x25F1D: 'chī',
0x25F1E: 'fán',
0x25F1F: 'tán',
0x25F29: 'zī',
0x25F2A: 'zǔ',
0x25F2B: 'zhōu',
0x25F2C: 'róng',
0x25F2D: 'lín',
0x25F2E: 'tán',
0x25F36: 'shì',
0x25F38: 'líng',
0x25F3A: 'cuǐ',
0x25F3B: 'zī',
0x25F3C: 'fū',
0x25F41: 'xiào',
0x25F48: 'fēng,lǐ',
0x25F4F: 'xiàn',
0x25F50: 'jiàn',
0x25F52: 'fèn',
0x25F57: 'lì',
0x25F58: 'mò,miè',
0x25F5F: 'yōu',
0x25F63: 'líng',
0x25F65: 'huò',
0x25F67: 'qū',
0x25F6C: 'niàng',
0x25F70: 'mí',
0x25F73: 'qì',
0x25F76: 'hé',
0x25F78: 'liàn',
0x25F79: 'cào',
0x25F7F: 'zuò',
0x25F82: 'líng',
0x25F85: 'zhú',
0x25F87: 'niǎo',
0x25F8A: 'jǐ',
0x25F8B: 'réng',
0x25F8C: 'jié',
0x25F8D: 'gǎn',
0x25F90: 'yì',
0x25F93: 'zhóu',
0x25F95: 'wù',
0x25F9A: 'gěng,dǎn',
0x25F9B: 'cù',
0x25F9D: 'miè,miǎn',
0x25FA1: 'xún,jī',
0x25FA3: 'zhī',
0x25FA4: 'xiáo',
0x25FA7: 'fú',
0x25FA8: 'hú',
0x25FAC: 'dī',
0x25FAE: 'jué',
0x25FAF: 'diào',
0x25FB9: 'shǒu',
0x25FBC: 'wǎng',
0x25FC3: 'nà',
0x25FC4: 'dī',
0x25FC5: 'shì',
0x25FC6: 'cí',
0x25FC7: 'shū',
0x25FC9: 'wà,mò',
0x25FCA: 'chè',
0x25FCB: 'fán,biàn',
0x25FCD: 'gū',
0x25FCE: 'yuān,wǎn',
0x25FD1: 'guān,lún',
0x25FDA: 'qiè',
0x25FDC: 'zhǎn,zhěn',
0x25FDD: 'dài',
0x25FDE: 'shē',
0x25FE6: 'zhōu',
0x25FE7: 'xiǎng',
0x25FE8: 'míng',
0x25FE9: 'zì',
0x25FEA: 'huāng',
0x25FEB: 'mí,yì,wèi',
0x25FED: 'xì',
0x25FEE: 'zhì,shì',
0x25FEF: 'pài',
0x25FF0: 'duǒ',
0x25FF4: 'cì',
0x25FF5: 'móu',
0x25FF7: 'chào',
0x25FF9: 'yì',
0x25FFA: 'gōu',
0x26007: 'jīng',
0x26013: 'zēng,jiē',
0x26014: 'pīng',
0x26015: 'yè',
0x26016: 'jié',
0x26018: 'pī,bī',
0x2601B: 'shā',
0x2601C: 'zhuàng',
0x2601D: 'jiǒng',
0x26020: 'liú',
0x26021: 'yǔ',
0x26023: 'jū',
0x26028: 'nuò',
0x26038: 'mào',
0x26044: 'chēn',
0x26046: 'zhuàn,juàn,shuàn',
0x26047: 'niàn',
0x26048: 'kòng',
0x26049: 'jiē',
0x2604A: 'huà',
0x2604D: 'xīn',
0x2604E: 'zuó',
0x2604F: 'yàn',
0x26050: 'jué',
0x26055: 'hū',
0x26056: 'zhòu',
0x26057: 'shè',
0x26059: 'yǎn',
0x2605B: 'xiè,dié',
0x2605C: 'dié',
0x2605F: 'chēn,chén,zhěn',
0x26072: 'jiǎn',
0x26073: 'jì',
0x26076: 'chuò',
0x26077: 'hóng',
0x26080: 'dá',
0x26084: 'kāi',
0x26085: 'xīng,xǐ',
0x26086: 'huì',
0x26087: 'jiǎn',
0x26088: 'zhòu',
0x26089: 'zhǎ',
0x2608A: 'fù',
0x2608B: 'chì',
0x2608C: 'běng',
0x2608D: 'nuò',
0x26090: 'gōu',
0x26091: 'jì',
0x26092: 'qián',
0x26094: 'wàn',
0x26095: 'óu',
0x26096: 'bì',
0x26097: 'shuò',
0x260A0: 'jīng',
0x260A1: 'yè',
0x260B6: 'yǐ',
0x260C4: 'fěi',
0x260C7: 'lí',
0x260CA: 'lì',
0x260CB: 'pí',
0x260D2: 'suì',
0x260D3: 'liú',
0x260D4: 'hé',
0x260D5: 'hǔn',
0x260D6: 'tǎn',
0x260D7: 'shuò',
0x260D8: 'zhì',
0x260D9: 'bó',
0x260DD: 'xì',
0x260E1: 'pó,tāo',
0x260E2: 'qǔn',
0x260E4: 'mù',
0x260FD: 'yōng',
0x26102: 'dài',
0x2610A: 'qǐ',
0x2610B: 'diǎo',
0x2610C: 'niè',
0x2610D: 'shuǎng',
0x2610F: 'shāo',
0x26110: 'kǔn,mí',
0x26111: 'suì',
0x26113: 'dōu',
0x26114: 'dié',
0x2611C: 'gōng',
0x26127: 'sōu',
0x2612F: 'zhuǎn',
0x26130: 'guó',
0x2613C: 'xū',
0x2613D: 'qú',
0x2613F: 'mò',
0x26140: 'xún',
0x26143: 'jiāo,qiāo',
0x26144: 'zhé',
0x26146: 'diàn',
0x26147: 'sāng',
0x26148: 'bēng',
0x2614A: 'suǒ',
0x2614B: 'qiǎn',
0x2614F: 'xū',
0x26151: 'xún',
0x26154: 'mò',
0x26157: 'chǎn',
0x26168: 'xǔ',
0x26175: 'suì',
0x26176: 'là,liè',
0x26177: 'zhǔ,zhù',
0x26178: 'zhòu',
0x2617A: 'lì',
0x2617B: 'huǎn',
0x2617C: 'dān',
0x2617D: 'jú',
0x2617E: 'zhuó',
0x2617F: 'yùn',
0x26180: 'chǎn',
0x26181: 'luó',
0x26184: 'sè',
0x26186: 'lián',
0x26188: 'zuǎn,zuí',
0x2618B: 'lài',
0x2618C: 'shuǎng',
0x2618D: 'qiè',
0x26198: 'dōu',
0x2619E: 'wù',
0x2619F: 'méng',
0x261A1: 'jì',
0x261A4: 'chī',
0x261A6: 'nǐ',
0x261B2: 'liào',
0x261B8: 'yáo',
0x261BB: 'là',
0x261BE: 'lǜ',
0x261C0: 'suì',
0x261C1: 'fū',
0x261C4: 'lěi',
0x261C5: 'wěi',
0x261CE: 'cōng',
0x261D4: 'lì',
0x261D6: 'pín',
0x261D8: 'jūn',
0x261D9: 'jǔ',
0x261DB: 'là',
0x261E7: 'jì',
0x261EA: 'miè',
0x261EC: 'yào',
0x261ED: 'biān',
0x261F1: 'cóng',
0x261F2: 'sī,chī',
0x261F5: 'sī',
0x261F8: 'hé',
0x26203: 'nàng',
0x26205: 'dié',
0x26208: 'chè',
0x26223: 'yú',
0x26224: 'xiòng',
0x26226: 'qì',
0x26227: 'bēi',
0x26228: 'xíng',
0x26229: 'gǒng',
0x2622C: 'zuǐ',
0x26230: 'jiē',
0x26232: 'kāi,gǔ',
0x26235: 'xíng',
0x26236: 'bēi',
0x26237: 'shū',
0x26238: 'yù',
0x2623A: 'zhǒu',
0x2623B: 'zhǎn',
0x26242: 'zhōng',
0x26246: 'chá,chà',
0x26248: 'chuí',
0x26249: 'liù',
0x2624E: 'suī',
0x26250: 'zhǔ',
0x26259: 'biàn',
0x2625D: 'xìn',
0x2625F: 'yà',
0x26262: 'líng',
0x26265: 'wèng',
0x26267: 'yà',
0x2626A: 'wǎng',
0x2626C: 'tīng',
0x26279: 'dí',
0x26281: 'pí',
0x26282: 'hù',
0x26283: 'cén',
0x2628A: 'tiān',
0x2628B: 'mǒu',
0x2628C: 'juǎn',
0x2628D: 'hù',
0x2628E: 'mǒu',
0x26290: 'jù',
0x26293: 'lǐng',
0x26297: 'liǔ',
0x26298: 'hù',
0x262A6: 'fú',
0x262A7: 'hú',
0x262AA: 'è',
0x262AB: 'gōng',
0x262AC: 'gū',
0x262B9: 'lüè',
0x262BB: 'fán',
0x262BC: 'lǜ',
0x262BD: 'méng',
0x262BE: 'fú',
0x262BF: 'liú',
0x262C5: 'xié',
0x262C6: 'gū',
0x262C8: 'xiàn',
0x262C9: 'bó',
0x262CB: 'jì',
0x262D3: 'quān',
0x262D4: 'lù',
0x262DE: 'shuò',
0x262E1: 'mǒu',
0x262E2: 'yù',
0x262E3: 'hàn',
0x262E9: 'yuè',
0x262EA: 'dàn',
0x262EF: 'yú',
0x262F0: 'jiān',
0x262F3: 'gāng',
0x262F9: 'mò',
0x262FF: 'cáo',
0x26300: 'shèn',
0x26301: 'liǔ,lóu',
0x26306: 'jiāo',
0x26309: 'sù',
0x2630A: 'sù',
0x2630B: 'zhòng',
0x26312: 'liào',
0x26314: 'xuǎn',
0x26315: 'lù',
0x26317: 'jì',
0x2631A: 'yán',
0x2631F: 'lù',
0x26321: 'mǐn',
0x26322: 'tí',
0x26326: 'huàn',
0x26329: 'yì',
0x2632A: 'tǎn',
0x2632C: 'wǔ,wú',
0x2632D: 'jī',
0x26330: 'jī',
0x26337: 'dú',
0x26338: 'kūn',
0x2633A: 'jūn',
0x2633E: 'juàn',
0x2633F: 'shī',
0x26340: 'nàn',
0x26341: 'pò',
0x26344: 'shū',
0x26345: 'quàn',
0x2634C: 'rèn',
0x2634F: 'fén',
0x26352: 'tà',
0x26353: 'tún',
0x26355: 'yáng',
0x2635A: 'gāo',
0x26366: 'duō',
0x26367: 'cī',
0x2636A: 'fén',
0x2636D: 'róu',
0x26371: 'gāo',
0x26372: 'xiáng,yàng',
0x26374: 'xiáng',
0x26375: 'hǒu',
0x26377: 'tāo',
0x26378: 'shàn',
0x26379: 'yáng',
0x2637A: 'zì',
0x2637C: 'yuán',
0x26384: 'sú',
0x26387: 'chuàn',
0x26388: 'xiáng,xiè',
0x2638A: 'bān',
0x2638C: 'mǎn',
0x2638E: 'fǔ',
0x2638F: 'lǎ',
0x26390: 'lǐ',
0x26392: 'jié',
0x26393: 'yōu',
0x26398: 'yù',
0x2639A: 'chì',
0x2639C: 'chuàn',
0x2639D: | |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
######
Command help:
usage: train.py [-h] --train-batch-size TRAIN_BATCH_SIZE --test-batch-size
TEST_BATCH_SIZE --arch-key ARCH_KEY --dataset DATASET
--dataset-path DATASET_PATH
[--checkpoint-path CHECKPOINT_PATH] [--init-lr INIT_LR]
[--optim-args OPTIM_ARGS] [--recipe-path RECIPE_PATH]
[--sparse-transfer-learn [SPARSE_TRANSFER_LEARN]]
[--eval-mode [EVAL_MODE]] [--optim OPTIM]
[--logs-dir LOGS_DIR] [--save-best-after SAVE_BEST_AFTER]
[--save-epochs SAVE_EPOCHS]
[--use-mixed-precision [USE_MIXED_PRECISION]]
[--debug-steps DEBUG_STEPS] [--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--model-kwargs MODEL_KWARGS]
[--dataset-kwargs DATASET_KWARGS] [--model-tag MODEL_TAG]
[--save-dir SAVE_DIR] [--device DEVICE]
[--loader-num-workers LOADER_NUM_WORKERS]
[--no-loader-pin-memory]
[--loader-pin-memory [LOADER_PIN_MEMORY]]
optional arguments:
-h, --help show this help message and exit
--train-batch-size TRAIN_BATCH_SIZE
The batch size to use while training
--test-batch-size TEST_BATCH_SIZE
The batch size to use while testing
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.pytorch.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored. If using a SparseZoo recipe, can also
provide 'zoo' to load the base weights associated with
that recipe
--init-lr INIT_LR The initial learning rate to use while training, the
actual initial value used should be set by the
sparseml recipe
--optim-args OPTIM_ARGS
Additional args to be passed to the optimizer passed
in as a json object
--recipe-path RECIPE_PATH
The path to the yaml file containing the modifiers and
schedule to apply them with. Can also provide a
SparseZoo stub prefixed with 'zoo:' with an optional
'?recipe_type=' argument
--sparse-transfer-learn [SPARSE_TRANSFER_LEARN]
Enable sparse transfer learning modifiers to enforce
the sparsity for already sparse layers. The modifiers
are added to the ones to be loaded from the recipe-
path
--eval-mode [EVAL_MODE]
Puts into evaluation mode so that the model can be
evaluated on the desired dataset
--optim OPTIM The optimizer type to use, one of [SGD, Adam, RMSprop]
--logs-dir LOGS_DIR The path to the directory for saving logs
--save-best-after SAVE_BEST_AFTER
start saving the best validation result after the
given epoch completes until the end of training
--save-epochs SAVE_EPOCHS
epochs to save checkpoints at
--use-mixed-precision [USE_MIXED_PRECISION]
Trains model using mixed precision. Supported
environments are single GPU and multiple GPUs using
DistributedDataParallel with one GPU per process
--debug-steps DEBUG_STEPS
Amount of steps to run for training and testing for a
debug mode
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--model-kwargs MODEL_KWARGS
Keyword arguments to be passed to model constructor,
should be given as a json object
--dataset-kwargs DATASET_KWARGS
Keyword arguments to be passed to dataset constructor,
should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--device DEVICE The device to run on (can also include ids for data
parallel), ex: cpu, cuda, cuda:0,1
--loader-num-workers LOADER_NUM_WORKERS
The number of workers to use for data loading
--no-loader-pin-memory
Do not use pinned memory for data loading
--loader-pin-memory [LOADER_PIN_MEMORY]
Use pinned memory for data loading
#########
EXAMPLES
#########
##########
Example command for pruning resnet50 on imagenet dataset:
python integrations/pytorch/train.py \
--recipe-path ~/sparseml_recipes/pruning_resnet50.yaml \
--arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012 \
--train-batch-size 256 --test-batch-size 1024
##########
Example command for transfer learning sparse mobilenet_v1 on an image folder dataset:
python integrations/pytorch/train.py \
--sparse-transfer-learn \
--recipe-path ~/sparseml_recipes/pruning_mobilenet.yaml \
--arch-key mobilenet_v1 --pretrained pruned-moderate \
--dataset imagefolder --dataset-path ~/datasets/my_imagefolder_dataset \
--train-batch-size 256 --test-batch-size 1024
##########
Template command for running training with this script on multiple GPUs using
DistributedDataParallel using mixed precision. Note - DDP support in this script
only tested for torch==1.7.0.
python -m torch.distributed.launch \
--nproc_per_node <NUM GPUs> \
integrations/pytorch/train.py \
--use-mixed-precision \
<TRAIN.PY ARGUMENTS>
"""
import argparse
import json
import os
from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple
import torch
from torch.nn import Module
from torch.utils.data import DataLoader
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
ModuleDeviceContext,
ModuleTester,
ModuleTrainer,
default_device,
get_prunable_layers,
model_to_device,
set_deterministic_seeds,
tensor_sparsity,
)
CURRENT_TASK = utils.Tasks.TRAIN
LOGGER = get_main_logger()
@dataclass
class TrainingArguments:
"""
Represents the arguments we use in our PyTorch integration scripts for
training tasks
Using :class:`NmArgumentParser` we can turn this class into `argparse
<https://docs.python.org/3/library/argparse.html#module-argparse>`__
arguments that can be specified on the command line.
:param train_batch_size: An int representing the training batch size.
:param test_batch_size: An int representing the test batch size.
:param arch_key: A str key representing the type of model to use,
ex:resnet50.
:param dataset: The dataset to use for training, ex imagenet, imagenette,
etc; Set to `imagefolder` for a custom dataset.
:param dataset_path: Root path to dataset location.
:param local_rank: DDP argument set by PyTorch in DDP mode, default -1
:param checkpoint_path: A path to a previous checkpoint to load the state
from and resume the state for; Also works with SparseZoo recipes;
Set to zoo to automatically download and load weights associated with a
recipe.
:param init_lr: float representing the initial learning for training,
default=1e-9 .
:param optim_args: Additional arguments to be passed in to the optimizer as
a json object
:param recipe_path: The path to the yaml file containing the modifiers and
schedule to apply them with; Can also provide a SparseZoo stub prefixed
with 'zoo:'.
:param sparse_transfer_learn: Boolean to enable sparse transfer learning
modifiers to enforce
the sparsity for already sparse layers. The modifiers are added to
the ones to be loaded from the recipe-path.
:param eval_mode: bool to start evaluation mode so that the model can be
evaluated on the desired dataset.
    :param optim: str representing the optimizer type to use, one of
[SGD, Adam, RMSprop].
:param logs_dir: The path to the directory for saving logs.
:param save_best_after: int epoch number to start saving the best
validation result after until the end of training.
:param save_epochs: int epochs to save checkpoints at.
:param use_mixed_precision: bool to train model using mixed precision.
Supported environments are single GPU and multiple GPUs using
DistributedDataParallel with one GPU per process.
    :param debug_steps: int representing the number of steps to run for training and
testing for debug mode default=-1.
:param pretrained: The type of pretrained weights to use default is true
to load the default pretrained weights for the model Otherwise should
be set to the desired weights type: [base, optim, optim-perf];
To not load any weights set to one of [none, false].
:param pretrained_dataset: str representing the dataset to load pretrained
weights for if pretrained is set; Default is None which will load the
default dataset for the architecture; Ex can be set to imagenet,
cifar10, etc".
:param model_kwargs: json object containing keyword arguments to be
passed to model constructor.
:param dataset_kwargs: json object to load keyword arguments to be passed
to dataset constructor.
:param model_tag: A str tag to use for the model for saving results
under save-dir, defaults to the model arch and dataset used.
:param save_dir: The path to the directory for saving results,
| |
<gh_stars>1-10
"""This module contains the GeneFlow Definition class."""
import copy
import pprint
import cerberus
import yaml
from geneflow.log import Log
GF_VERSION = 'v1.0'
WORKFLOW_SCHEMA = {
'v1.0': {
'gfVersion': {
'type': 'string', 'default': GF_VERSION, 'allowed': [GF_VERSION]
},
'class': {
'type': 'string', 'default': 'workflow', 'allowed': ['workflow']
},
'workflow_id': {'type': 'string', 'default': ''},
'name': {'type': 'string', 'required': True},
'description': {'type': 'string', 'required': True},
'repo_uri': {'type': 'string', 'default': ''},
'documentation_uri': {'type': 'string', 'default': ''},
'version': {'type': 'string', 'required': True},
'public': {'type': 'boolean', 'default': False},
'enable': {'type': 'boolean', 'default': True},
'test': {'type': 'boolean', 'default': False},
'username': {'type': 'string', 'default': 'user'},
'inputs': {
'type': 'dict',
'default': {},
'valueschema': {
'type': 'dict',
'required': True,
'schema': {
'label': {'type': 'string', 'required': True},
'description': {'type': 'string', 'default': ''},
'type': {
'type': 'string',
'required': True,
'default': 'Any',
'allowed': ['File', 'Directory', 'Any']
},
'default': {'type': 'string', 'default': ''},
'enable': {'type': 'boolean', 'default': True},
'visible': {'type': 'boolean', 'default': True},
'value': {'type': 'string', 'default': ''}
}
}
},
'parameters': {
'type': 'dict',
'default': {},
'valueschema': {
'type': 'dict',
'required': True,
'schema': {
'label': {'type': 'string', 'required': True},
'description': {'type': 'string', 'default': ''},
'type': {
'type': 'string',
'required': True,
'default': 'Any',
'allowed': [
'File', 'Directory', 'string', 'int',
'float', 'double', 'long', 'Any'
]
},
'default': {'nullable': True, 'default': None},
'enable': {'type': 'boolean', 'default': True},
'visible': {'type': 'boolean', 'default': True},
'value': {'nullable': True, 'default': None}
}
}
},
'final_output': {
'type': 'list', 'schema': {'type': 'string'}, 'default': []
},
'steps': {
'type': 'dict',
'required': True,
'valueschema': {
'type': 'dict',
'required': True,
'schema': {
'step_id': {'type': 'string', 'default': ''},
'name': {'type': 'string', 'default': ''},
'app_id': {'type': 'string', 'default': ''},
'app_name': {
'type': 'string',
'required': True,
'excludes': 'app'
},
'app': {
'type': 'string',
'required': True,
'excludes': 'app_name'
},
'depend': {'type': 'list', 'default': []},
'number': {'type': 'integer', 'default': 0},
'letter': {'type': 'string', 'default': ''},
'map': {
'type': 'dict',
'default': {'uri': '', 'regex': ''},
'schema': {
'uri': {'type': 'string', 'default': ''},
'regex': {'type': 'string', 'default': ''}
}
},
'template': {
'type': 'dict',
'allow_unknown': True,
'schema': {
'output': {'type': 'string', 'required': True}
}
},
'execution': {
'type': 'dict',
'default': {'context': 'local', 'method': 'auto'},
'schema': {
'context': {
'type': 'string',
'default': 'local',
'allowed': ['local', 'agave']
},
'method': {
'type': 'string',
'default': 'auto',
'allowed': [
'auto',
'package',
'cdc-shared-package',
'singularity',
'cdc-shared-singularity',
'docker',
'environment',
'module'
]
}
}
}
}
}
}
}
}
APP_SCHEMA = {
'v1.0': {
'gfVersion': {
'type': 'string',
'default': GF_VERSION,
'allowed': [GF_VERSION]
},
'class': {
'type': 'string',
'default': 'app',
'allowed': ['app']
},
'app_id': {'type': 'string', 'default': ''},
'name': {'type': 'string', 'required': True},
'description': {'type': 'string', 'maxlength': 64, 'required': True},
'repo_uri': {'type': 'string', 'default': ''},
'version': {'type': 'string', 'default': ''},
'public': {'type': 'boolean', 'default': True},
'username': {'type': 'string', 'default': 'user'},
'inputs': {
'type': 'dict',
'default': {},
'valueschema': {
'type': 'dict',
'required': True,
'schema': {
'label': {'type': 'string', 'required': True},
'description': {'type': 'string', 'default': ''},
'type': {
'type': 'string',
'required': True,
'default': 'Any',
'allowed': ['File', 'Directory', 'Any']
},
'default': {'type': 'string', 'default': ''},
'value': {'type': 'string', 'default': ''}
}
}
},
'parameters': {
'type': 'dict',
'default': {},
'valueschema': {
'type': 'dict',
'required': True,
'schema': {
'label': {'type': 'string', 'required': True},
'description': {'type': 'string', 'default': ''},
'type': {
'type': 'string',
'required': True,
'default': 'Any',
'allowed': [
'File', 'Directory', 'string', 'int',
'float', 'double', 'long', 'Any'
]
},
'default': {'nullable': True, 'default': None},
'value': {'nullable': True, 'default': None}
}
}
},
'definition': {
'type': 'dict',
'required': True,
'valueschema': {'type': 'dict'}
}
}
}
JOB_SCHEMA = {
'v1.0': {
'gfVersion': {
'type': 'string',
'default': GF_VERSION,
'allowed': [GF_VERSION]
},
'class': {
'type': 'string',
'default': 'job',
'allowed': ['job']
},
'job_id': {'type': 'string', 'default': ''},
'username': {'type': 'string', 'default': 'user'},
'name': {'type': 'string', 'required': True},
'workflow_id': {'type': 'string', 'default': ''},
'workflow_name': {'type': 'string', 'default': ''},
'output_uri': {'type': 'string', 'required': True},
'work_uri': {
'type': 'dict',
'required': True,
'valueschema': {'type': 'string'}
},
'no_output_hash': {
'type': 'boolean',
'default': False,
'coerce': (lambda s: str(s).lower() in ['true','yes','1'])
},
'inputs': {
'type': 'dict', 'default': {}, 'valueschema': {'type': 'string'}
},
'parameters': {
'type': 'dict', 'default': {}
},
'final_output': {
'type': 'list', 'schema': {'type': 'string'}, 'default': []
},
'execution': {
'type': 'dict',
'default': {
'context': {'default': 'local'},
'method': {'default': 'auto'}
},
'schema': {
'context': {
'type': 'dict',
'default': {'default': 'local'},
'allow_unknown': True,
'schema': {
'default': {
'type': 'string',
'default': 'local',
}
},
'valueschema': {
'type': 'string',
'default': 'local',
'allowed': ['local', 'agave']
}
},
'method': {
'type': 'dict',
'default': {'default': 'auto'},
'allow_unknown': True,
'schema': {
'default': {
'type': 'string',
'default': 'auto'
}
},
'valueschema': {
'type': 'string',
'default': 'auto',
'allowed': [
'auto',
'package',
'cdc-shared-package',
'singularity',
'cdc-shared-singularity',
'docker',
'environment',
'module'
]
}
}
}
},
'notifications': {
'type': 'list',
'default': [],
'schema': {
'type': 'dict',
'default': {},
'schema': {
'url': {'type': 'string', 'required': True},
'to': {
'anyof': [
{'type': 'string', 'required': True},
{'type': 'list', 'required': True}
],
},
'events': {'type': 'string', 'default': '*'}
}
}
}
}
}
class Definition:
"""
GeneFlow Definition class.
The Definition class is used to load and validate workflow
definition YAML file and job definition YAML file.
"""
def __init__(self):
"""Initialize Definition class with default values."""
self._apps = {}
self._workflows = {}
self._jobs = {}
@classmethod
def load_yaml(cls, yaml_path):
"""
Load a multi-doc yaml file.
Read a multi-doc yaml file and return a list of dicts. Only basic YAML
validation is performed in this method.
Args:
yaml_path: path to multi-doc YAML file.
Returns:
List of dicts.
"""
try:
with open(yaml_path, 'rU') as yaml_file:
yaml_data = yaml_file.read()
except IOError as err:
Log.an().error(
'cannot read yaml file: %s [%s]', yaml_path, str(err)
)
return False
try:
yaml_dict = list(yaml.safe_load_all(yaml_data))
except yaml.YAMLError as err:
Log.an().error('invalid yaml: %s [%s]', yaml_path, str(err))
return False
return yaml_dict
def load(self, yaml_path):
"""
Load and validate GeneFlow definition from a multi-doc YAML file.
Read a GeneFlow definition file, which can contain apps, workflows,
and jobs. Loaded docs are appended to the _apps, _workflows, and _jobs
arrays. Load may be called multiple times. Docs are only added if
successfully validated.
Args:
yaml_path: path to GeneFlow YAML definition file.
Returns:
On success: True
On failure: False.
"""
# load multi-doc yaml file
gf_def = self.load_yaml(yaml_path)
if gf_def is False:
Log.an().error('cannot load yaml file: %s', yaml_path)
return False
# iterate through yaml docs
for gf_doc in gf_def:
# class must be specified, either app or workflow
if 'class' not in gf_doc:
Log.a().error('unspecified document class')
return False
if gf_doc['class'] == 'app':
if 'apps' in gf_doc:
# this is a list of apps
for app in gf_doc['apps']:
if not self.add_app(app):
Log.an().error(
'invalid app in definition: %s', yaml_path
)
return False
else:
# only one app
if not self.add_app(gf_doc):
Log.an().error(
'invalid app in definition: %s', yaml_path
)
return False
elif gf_doc['class'] == 'workflow':
# only one workflow per yaml file allowed
if not self.add_workflow(gf_doc):
Log.an().error(
'invalid workflow in definition: %s', yaml_path
)
return False
elif gf_doc['class'] == 'job':
if 'jobs' in gf_doc:
# this is a list of jobs
for job in gf_doc['jobs']:
if not self.add_job(job):
Log.an().error(
'invalid job in definition: %s', yaml_path
)
return False
else:
# only one job
if not self.add_job(gf_doc):
Log.an().error(
'invalid job in definition: %s', yaml_path
)
return False
else:
Log.a().error('invalid document class: %s', gf_doc['class'])
return False
return True
@classmethod
def validate_app(cls, app_def):
"""Validate app definition."""
validator = cerberus.Validator(APP_SCHEMA[GF_VERSION])
valid_def = validator.validated(app_def)
if not valid_def:
Log.an().error(
'app validation error:\n%s',
pprint.pformat(validator.errors)
)
return False
return valid_def
@classmethod
def calculate_step_numbering(cls, workflow_dict):
"""
Calculate step numbering for a workflow.
Use a topological sort algorithm to calculate step number and validate
the DAG. Return a workflow dict with populated 'number' and 'letter'
numbering.
Args:
workflow_dict: Dict of workflow to number.
Returns:
On success: A workflow dict with step numbers.
On failure: False.
"""
# initial step number
number = 1
steps = workflow_dict['steps']
# indicate if step has been traversed
step_status = {}
for step in steps:
step_status[step] = False
all_done = False
while not all_done:
all_done = True
steps_done = [] # steps to be set to done on next iter
| |
from __future__ import annotations
import asyncio
import base64
import datetime
from http import HTTPStatus
import json
import logging
import os
from pathlib import Path
try:
# Linux expects the latest package version of 3.35.4 (as of pysqlite-binary 0.4.6)
import pysqlite3 as sqlite3
except ModuleNotFoundError:
# MacOS has latest brew version of 3.35.5 (as of 2021-06-20).
# Windows builds use the official Python 3.10.0 builds and bundled version of 3.35.5.
import sqlite3 # type: ignore
import aiohttp
from aiohttp import web, WSServerHandshakeError
from bitcoinx import PrivateKey, PublicKey
from electrumsv_database.sqlite import replace_db_context_with_connection
import pytest
import requests
from esv_reference_server.application_state import ApplicationState
from esv_reference_server.errors import WebsocketUnauthorizedException
from esv_reference_server import sqlite_db
from .conftest import _wrong_auth_type, _bad_token, _successful_call, _no_auth, \
_subscribe_to_general_notifications_peer_channels, TEST_EXTERNAL_HOST, TEST_EXTERNAL_PORT, \
WS_URL_GENERAL
# WebSocket endpoint template for per-channel message-box notifications.
WS_URL_TEMPLATE_MSG_BOX = "ws://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
    "/api/v1/channel/{channelid}/notify"
# Fixed keypair so the account created in setup_class is deterministic.
PRIVATE_KEY_1 = PrivateKey.from_hex(
    "720f1987db69efa562b3dabd78e51f19bd8da76c70ad839b72b939f4071b144b")
PUBLIC_KEY_1: PublicKey = PRIVATE_KEY_1.public_key
REF_TYPE_OUTPUT = 0
REF_TYPE_INPUT = 1
STREAM_TERMINATION_BYTE = b"\x00"
# Directory containing this test module.
MODULE_DIR = Path(os.path.dirname(os.path.abspath(__file__)))
# Module-level placeholders; individual tests create their own channels and
# tokens and bind these names locally instead of mutating the globals.
CHANNEL_ID: str = ""
CHANNEL_BEARER_TOKEN: str = ""
CHANNEL_BEARER_TOKEN_ID: int = 0
CHANNEL_READ_ONLY_TOKEN: str = ""
CHANNEL_READ_ONLY_TOKEN_ID: int = 0
class TestAiohttpRESTAPI:
logger = logging.getLogger("test-aiohttp-rest-api")
_account_id: int
_api_key: str
    @classmethod
    def setup_class(cls) -> None:
        """Create the test account used by every test in this class.

        Requires the reference server's ApplicationState singleton to already
        exist (i.e. the server fixture has started). Registers an account for
        PUBLIC_KEY_1 and stores its id and master API key on the class.
        """
        # NOTE(review): singleton_reference appears to be a weak reference —
        # it is called to obtain the instance; confirm against ApplicationState.
        assert ApplicationState.singleton_reference is not None
        application_state = ApplicationState.singleton_reference()
        assert application_state is not None
        cls._account_id, cls._api_key = application_state.database_context.run_in_thread(
            sqlite_db.create_account, PUBLIC_KEY_1.to_bytes(compressed=True))
    def setup_method(self) -> None:
        # No per-test setup; each test creates its own channels/tokens.
        pass
    def teardown_method(self) -> None:
        # No per-test cleanup performed.
        pass
    @classmethod
    def teardown_class(cls) -> None:
        # No class-level cleanup performed here.
        pass
async def _create_new_channel(self) -> tuple[str, str, str]:
URL = "http://{host}:{port}/api/v1/channel/manage".format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT)
request_body = {
"public_read": True,
"public_write": True,
"sequenced": True,
"retention": {
"min_age_days": 0,
"max_age_days": 0,
"auto_prune": True
}
}
self.logger.debug("test_create_new_channel url: %s", URL)
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {self._api_key}"}
async with session.post(URL, headers=headers, json=request_body) as resp:
self.logger.debug("resp.content = %s", resp.content)
assert resp.status == 200, resp.reason
single_channel_data = await resp.json()
CHANNEL_ID = single_channel_data['id']
CHANNEL_BEARER_TOKEN = single_channel_data['access_tokens'][0]['token']
CHANNEL_BEARER_TOKEN_ID = single_channel_data['access_tokens'][0]['id']
return CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID
async def _create_read_only_token(self, CHANNEL_ID: str) -> tuple[str, str]:
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}/api-token"
request_body = {
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_create_new_token_for_channel url: %s", url)
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {self._api_key}"}
async with session.post(url, headers=headers, json=request_body) as resp:
self.logger.debug("resp.content = %s", resp.content)
assert resp.status == 200, resp.reason
response_body = await resp.json()
CHANNEL_READ_ONLY_TOKEN_ID = response_body['id']
CHANNEL_READ_ONLY_TOKEN = response_body['token']
return CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN
@pytest.mark.asyncio
def test_ping(self) -> None:
URL = "http://{host}:{port}/".format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT)
result = requests.get(URL)
assert result.text is not None
@pytest.mark.asyncio
def test_create_new_channel(self) -> None:
URL = 'http://{host}:{port}/api/v1/channel/manage'.format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT)
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"public_read": True,
"public_write": True,
"sequenced": True,
"retention": {
"min_age_days": 0,
"max_age_days": 0,
"auto_prune": True
}
}
self.logger.debug("test_create_new_channel url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
single_channel_data = response_body
CHANNEL_ID = single_channel_data['id']
assert single_channel_data['href'] == \
f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0, \
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert single_channel_data['access_tokens'][0]['id'] == 1
issued_token_bytes = \
base64.urlsafe_b64decode(single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_create_new_token_for_channel(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: create_new_token_for_channel
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}/api-token"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"description": "some description",
"can_read": True,
"can_write": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_create_new_token_for_channel url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
assert len(base64.urlsafe_b64decode(response_body['token'])) == 64
expected_response_body = {
"id": 3,
"token": response_body['token'],
"description": "some description",
"can_read": True,
"can_write": False
}
assert response_body == expected_response_body
@pytest.mark.asyncio
def test_list_channels(self) -> None:
# handler: list_channels
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/list"
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_list_channels url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
assert isinstance(response_body, list)
assert len(response_body) == 2
for single_channel_data in response_body:
# assert single_channel_data['href'] == \
# f"http://{TEST_HOST}:{TEST_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0,
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert isinstance(single_channel_data['access_tokens'][0]['id'], int)
issued_token_bytes = base64.urlsafe_b64decode(
single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
# assert single_channel_data['access_tokens'][0]['token'] == CHANNEL_BEARER_TOKEN
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_get_single_channel_details(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: get_single_channel_details
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}"
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_get_single_channel_details url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
single_channel_data = response_body
assert single_channel_data['href'] == \
f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0,
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert isinstance(single_channel_data['access_tokens'][0]['id'], int)
issued_token_bytes = \
base64.urlsafe_b64decode(single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_update_single_channel_properties(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: update_single_channel_properties
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"public_read": True,
"public_write": True,
"locked": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_update_single_channel_properties url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == request_body
@pytest.mark.asyncio
async def test_get_token_details(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
expected_response_body = {
"id": CHANNEL_READ_ONLY_TOKEN_ID,
"token": CHANNEL_READ_ONLY_TOKEN,
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
# handler: get_token_details
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token/{tokenid}'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID,
tokenid=CHANNEL_READ_ONLY_TOKEN_ID)
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_get_token_details url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == expected_response_body
@pytest.mark.asyncio
async def test_get_list_of_tokens(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
expected_response_body = [
{
"id": CHANNEL_BEARER_TOKEN_ID,
"token": CHANNEL_BEARER_TOKEN,
"description": "Owner",
"can_read": True,
"can_write": True
},
{
"id": CHANNEL_READ_ONLY_TOKEN_ID,
"token": <PASSWORD>NEL_READ_ONLY_TOKEN,
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
]
# handler: get_list_of_tokens
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID)
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_get_list_of_tokens url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == expected_response_body
# MESSAGE MANAGEMENT APIS - USE CHANNEL-SPECIFIC BEARER TOKEN NOW
@pytest.mark.asyncio
async def test_write_message_no_content_type_should_raise_400(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: write_message
URL = 'http://{host}:{port}/api/v1/channel/{channelid}'.format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID)
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
request_body = {"key": "value"}
self.logger.debug("test_write_message_no_content_type_should_raise_400 url: %s", URL)
headers = {
"Content-Type": "",
}
result = _successful_call(URL, HTTP_METHOD, headers, request_body, CHANNEL_BEARER_TOKEN)
assert result.status_code == HTTPStatus.BAD_REQUEST, result.reason
assert result.reason is not None
@pytest.mark.asyncio
async def test_write_message_read_only_token_should_fail(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
self.logger.debug("test_write_message_read_only_token_should_fail url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, headers,
request_body, CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 401, result.reason
def _write_message(self, CHANNEL_ID: str, CHANNEL_BEARER_TOKEN: str) -> requests.Response:
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
self.logger.debug("test_write_message url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, headers,
request_body, CHANNEL_BEARER_TOKEN)
assert result.status_code == 200, result.reason
return result
@pytest.mark.asyncio
async def test_write_message(self) -> None:
"""Uses CHANNEL_BEARER_TOKEN to write messages for the CHANNEL_READ_ONLY_TOKEN to read."""
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = | |
from itertools import product, chain
from csv import writer
from os import path
import re
class UtterMore:
def __init__(self, *utterance_templates):
"""
A class to create utterances for a custom skill for Amazon's Alexa. It
can be a tedious process if verbosity is desired because language is so
flexible. So this will automatically creates all the utterances you want
based on (a) given utterance template(s).
There are three ways to format a template and they are as follows:
(a|b|c|...) - [OR] Used if you want to allow multiple interchangeable
words. For example, if photo, picture and painting are
interchangeable in your utterances, then write
(photo|picture|painting) in the place where it would be. The number
of words to OR is arbitrary and single curly keywords like
{intent_slot} can be used in this.
(a*1|b*2) (c^1|d^2) - [CONDITIONAL OR] The * defines a master with a tag
of whatever follows the * while the ^ defines a follower of the tag
of whatever follows the ^. So utterances with a word tagged with
^sample will only be returned if the utterance also has a word
tagged with *sample. The above will display 'a c' OR 'b d'.
{{slot}} - [OPTIONAL INTENT SLOT] Used if the existence of an intent
slot in your utterance is optional. For example, if you have an
optional adverb you may write I {adverb} love it or just I love it.
Instead you can write I {{adverb}} love it to capture both.
For example, the template
"What (is*singular|are*plural) (that^singular|those^plural) {{things}}"
will return the following utterances:
['What is that {things}',
'What is that',
'What are those {things}',
'What are those']
An arbitrary number of utterance templates can be passed to the class.
Or utterance templates can be passed as a solo argument to
self.build_utterances.
Parameters:
utterance_templates - Arbitrary number of utterance templates. Their
respective utterance can be created by running
self.iter_build_utterances which will save them
in self.utterances
"""
# Handle a combination of lists and strings being passed
self.utterance_templates = list(chain.from_iterable(
[[template] if isinstance(template, str) else template
for template in utterance_templates]))
self.utterances = []
def iter_build_utterances(self):
"""
Iteratively runs self.build_utterances for every utterance template
given in the initialization (in self.utterance_templates) and stores
the resulting utterances in self.utterances as a two-dimensional list
where each list element is a list of all the utterances for a single
template.
"""
for utterance_template in self.utterance_templates:
self.utterances.append(self.build_utterances(utterance_template))
@staticmethod
def _order_curlies(*curlies):
"""
Orders the curlies in a list based on where they should appear in the
template and prepares it for adding to template.
"""
# Create dictionary mapping where the above occur to the occurance
all_curlies = chain.from_iterable(curlies)
indexed_curlies = {curly.start(0): curly.group(0) for curly in all_curlies}
ordered_curlies = []
for ind in sorted(indexed_curlies.keys()):
curly = indexed_curlies[ind]
# Double curlies are either single curlies or nothing
if curly.startswith('{{'):
ordered_curlies.append([curly[1:-1], ''])
# These are a choice of the words separated by the pip
elif curly.startswith('('):
ordered_curlies.append(curly[1:-1].split('|'))
return ordered_curlies
@staticmethod
def _fill_in_template(template, ordered_curlies):
"""
Given a template to fill and an ordered list of curlies created by
self._order_curlies to fill it, it does just that.
"""
# Fill in template with every combination
utterances = []
for edit in product(*ordered_curlies):
skip_edit = False
# First get the masters (OR keywords with the *)
masters = set()
for kw in edit:
# Will be empty list if find no * followed by the tag
found_master = re.findall(r'\*(\w+)', kw)
# If not empty list, add that to masters set
if found_master:
masters.add(found_master[0])
# Find the followers and see if they match up with the masters
for kw in edit:
# Same idea as with finding the masters/a master
found_follower = re.findall(r'\^(\w+)', kw)
# Don't add this edit to utterances if it has a follower that
# isn't in the masters set
if found_follower and found_follower[0] not in masters:
skip_edit = True
continue
# If all good, add it!
if not skip_edit:
# Remove the OR conditional stuff to clean it
cleaned_edit = [x.split('*')[0].split('^')[0] for x in edit]
# The join/split nonsense removes excess whitespace
utterances.append(' '.join(template.format(*cleaned_edit).split()))
return utterances
def build_utterances(self, utterance_template):
"""
Returns the made utterances given an utterance template. It supports
the following substitutions:
(a|b|c|...) - [OR] This wil place a OR b OR c OR etc. in its place
{{slot}} - [OPTIONAL SLOT] This will place the slot {slot} or nothing
in its place.
(a*1|b*2) (c^1|d^2) - [CONDITIONAL OR] The * defines a master with a tag
of whatever follows the * while the ^ defines a follower of the tag
of whatever follows the ^. So utterances with a word tagged with
^sample will only be returned if the utterance also has a word
tagged with *sample. The above will display 'a c' OR 'b d'. If we
have multiple masters, then the follower(s) will appear if at least
one master is present. And alternatively, you can treat multiple
followers as a CONDITIONAL AND.
For example, the template
"What (is*singular|are*plural) (that^singular|those^plural) {{things}}"
will return the following utterances:
['What is that {things}',
'What is that',
'What are those {things}',
'What are those']
Parameters:
utterance_template - The template the utterances are created from
"""
# Find every double bracketed keyword
double_curlies = re.finditer(r'({{[^{}]*}})', utterance_template)
# Find every single parenthesis keyword
or_curlies = re.finditer(r'(\([^()]*\))', utterance_template)
# Below turns "What (is|are) (that|those) {{things}} {place}?" into:
# "What {} {} {} {{place}}?"
# Finds the above keywords and replaces with {} for formatting
template = re.sub(r'{{[^{}]*}}|\([^()]*\)', '{}', utterance_template)
# Turns {...} into {{...}} to literalize the curlies
template = re.sub(r'\{[\w]+\}', lambda x: '{' + x.group(0) + '}', template)
# Creates ordered list of curlies based on their appearance in template
ordered_curlies = self._order_curlies(double_curlies, or_curlies)
# Fills in template based on logic given by utterance template
return self._fill_in_template(template, ordered_curlies)
def add_utterance_template(self, utterance_template):
"""
Adds another utterance template to the current list of them.
Parameters:
utterance_template - Template to add to current list of templates
"""
self.utterance_templates.append(utterance_template)
def save_utterances(self, fpath, name, saved_as, force=False, written_as=None):
"""
Saves the current utterances to a file.
Parameters:
fpath - Path to the directory in which to save the file
name - Name of the to be saved file
saved_as - File type, file extension to save as (e.g. 'txt' or 'csv')
force - (default False) If True, will automatically make the file. If a
file of the same name exists, it will overwrite it. If False,
it will throw an error of the file already exists.
written_as - (default None) What type of file to be written as. If no
argument is given, then it will be written as what it is
saved as. For example, if we put saved_as='txt' and
written_as='csv', then the save file will have a .txt
extension but will be written as comma-separated values
like a CSV. Amazon's Alexa requires a CSV file but with
line separated values, so self.save_for_alexa uses
saved_as='csv' and written_as='txt'
"""
# Allows saving with one file extension but as another
written_as = written_as or saved_as
# Create full path name
full_path = path.join(fpath, name + '.' + saved_as)
# Check if file already exists
if path.exists(full_path) and not force:
raise Exception('File already exists and force=False. ' +
'Set force=True to overwrite file.')
# Check if unsupported file type
if saved_as not in ['csv', 'txt']:
raise Exception("File type '{}' is not supported.".format(ftype_or))
# Open file and add every utterance
with open(full_path, 'w', newline='') as f:
if written_as == 'txt':
first_line = True
for utterance in chain.from_iterable(self.utterances):
# To write '\n'-separated but without one at start or end
if first_line:
first_line = False
f.write('{}'.format(utterance.strip()))
else:
f.write('\n{}'.format(utterance.strip()))
elif written_as == 'csv':
csv_writer = writer(f)
csv_writer.writerow(chain.from_iterable(self.utterances))
def save_for_alexa(self, fpath, name, force=False):
"""
Creates CSV in the format that Alexa needs (instead of comma-separated,
it's new line-separated otherwise it won't upload correctly).
Parameters:
fpath - | |
== 'NEW':
status = _TreeShr._TreeOpenNew(_C.byref(self.ctx),
_C.c_char_p(_ver.tobytes(self.tree)),
_C.c_int32(self.shot))
else:
raise TypeError('Invalid mode specificed, use "normal","edit","new" or "readonly".')
_exc.checkStatus(status)
if status!=_exc.TreeALREADY_OPEN.status:
self.opened = True # only update if tree was not open before
if not isinstance(self.ctx,_C.c_void_p) or self.ctx.value is None:
raise _exc.MDSplusERROR
self.tctx = _TreeCtx(self.ctx.value,self.opened,tree=self.tree,shot=self.shot,mode=mode)
self.tree = self.name
self.shot = self.shotid
finally:
if not self.path is None:
_mds.setenv(env_name,old_path)
_TreeCtx.lock.release()
    def __init__(self, tree=None, shot=-1, mode='NORMAL', path=None):
        """Create a Tree instance. Specify a tree and shot and optionally a mode.
        If providing the mode argument it should be one of the following strings:
        'Normal','Edit','New','ReadOnly'.
        If no arguments provided, create instance of the active tree. (i.e. Tree())
        @param tree: Name of tree to open
        @type tree: str
        @param shot: Shot number
        @type shot: int
        @param mode: Optional mode, one of 'Normal','Edit','New','Readonly'
        @type mode: str
        @param path: Optional tree path override used while opening
        @type path: str
        """
        if tree is None:
            # Wrap the currently active tree context without taking ownership
            # of it (opened=False), so __del__/__exit__ will not close it.
            ctx = _TreeCtx.getDbid()
            if ctx is None:
                raise _exc.TreeNOT_OPEN
            self.ctx=_C.c_void_p(ctx)
            if not isinstance(self.ctx,_C.c_void_p) or self.ctx.value is None:
                raise _exc.MDSplusERROR
            self.opened = False
            # NOTE(review): tree is None in this branch, so str(tree) records
            # the literal 'None' in the context — confirm this is intended.
            self.tctx = _TreeCtx(self.ctx.value,self.opened,tree=str(tree),shot=shot,mode=mode)
            # Resolve the actual name/shot from the wrapped context.
            self.tree = self.name
            self.shot = self.shotid
        else:
            if path is not None: self.path = path
            self.tree = tree
            self.shot = shot
            self.opened = True
            self.open(mode)
    # support for the with-structure
    def __enter__(self):
        """ referenced if using "with Tree() ... " block"""
        return self
    def __del__(self):
        # Tear down only if this instance actually opened the tree and the
        # context objects still exist (they may be gone at interpreter exit).
        if self.opened and self.tctx and self.ctx:
            self.__exit__()
    def __exit__(self, *args):
        """ Cleanup for with statement. If tree is open for edit close it. """
        try:
            # quit() discards edits; close() is the normal path. The bare
            # except makes teardown deliberately best-effort.
            if self.open_for_edit:
                self.quit()
            else:
                self.close()
        except: pass
        if self.tctx:
            del(self.tctx,self.ctx)
    def quit(self):
        """Close edit session discarding node structure and tag changes.
        @rtype: None
        """
        # Serialized via the class lock; ctx is passed byref so the library
        # can update the context pointer.
        with self._lock:
            _exc.checkStatus(
                _TreeShr._TreeQuitTree(_C.byref(self.ctx),
                                       _C.c_char_p(_ver.tobytes(self.tree)),
                                       _C.c_int32(int(self.shot))))
    def close(self):
        """Close tree.
        @rtype: None
        """
        # Unlike quit(), no class lock is taken here; ctx is passed byref so
        # the library can update the context pointer.
        _exc.checkStatus(
            _TreeShr._TreeClose(_C.byref(self.ctx),
                                _C.c_char_p(_ver.tobytes(self.tree)),
                                _C.c_int32(self.shot)))
    ########### Tree instance properties #######################
    # Thin property wrappers over getDbi()/setDbi(); see getDbi for the
    # underlying DBI item codes.
    @property
    def name(self):
        "Tree name"
        return self.getDbi("name")
    treename=name
    expt=name
    #tree=name
    @property
    def shotid(self):
        "Shot number of tree"
        return self.getDbi("shotid")
    #shot=shotid
    @property
    def modified(self):
        "True if open for edit and modifications made to tree structure."
        return self.getDbi("modified")
    @property
    def open_for_edit(self):
        "True if tree is opened for edit"
        return self.getDbi("open_for_edit")
    @property
    def number_opened(self):
        "Number of open trees on tree stack"
        return self.getDbi("number_opened")
    @property
    def max_open(self):
        "Max number of trees to keep open on stack (settable)"
        return self.getDbi("max_open")
    @max_open.setter
    def max_open(self,value):
        self.setDbi("max_open",value)
    @property
    def default(self):
        "current default node position in tree (settable)"
        return self.getDefault()
    @default.setter
    def default(self,treenode):
        self.setDefault(treenode)
    @property
    def open_readonly(self):
        "True if tree is open readonly"
        return self.getDbi("open_readonly")
    @property
    def versions_in_model(self):
        "Support versioning of data in model. (settable)"
        return self.getDbi("versions_in_model")
    @versions_in_model.setter
    def versions_in_model(self,value):
        self.setDbi("versions_in_model",value)
    @property
    def versions_in_pulse(self):
        "Support versioning of data in pulse. (settable)"
        return self.getDbi("versions_in_pulse")
    @versions_in_pulse.setter
    def versions_in_pulse(self,value):
        self.setDbi("versions_in_pulse",value)
def getDbi(self,itemname):
"""
Get tree information such as:
treename/name/expt - name of the tree
shotid/shot - shot number
default/pwd - default node
modified - true if modified during edit
open_for_edit - true if tree is opened for edit
open_readonly - true if tree is opened readonly
versions_in_model - true if data versions is enabled in the model
versions_in_pulse - true if data versions is enabled in the pulse
itemname can be a single string or a list/tuple of strings
"""
if isinstance(itemname,(list,tuple)):
ans={}
for item in itemname:
ans[item]=self.getDbi(item)
return ans
else:
itemlist={'NAME':(1,str,12),'EXPT':(1,str,12),
'SHOTID':(2,int),'SHOT':(2,int),
'MODIFIED':(3,bool),
'OPEN_FOR_EDIT':(4,bool),
'DEFAULT':(8,str,256),
'OPEN_READONLY':(9,bool),
'VERSIONS_IN_MODEL':(10,bool),
'VERSIONS_IN_PULSE':(11,bool)}
try:
item=itemlist[itemname.upper()]
except KeyError:
raise KeyError('Item name must be one of %s' % list(itemlist.keys()))
if item[1] is str:
ans=_C.c_char_p(_ver.tobytes('x'.rjust(item[2])))
retlen=_C.c_int32(0)
itmlst=_DBI_ITM_CHAR(item[0],item[2],ans,retlen)
else:
ans=_C.c_int32(0)
itmlst=_DBI_ITM_INT(item[0],ans)
_exc.checkStatus(
_TreeShr._TreeGetDbi(self.ctx,
_C.byref(itmlst)))
if item[1] is str:
return _ver.tostr(ans.value)
else:
return item[1](ans.value)
    @property
    def top(self): # compatibility
        "Tree root"
        return TreeNode(0,self)
    def __getattr__(self,name):
        """Support for referencing an immediate child or
        member of current default node of the tree by
        specifying an uppercase property. For example:
        t = Tree('mytree',shot)
        node = t.NODENAME
        If the tree has a top level child or member with
        the name "NODENAME" t.NODENAME will return a
        TreeNode instance."""
        # __getattr__ is only consulted for missing attributes; a missing
        # 'tctx' means the instance was never fully opened.
        if name=='tctx': raise _exc.TreeNOT_OPEN
        try:
            return _getNodeByAttr(self,name)
        except _exc.TreeNNF:
            # Node-not-found becomes a normal AttributeError for callers.
            raise AttributeError('No such attribute: '+name)
    @staticmethod
    def usingPrivateCtx():
        """True if this thread/process is using a private tree context."""
        return bool(_TreeShr.TreeUsingPrivateCtx())
    @staticmethod
    def usePrivateCtx(on=True):
        """Enable (on=True) or disable use of a private tree context."""
        if on:
            val=_C.c_int32(1)
        else:
            val=_C.c_int32(0)
        return _TreeShr.TreeUsePrivateCtx(val)
    def __deepcopy__(self,memo):
        # A Tree wraps a shared C context; deep copies return the same object.
        return self
    def __eq__(self,obj):
        # Two Tree instances are equal iff they wrap the same context pointer.
        if isinstance(obj,(Tree,)):
            return self.ctx.value == obj.ctx.value
        return False
    def __ne__(self,obj): return not self.__eq__(obj)
    def __repr__(self):
        """Return representation
        @return: String representation of open tree
        @rtype: str
        """
        # Probe the dbi properties to classify the open mode; an unopened
        # tree raises TreeNOT_OPEN, which is reported as "Closed".
        try:
            if self.open_for_edit:
                mode="Edit"
            elif self.open_readonly:
                mode="Readonly"
            else:
                mode="Normal"
        except _exc.TreeNOT_OPEN:
            mode="Closed"
        return self.__class__.__name__+'("%s",%d,"%s")' % (self.tree,self.shot,mode)
    __str__=__repr__
    def addDevice(self,nodename,model):
        """Add a device to the tree of the specified device model type.
        @param nodename: Absolute or relative path specification of the head node of the device.
        All ancestors of node must exist.
        @type nodename: str
        @param model: Model name of the device being added.
        @type model: str
        @return: Head node of device added
        @rtype: TreeNode
        """
        # nid receives the new head node's id; serialized via the class lock.
        nid=_C.c_int32(0)
        with self._lock:
            _exc.checkStatus(
                _TreeShr._TreeAddConglom(self.ctx,
                                         _ver.tobytes(nodename),
                                         _ver.tobytes(model),
                                         _C.byref(nid)))
        return TreeNode(nid.value, self)
    def addNode(self,nodename,usage='ANY'):
        """Add a node to the tree. Tree must be in edit mode.
        @param nodename: Absolute or relative path specification of new node. All ancestors of node must exist.
        @type nodename: str
        @param usage: Usage of node.
        @type usage: str
        @return: Node created.
        @rtype: TreeNode
        """
        nid=_C.c_int32(0)
        try:
            usage_idx=_usage_table[usage.upper()]
        except KeyError:
            raise UsageError(usage)
        # Usage index 11 denotes a subtree: the node is created with usage 1
        # and then flagged via TreeSetSubtree below.
        usagenum = 1 if usage_idx==11 else usage_idx
        with self._lock:
            _exc.checkStatus(
                _TreeShr._TreeAddNode(self.ctx,
                                      _ver.tobytes(nodename),
                                      _C.byref(nid),
                                      _C.c_int32(usagenum)))
            if usage_idx==11:
                _exc.checkStatus(
                    _TreeShr._TreeSetSubtree(self.ctx,nid))
        return TreeNode(nid.value,self)
def createPulse(self,shot):
"""Create pulse.
@param shot: Shot number to create
@type shot: int
@rtype: None
"""
_exc.checkStatus(
_TreeShr._TreeCreatePulseFile(self.ctx,
_C.c_int32(int(shot)),
_C.c_int32(0),
_C.c_void_p(0)))
    def deleteNode(self,wild):
        """Delete nodes (and all their descendants) from the tree. Note: If node is a member of a device,
        all nodes from that device are also deleted as well as any descendants that they might have.
        @param wild: Wildcard path specifier of nodes to delete from tree.
        @type wild: str
        @rtype: None
        """
        with self._lock:
            first=True
            nodes=self.getNodeWild(wild)
            for node in nodes:
                # reset=1 on the first Initialize call starts a fresh delete
                # list; subsequent calls (reset=0) accumulate into that list.
                if first:
                    reset=_C.c_int32(1)
                    first=False
                else:
                    reset=_C.c_int32(0)
                _exc.checkStatus(
                    _TreeShr._TreeDeleteNodeInitialize(self.ctx,
                                                   node._nid,
                                                   0,
                                                   reset))
            # Perform the accumulated deletions in a single batch.
            _exc.checkStatus(
                _TreeShr._TreeDeleteNodeExecute(self.ctx))
def deletePulse(self,shot):
"""Delete pulse.
@param shot: Shot number to delete
@type shot: int
@rtype: None
"""
_exc.checkStatus(
_TreeShr._TreeDeletePulseFile(self.ctx,
_C.c_int32(int(shot)),
_C.c_int32(1)))
def dir(self):
"""list descendants of top"""
self.top.dir()
def __dir__(self):
"""used for tab completion"""
return [str(n.node_name) for n in self.top.descendants]+_ver.superdir(Tree,self)
    def findTagsIter(self, wild):
        """An iterator for the tagnames from a tree given a wildcard specification.
        @param wild: wildcard spec.
        @type wild: str
        @return: iterator of tagnames (strings) that match the wildcard specification
        @rtype: iterator
        """
        nid=_C.c_int32(0)
        tagctx=_C.c_void_p(0)
        # NOTE: restype is set on the shared library's function object, i.e.
        # process-wide, each time this generator runs.
        _TreeShr._TreeFindTagWild.restype=_C.c_char_p
        try:
            while True:
                tag_ptr = _TreeShr._TreeFindTagWild(self.ctx,
                                            _C.c_char_p(_ver.tobytes(wild)),
                                            _C.byref(nid),
                                            _C.byref(tagctx))
                # A NULL return marks the end of the matching tags
                if tag_ptr is None:
                    break
                # NOTE(review): with restype c_char_p this yields bytes on
                # Python 3, though the docstring says strings - confirm.
                yield tag_ptr.rstrip()
        except GeneratorExit:
            pass
        # Always release the library-side tag search context, even when the
        # caller abandons the generator early.
        _TreeShr.TreeFindTagEnd(_C.byref(tagctx))
    def findTags(self,wild):
        """Find tags matching wildcard expression
        @param wild: wildcard string to match tagnames.
        @type wild: str
        @return: Tuple of tag names matching wildcard expression
        @rtype: tuple
        """
        return tuple(self.findTagsIter(wild))
@staticmethod
def getCurrent(treename):
"""Return current shot for specificed treename
@param treename: Name of tree
@type treename: str
@return: Current shot number for the specified tree
@rtype: int
"""
shot=_TreeShr.TreeGetCurrentShotId(_ver.tobytes(treename))
if shot==0:
raise _exc.TreeNOCURRENT()
return shot
def getDefault(self):
"""Return current default TreeNode
@return: Current default node
@rtype: TreeNode
"""
nid=_C.c_int32(0)
_exc.checkStatus(
_TreeShr._TreeGetDefaultNid(self.ctx,
_C.byref(nid)))
return TreeNode(nid.value,self)
def getNode(self,name):
"""Locate node in tree. Returns TreeNode if found. Use double backslashes in node names.
@param name: Name of node. Absolute or relative path. No wildcards.
@type name: str
@return: Node if found
@rtype: TreeNode
"""
if isinstance(name,(int,_scr.Int32)):
ans = TreeNode(name,self)
else:
n=_C.c_int32(0)
_exc.checkStatus(
_TreeShr._TreeFindNode(self.ctx,
_ver.tobytes(str(name)),
_C.byref(n)))
return TreeNode(int(n.value),self)
return ans
    def _getNodeWildIter(self, name, *usage):
        """Yield raw integer nids matching a wildcard specification,
        optionally filtered by node usage(s). Backend for
        getNodeWildIter/getNodeWild.
        """
        if len(usage) == 0:
            # No usages given: match every usage
            usage_mask=0xFFFF
        else :
            try:
                usage_mask=0
                for u in usage:
                    # Build a bit-mask with one bit per requested usage index
                    usage_mask |= 1 << _usage_table[u.upper()]
            except KeyError:
                raise UsageError(u)
        nid=_C.c_int32(0)
        ctx=_C.c_void_p(0)
        try:
            # Loop while the returned status has its low bit set
            while _TreeShr._TreeFindNodeWild(self.ctx,
                                    _ver.tobytes(name),
                                    _C.byref(nid),
                                    _C.byref(ctx),
                                    _C.c_int32(usage_mask)) & 1 != 0:
                yield nid.value
        except GeneratorExit:
            pass
        # Release the library-side search context, even on early exit
        _TreeShr._TreeFindNodeEnd(self.ctx, _C.pointer(ctx))
def getNodeWildIter(self, name, *usage):
"""An iterator for the nodes in a tree given a wildcard specification.
@param name: Node name. May include wildcards.
@type name: str
@param usage: Optional list of node usages (i.e. "Numeric","Signal",...). Reduces return set by including only nodes with these usages.
@type usage: str
@return: iterator of TreeNodes that match the wildcard and usage specifications
@rtype: iterator
"""
for nid in self._getNodeWildIter(name,*usage):
yield TreeNode(nid,self)
def getNodeWild(self,name,*usage):
"""Find nodes in tree using a wildcard specification. | |
from __future__ import division, unicode_literals
import unittest
from itertools import chain
import serial
import pyfirmata
from pyfirmata import mockup
from pyfirmata.boards import BOARDS
from pyfirmata.util import (break_to_bytes, from_two_bytes,
str_to_two_byte_iter, to_two_bytes,
two_byte_iter_to_str)
# Messages todo left:
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# set pin mode(I/O) 0xF4 pin # (0-127) pin state(0=in)
# system reset 0xFF
class BoardBaseTest(unittest.TestCase):
    """Shared fixture: builds a pyfirmata Board backed by a mock serial port."""
    def setUp(self):
        # Test with the MockupSerial so no real connection is needed.
        # This must happen BEFORE the Board is constructed below.
        pyfirmata.pyfirmata.serial.Serial = mockup.MockupSerial
        # Set the wait time to a zero so we won't have to wait a couple of secs
        # each test
        pyfirmata.pyfirmata.BOARD_SETUP_WAIT_TIME = 0
        self.board = pyfirmata.Board('', BOARDS['arduino'])
        self.board._stored_data = []
        # FIXME How can it be that a fresh instance sometimes still contains data?
class TestBoardMessages(BoardBaseTest):
    """Exercises the Board's Firmata message handlers and the byte
    sequences the Board writes to the (mock) serial port."""
    # TODO Test layout of Board Mega
    def assert_serial(self, *incoming_bytes):
        # Drain everything the board wrote to the mock serial port and
        # compare it against the expected byte sequence.
        serial_msg = bytearray()
        res = self.board.sp.read()
        while res:
            serial_msg += res
            res = self.board.sp.read()
        self.assertEqual(bytearray(incoming_bytes), serial_msg)
    # First test the handlers
    def test_handle_analog_message(self):
        self.board.analog[3].reporting = True
        self.assertEqual(self.board.analog[3].read(), None)
        # This should set it correctly. 1023 (127, 7 in to 7 bit bytes) is the
        # max value an analog pin will send and it should result in a value 1
        self.board._handle_analog_message(3, 127, 7)
        self.assertEqual(self.board.analog[3].read(), 1.0)
    def test_handle_digital_message(self):
        # A digital message sets the value for a whole port. We will set pin
        # 5 (That is on port 0) to 1 to test if this is working.
        self.board.digital_ports[0].reporting = True
        self.board.digital[5]._mode = 0  # Set it to input
        # Create the mask
        mask = 0
        mask |= 1 << 5  # set the bit for pin 5 to to 1
        self.assertEqual(self.board.digital[5].read(), None)
        self.board._handle_digital_message(0, mask % 128, mask >> 7)
        self.assertEqual(self.board.digital[5].read(), True)
    def test_handle_report_version(self):
        self.assertEqual(self.board.firmata_version, None)
        self.board._handle_report_version(2, 1)
        self.assertEqual(self.board.firmata_version, (2, 1))
    def test_handle_report_firmware(self):
        self.assertEqual(self.board.firmware, None)
        data = bytearray([2, 1])
        data.extend(str_to_two_byte_iter('Firmware_name'))
        self.board._handle_report_firmware(*data)
        self.assertEqual(self.board.firmware, 'Firmware_name')
        self.assertEqual(self.board.firmware_version, (2, 1))
    # type                command  channel    first byte            second byte
    # ---------------------------------------------------------------------------
    # analog I/O message    0xE0   pin #      LSB(bits 0-6)         MSB(bits 7-13)
    def test_incoming_analog_message(self):
        self.assertEqual(self.board.analog[4].read(), None)
        self.assertEqual(self.board.analog[4].reporting, False)
        # Should do nothing as the pin isn't set to report
        self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
        self.board.iterate()
        self.assertEqual(self.board.analog[4].read(), None)
        self.board.analog[4].enable_reporting()
        self.board.sp.clear()
        # This should set analog port 4 to 1
        self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
        self.board.iterate()
        self.assertEqual(self.board.analog[4].read(), 1.0)
        self.board._stored_data = []
    def test_handle_capability_response(self):
        """
        Capability Response codes:
        # INPUT:  0, 1
        # OUTPUT: 1, 1
        # ANALOG: 2, 10
        # PWM:    3, 8
        # SERV0:  4, 14
        # I2C:    6, 1
        Arduino's Example: (ATMega328P-PU)
        (127,
         127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 3, 8, 4, 14, 127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 4, 14, 127,
         0, 1, 1, 1, 2, 10, 127,
         0, 1, 1, 1, 2, 10, 127,
         0, 1, 1, 1, 2, 10, 127,
         0, 1, 1, 1, 2, 10, 127,
         0, 1, 1, 1, 2, 10, 6, 1, 127,
         0, 1, 1, 1, 2, 10, 6, 1, 127)
        """
        test_layout = {
            'digital': (0, 1, 2),
            'analog': (0, 1),
            'pwm': (1, 2),
            'servo': (0, 1, 2),
            # 'i2c': (2),  # TODO 2.3 specs
            'disabled': (0,),
        }
        # Eg: (127)
        unavailible_pin = [
            0x7F,  # END_SYSEX (Pin delimiter)
        ]
        # Eg: (0, 1, 1, 1, 3, 8, 4, 14, 127)
        digital_pin = [
            0x00,  # INPUT
            0x01,
            0x01,  # OUTPUT
            0x01,
            0x03,  # PWM
            0x08,
            0x7F,  # END_SYSEX (Pin delimiter)
        ]
        # Eg. (0, 1, 1, 1, 4, 14, 127)
        analog_pin = [
            0x00,  # INPUT
            0x01,
            0x01,  # OUTPUT
            0x01,
            0x02,  # ANALOG
            0x0A,
            0x06,  # I2C
            0x01,
            0x7F,  # END_SYSEX (Pin delimiter)
        ]
        data_arduino = list(
            [0x6C]  # CAPABILITY_RESPONSE
            + unavailible_pin
            + digital_pin * 2
            + analog_pin * 2
        )
        self.board._handle_report_capability_response(*data_arduino)
        for key in test_layout.keys():
            self.assertEqual(self.board._layout[key], test_layout[key])
    # type                command  channel    first byte            second byte
    # ---------------------------------------------------------------------------
    # digital I/O message   0x90   port       LSB(bits 0-6)         MSB(bits 7-13)
    def test_incoming_digital_message(self):
        # A digital message sets the value for a whole port. We will set pin
        # 9 (on port 1) to 1 to test if this is working.
        self.board.digital[9].mode = pyfirmata.INPUT
        self.board.sp.clear()  # clear mode sent over the wire.
        # Create the mask
        mask = 0
        mask |= 1 << (9 - 8)  # set the bit for pin 9 to to 1
        self.assertEqual(self.board.digital[9].read(), None)
        self.board.sp.write([pyfirmata.DIGITAL_MESSAGE + 1, mask % 128, mask >> 7])
        self.board.iterate()
        self.assertEqual(self.board.digital[9].read(), True)
    # version report format
    # -------------------------------------------------
    # 0  version report header (0xF9) (MIDI Undefined)
    # 1  major version (0-127)
    # 2  minor version (0-127)
    def test_incoming_report_version(self):
        self.assertEqual(self.board.firmata_version, None)
        self.board.sp.write([pyfirmata.REPORT_VERSION, 2, 1])
        self.board.iterate()
        self.assertEqual(self.board.firmata_version, (2, 1))
    # Receive Firmware Name and Version (after query)
    # 0  START_SYSEX (0xF0)
    # 1  queryFirmware (0x79)
    # 2  major version (0-127)
    # 3  minor version (0-127)
    # 4  first 7-bits of firmware name
    # 5  second 7-bits of firmware name
    # x  ...for as many bytes as it needs)
    # 6  END_SYSEX (0xF7)
    def test_incoming_report_firmware(self):
        self.assertEqual(self.board.firmware, None)
        self.assertEqual(self.board.firmware_version, None)
        msg = [pyfirmata.START_SYSEX,
               pyfirmata.REPORT_FIRMWARE,
               2,
               1] + list(str_to_two_byte_iter('Firmware_name')) + \
            [pyfirmata.END_SYSEX]
        self.board.sp.write(msg)
        self.board.iterate()
        self.assertEqual(self.board.firmware, 'Firmware_name')
        self.assertEqual(self.board.firmware_version, (2, 1))
    # type                command  channel    first byte            second byte
    # ---------------------------------------------------------------------------
    # report analog pin     0xC0   pin #      disable/enable(0/1)   - n/a -
    def test_report_analog(self):
        self.board.analog[1].enable_reporting()
        self.assert_serial(0xC0 + 1, 1)
        self.assertTrue(self.board.analog[1].reporting)
        self.board.analog[1].disable_reporting()
        self.assert_serial(0xC0 + 1, 0)
        self.assertFalse(self.board.analog[1].reporting)
    # type                command  channel    first byte            second byte
    # ---------------------------------------------------------------------------
    # report digital port   0xD0   port       disable/enable(0/1)   - n/a -
    def test_report_digital(self):
        # This should enable reporting of whole port 1
        self.board.digital[8]._mode = pyfirmata.INPUT  # Outputs can't report
        self.board.digital[8].enable_reporting()
        self.assert_serial(0xD0 + 1, 1)
        self.assertTrue(self.board.digital_ports[1].reporting)
        self.board.digital[8].disable_reporting()
        self.assert_serial(0xD0 + 1, 0)
    # Generic Sysex Message
    # 0     START_SYSEX (0xF0)
    # 1     sysex command (0x00-0x7F)
    # x     between 0 and MAX_DATA_BYTES 7-bit bytes of arbitrary data
    # last  END_SYSEX (0xF7)
    def test_send_sysex_message(self):
        # 0x79 is queryFirmware, but that doesn't matter for now
        self.board.send_sysex(0x79, [1, 2, 3])
        sysex = (0xF0, 0x79, 1, 2, 3, 0xF7)
        self.assert_serial(*sysex)
    def test_send_sysex_string(self):
        self.board.send_sysex(0x79, bytearray("test", 'ascii'))
        sysex = [0xF0, 0x79]
        sysex.extend(bytearray('test', 'ascii'))
        sysex.append(0xF7)
        self.assert_serial(*sysex)
    def test_send_sysex_too_big_data(self):
        # Values above 127 do not fit in a 7-bit Firmata data byte
        self.assertRaises(ValueError, self.board.send_sysex, 0x79, [256, 1])
    def test_receive_sysex_message(self):
        sysex = bytearray([0xF0, 0x79, 2, 1, ord('a'), 0, ord('b'),
                           0, ord('c'), 0, 0xF7])
        self.board.sp.write(sysex)
        while len(self.board.sp):
            self.board.iterate()
        self.assertEqual(self.board.firmware_version, (2, 1))
        self.assertEqual(self.board.firmware, 'abc')
    def test_too_much_data(self):
        """
        When we send random bytes, before or after a command, they should be
        ignored to prevent cascading errors when missing a byte.
        """
        self.board.analog[4].enable_reporting()
        self.board.sp.clear()
        # Crap
        self.board.sp.write([i for i in range(10)])
        # This should set analog port 4 to 1
        self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
        # Crap
        self.board.sp.write([10 - i for i in range(10)])
        while len(self.board.sp):
            self.board.iterate()
        self.assertEqual(self.board.analog[4].read(), 1.0)
    # Servo config
    # --------------------
    # 0  START_SYSEX (0xF0)
    # 1  SERVO_CONFIG (0x70)
    # 2  pin number (0-127)
    # 3  minPulse LSB (0-6)
    # 4  minPulse MSB (7-13)
    # 5  maxPulse LSB (0-6)
    # 6  maxPulse MSB (7-13)
    # 7  END_SYSEX (0xF7)
    #
    # then sets angle
    # 8  analog I/O message (0xE0)
    # 9  angle LSB
    # 10 angle MSB
    def test_servo_config(self):
        self.board.servo_config(2)
        data = chain([0xF0, 0x70, 2], to_two_bytes(544),
                     to_two_bytes(2400), [0xF7, 0xE0 + 2, 0, 0])
        self.assert_serial(*list(data))
    def test_servo_config_min_max_pulse(self):
        self.board.servo_config(2, 600, 2000)
        data = chain([0xF0, 0x70, 2], to_two_bytes(600),
                     to_two_bytes(2000), [0xF7, 0xE0 + 2, 0, 0])
        self.assert_serial(*data)
    def test_servo_config_min_max_pulse_angle(self):
        self.board.servo_config(2, 600, 2000, angle=90)
        data = chain([0xF0, 0x70, 2], to_two_bytes(600),
                     to_two_bytes(2000), [0xF7])
        angle_set = [0xE0 + 2, 90 % 128,
                     90 >> 7]  # Angle set happens through analog message
        data = list(data) + angle_set
        self.assert_serial(*data)
    def test_servo_config_invalid_pin(self):
        # Pin 1 has no servo capability in the arduino layout
        self.assertRaises(IOError, self.board.servo_config, 1)
    def test_set_mode_servo(self):
        p = self.board.digital[2]
        p.mode = pyfirmata.SERVO
        data = chain([0xF0, 0x70, 2], to_two_bytes(544),
                     to_two_bytes(2400), [0xF7, 0xE0 + 2, 0, 0])
        self.assert_serial(*data)
class TestBoardLayout(BoardBaseTest):
def test_layout_arduino(self):
| |
1),
(9, 3, 3, 2): (0, 1),
(9, 3, 3, 3): (0, 1),
(9, 3, 3, 4): (0, 1),
(9, 3, 3, 5): (0, 1),
(9, 3, 4, -5): (0, 1),
(9, 3, 4, -4): (0, 1),
(9, 3, 4, -3): (0, 1),
(9, 3, 4, -2): (0, 1),
(9, 3, 4, -1): (0, 1),
(9, 3, 4, 0): (0, 1),
(9, 3, 4, 1): (0, 1),
(9, 3, 4, 2): (0, 1),
(9, 3, 4, 3): (0, 1),
(9, 3, 4, 4): (0, 1),
(9, 3, 4, 5): (0, 1),
(9, 3, 5, -5): (0, 1),
(9, 3, 5, -4): (0, 1),
(9, 3, 5, -3): (0, 1),
(9, 3, 5, -2): (0, 1),
(9, 3, 5, -1): (0, 1),
(9, 3, 5, 0): (0, 1),
(9, 3, 5, 1): (0, 1),
(9, 3, 5, 2): (0, 1),
(9, 3, 5, 3): (0, 1),
(9, 3, 5, 4): (0, 1),
(9, 3, 5, 5): (0, 1),
(9, 4, -5, -5): (0, 1),
(9, 4, -5, -4): (0, 0),
(9, 4, -5, -3): (0, 1),
(9, 4, -5, -2): (0, 1),
(9, 4, -5, -1): (0, 1),
(9, 4, -5, 0): (0, 1),
(9, 4, -5, 1): (0, 1),
(9, 4, -5, 2): (0, 1),
(9, 4, -5, 3): (0, 1),
(9, 4, -5, 4): (0, 1),
(9, 4, -5, 5): (0, 1),
(9, 4, -4, -5): (-1, 1),
(9, 4, -4, -4): (-1, 0),
(9, 4, -4, -3): (0, 1),
(9, 4, -4, -2): (0, 1),
(9, 4, -4, -1): (0, 1),
(9, 4, -4, 0): (0, 1),
(9, 4, -4, 1): (0, 1),
(9, 4, -4, 2): (0, 1),
(9, 4, -4, 3): (0, 1),
(9, 4, -4, 4): (1, 1),
(9, 4, -4, 5): (1, 0),
(9, 4, -3, -5): (0, 1),
(9, 4, -3, -4): (-1, 1),
(9, 4, -3, -3): (-1, 1),
(9, 4, -3, -2): (-1, 1),
(9, 4, -3, -1): (-1, 1),
(9, 4, -3, 0): (1, 1),
(9, 4, -3, 1): (1, 1),
(9, 4, -3, 2): (1, 1),
(9, 4, -3, 3): (1, 1),
(9, 4, -3, 4): (1, 1),
(9, 4, -3, 5): (1, 0),
(9, 4, -2, -5): (1, 0),
(9, 4, -2, -4): (1, 0),
(9, 4, -2, -3): (1, -1),
(9, 4, -2, -2): (-1, 0),
(9, 4, -2, -1): (1, 1),
(9, 4, -2, 0): (1, 1),
(9, 4, -2, 1): (1, 1),
(9, 4, -2, 2): (1, 1),
(9, 4, -2, 3): (1, 1),
(9, 4, -2, 4): (1, 1),
(9, 4, -2, 5): (1, 0),
(9, 4, -1, -5): (1, 0),
(9, 4, -1, -4): (1, 0),
(9, 4, -1, -3): (1, -1),
(9, 4, -1, -2): (-1, 1),
(9, 4, -1, -1): (1, 1),
(9, 4, -1, 0): (1, 1),
(9, 4, -1, 1): (1, 1),
(9, 4, -1, 2): (1, 1),
(9, 4, -1, 3): (1, 1),
(9, 4, -1, 4): (1, 1),
(9, 4, -1, 5): (1, 0),
(9, 4, 0, -5): (0, 1),
(9, 4, 0, -4): (0, 0),
(9, 4, 0, -3): (1, 1),
(9, 4, 0, -2): (1, 1),
(9, 4, 0, -1): (0, 1),
(9, 4, 0, 0): (0, 1),
(9, 4, 0, 1): (0, 1),
(9, 4, 0, 2): (0, 1),
(9, 4, 0, 3): (0, 1),
(9, 4, 0, 4): (0, 1),
(9, 4, 0, 5): (0, 1),
(9, 4, 1, -5): (0, 1),
(9, 4, 1, -4): (0, 1),
(9, 4, 1, -3): (0, 1),
(9, 4, 1, -2): (0, 1),
(9, 4, 1, -1): (0, 1),
(9, 4, 1, 0): (-1, 1),
(9, 4, 1, 1): (-1, 1),
(9, 4, 1, 2): (-1, 1),
(9, 4, 1, 3): (-1, 1),
(9, 4, 1, 4): (-1, 1),
(9, 4, 1, 5): (-1, 1),
(9, 4, 2, -5): (0, 1),
(9, 4, 2, -4): (0, 1),
(9, 4, 2, -3): (0, 1),
(9, 4, 2, -2): (0, 1),
(9, 4, 2, -1): (0, 1),
(9, 4, 2, 0): (0, 1),
(9, 4, 2, 1): (0, 1),
(9, 4, 2, 2): (0, 1),
(9, 4, 2, 3): (0, 1),
(9, 4, 2, 4): (0, 1),
(9, 4, 2, 5): (0, 1),
(9, 4, 3, -5): (0, 1),
(9, 4, 3, -4): (0, 1),
(9, 4, 3, -3): (0, 1),
(9, 4, 3, -2): (0, 1),
(9, 4, 3, -1): (0, 1),
(9, 4, 3, 0): (0, 1),
(9, 4, 3, 1): (0, 1),
(9, 4, 3, 2): (0, 1),
(9, 4, 3, 3): (0, 1),
(9, 4, 3, 4): (0, 1),
(9, 4, 3, 5): (0, 1),
(9, 4, 4, -5): (0, 1),
(9, 4, 4, -4): (0, 1),
(9, 4, 4, -3): (0, 1),
(9, 4, 4, -2): (0, 1),
(9, 4, 4, -1): (0, 1),
(9, 4, 4, 0): (0, 1),
(9, 4, 4, 1): (0, 1),
(9, 4, 4, 2): (0, 1),
(9, 4, 4, 3): (0, 1),
(9, 4, 4, 4): (0, 1),
(9, 4, 4, 5): (0, 1),
(9, 4, 5, -5): (0, 1),
(9, 4, 5, -4): (0, 1),
(9, 4, 5, -3): (0, 1),
(9, 4, 5, -2): (0, 1),
(9, 4, 5, -1): (0, 1),
(9, 4, 5, 0): (0, 1),
(9, 4, 5, 1): (0, 1),
(9, 4, 5, 2): (0, 1),
(9, 4, 5, 3): (0, 1),
(9, 4, 5, 4): (0, 1),
(9, 4, 5, 5): (0, 1),
(9, 5, -5, -5): (0, 0),
(9, 5, -5, -4): (0, 1),
(9, 5, -5, -3): (0, 1),
(9, 5, -5, -2): (0, 1),
(9, 5, -5, -1): (0, 1),
(9, 5, -5, 0): (0, 1),
(9, 5, -5, 1): (0, 1),
(9, 5, -5, 2): (0, 1),
(9, 5, -5, 3): (0, 1),
(9, 5, -5, 4): (0, 1),
(9, 5, -5, 5): (0, 1),
(9, 5, -4, -5): (-1, 0),
(9, 5, -4, -4): (0, 1),
(9, 5, -4, -3): (0, 1),
(9, 5, -4, -2): (0, 1),
(9, 5, -4, -1): (0, 1),
(9, 5, -4, 0): (0, 1),
(9, 5, -4, 1): (0, 1),
(9, 5, -4, 2): (0, 1),
(9, 5, -4, 3): (1, 1),
(9, 5, -4, 4): (1, 1),
(9, 5, -4, 5): (1, 0),
(9, 5, -3, -5): (-1, 1),
(9, 5, -3, -4): (-1, 1),
(9, 5, -3, -3): (-1, 1),
(9, 5, -3, -2): (-1, 1),
(9, 5, -3, -1): (-1, 1),
(9, 5, -3, 0): (1, 1),
(9, 5, -3, 1): (1, 1),
(9, 5, -3, 2): (1, 1),
(9, 5, -3, 3): (1, 1),
(9, 5, -3, 4): (1, 1),
(9, 5, -3, 5): (1, 0),
(9, 5, -2, -5): (1, 0),
(9, 5, -2, -4): (1, -1),
(9, 5, -2, -3): (-1, 0),
(9, 5, -2, -2): (0, 1),
(9, 5, -2, -1): (1, 1),
(9, 5, -2, 0): (1, 1),
(9, 5, -2, 1): (1, 1),
(9, 5, -2, 2): (1, 1),
(9, 5, -2, 3): (1, 1),
(9, 5, -2, 4): (1, 1),
(9, 5, -2, 5): (1, 0),
(9, 5, -1, -5): (1, 0),
(9, 5, -1, -4): (1, -1),
(9, 5, -1, -3): (-1, 1),
(9, 5, -1, -2): (-1, 1),
(9, 5, -1, -1): (1, 1),
(9, 5, -1, 0): (1, 1),
(9, 5, -1, 1): (1, 1),
(9, 5, -1, 2): (1, 1),
(9, 5, -1, 3): (1, 1),
(9, 5, -1, 4): (1, 1),
(9, 5, -1, 5): (1, 0),
(9, 5, 0, -5): (0, 0),
(9, 5, 0, -4): (1, 1),
(9, 5, 0, -3): (1, 1),
(9, 5, 0, -2): (1, 1),
(9, 5, 0, -1): (0, 1),
(9, 5, 0, 0): (0, 1),
(9, 5, 0, 1): (0, 1),
(9, 5, 0, 2): (0, 1),
(9, 5, 0, 3): (0, 1),
(9, 5, 0, 4): (0, 1),
(9, 5, 0, 5): (0, 1),
(9, | |
"""
Functions for processing/analysing ARNA campaign observations
"""
import os
import sys
import gc
import glob
import xarray as xr
import numpy as np
import AC_tools as AC
import pandas as pd
from netCDF4 import Dataset
from datetime import datetime as datetime_
import datetime as datetime
import time
from time import gmtime, strftime
# Import from elsewhere in ARNA module
from . core import *
from . utils import *
def get_coordinates_from_NetCDF_file(ds=None, folder=None, filename=None,
                                     falt_var='PS_RVSM',
                                     flat_var='LAT_GIN', flon_var='LON_GIN',
                                     AltVar='hPa', LonVar='lon', LatVar='lat',
                                     ftime_var='Time', TimeVar='time',
                                     convert_m2hPa=False, drop_NaNs=True):
    """
    Get locations (lat, lon, alt) from a NetCDF file.

    Parameters
    ----------
    ds (xr.Dataset): dataset to read coordinates from; if None, the file
        at folder+filename is opened instead
    folder, filename (str): location of the NetCDF file (used when ds is None)
    falt_var, flat_var, flon_var, ftime_var (str): variable names in the file
    AltVar, LonVar, LatVar (str): column names used in the returned DataFrame
    TimeVar (str): currently unused - kept for interface compatibility
    convert_m2hPa (bool): convert altitude from metres to hPa
        (not advised - use the pressure variable (PS_RVSM) instead)
    drop_NaNs (bool): drop rows missing any coordinate value

    Returns
    -------
    (pd.DataFrame): time-indexed DataFrame of altitude, longitude, latitude
    """
    # Make a dataframe of locations from the NetCDF file
    # (pandas is already imported at module level - no local import needed)
    if isinstance(ds, type(None)):
        ds = xr.open_dataset(folder+filename)
    df = pd.DataFrame()
    df[AltVar] = ds[falt_var].values
    df[LonVar] = ds[flon_var].values
    df[LatVar] = ds[flat_var].values
    df.index = ds[ftime_var].values
    # Convert metres of height to hPa
    # NOTE: The below conversion is not advised.
    # Use the external pressure variable instead (PS_RVSM).
    if convert_m2hPa:
        df.loc[:, AltVar] = AC.hPa_to_Km(df[AltVar].values/1E3, reverse=True, )
    # Drop where there are not values for all coordinates
    if drop_NaNs:
        df = df.dropna()
    return df
def get_ARNA_flights_as_dfs():
    """
    Retrieve the ARNA flights as a dict of location DataFrames.

    Returns
    -------
    (dict): flight ID (e.g. 'C217') => pd.DataFrame of FAAM locations.
        Flights whose data cannot be retrieved are skipped with a warning.
    """
    flight_nums = [216, 217, 218, 219, 220, 221, 222, 223, 224, 225]
    flight_IDs = ['C{}'.format(i) for i in flight_nums]
    dfs = {}
    for flight_ID in flight_IDs:
        print(flight_ID)
        try:
            df = AC.get_FAAM_locations_as_df(flight_ID=flight_ID)
            dfs[flight_ID] = df
        # Catch Exception rather than a bare except, so that
        # KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            print('WARNING: failed for {}'.format(flight_ID))
    return dfs
def set_flagged_data2NaNs(ds, VarName='no_mr', flag2use=0,
                          FlagName='no_flag'):
    """
    Set the flagged data in a dataset to be NaNs.

    Parameters
    ----------
    ds (xr.Dataset): dataset holding the data and its quality-flag variable
    VarName (str): name of the data variable to screen
    flag2use (int): flag value to KEEP (all other values are set to NaN)
    FlagName (str): name of the flag variable

    Returns
    -------
    (xr.Dataset): the same dataset, modified in place

    Notes
    -----
    What do the flags mean? (copied from FAAM NetCDF metadata,
    file: 'core-nitrates_faam_20200211_v002_r1_c224.nc')
    Flag=0 indicates good data.
    Flag=1 indicates reduced quality data.
    Flag=2 indicates suspect data.
    Flag=3 indicates missing or invalid data.
    """
    # Boolean mask of points whose flag differs from the value to keep
    # (renamed from 'bool', which shadowed the builtin)
    mask = ds[FlagName].values != flag2use
    # Use np.nan - the np.NaN alias was removed in NumPy 2.0
    ds[VarName].loc[mask] = np.nan
    return ds
def get_SWAS_data4flight(flight_ID=None):
    """
    Retrieve SWAS (whole air sample) data for ARNA flights.

    Parameters
    ----------
    flight_ID (str): flight ID (e.g. 'C217'); if None, samples from all
        flights are returned

    Returns
    -------
    (pd.DataFrame): SWAS samples indexed by sample start time
    """
    # Where is the data?
    folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'SWAS')
    # filename = 'ARNA-FIRSTLOOK-SWAS_JDL_typo_fix.csv'
    # var2use = 'comments4'
    # format = '%d/%m/%Y %H:%M:%S'
    # Or use latest file (NOTE: issue with column formating)
    # filename = 'ARNA-SECONDLOOK-SWAS.csv'
    # Use the updated second look file
    filename = 'ARNA-SECONDLOOK-SWAS_v2.csv'
    df = pd.read_csv(folder+filename)
    print(filename)
    # Update the index to use the SWAS sample start time
    var2use = 'SAMPLE START TIME'
    format = '%d/%m/%Y %H:%M:%S'
    df.index = pd.to_datetime(df[var2use].values, format=format)
    # If a flight ID stated, then only return points for that flight
    if isinstance(flight_ID, type(None)):
        pass
    else:
        # Get the beginning and end of the flight
        dfS = get_summary4flight(flight_ID=flight_ID)
        sdate = dfS.index.min()
        edate = dfS.index.max()
        # Only consider samples within this time
        df = df.loc[df.index >= sdate, :]
        df = df.loc[df.index <= edate, :]
    return df
def map_SWAS_var2GEOS_var(var, invert=False):
    """
    Map a SWAS species name to its GEOS/GEOS-Chem tracer name.

    Parameters
    ----------
    var (str): the name to translate (a SWAS species name, or a GEOS
        tracer name when invert=True)
    invert (bool): translate from GEOS tracer names back to SWAS names.
        NOTE: several SWAS species map onto one lumped tracer (ALK4, PRPE),
        so the inverse mapping is not unique - the last entry wins.

    Returns
    -------
    (str): the translated variable name

    Raises
    ------
    KeyError: if var has no mapping
    """
    d = {
        #    '1_3_butadiene':,
        #    '1_butene':,
        #    '2_3_methylpentane':,
        #    '224_tmp':,
        'acetaldehyde': 'ALD2',
        'acetone': 'ACET',
        #    'acetylene':,
        'benzene': 'BENZ',  # GEOSChem, but not GEOS-CF output
        #    'benzenechb':,
        'cis_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'cyclo_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'dms': 'DMS',  # GEOSChem, but not GEOS-CF output
        #    'dmschb':,
        'ethane': 'C2H6',
        #    'ethene':,
        #    'ethylbenzene':,
        #    'extra_1':,
        #    'extra_2':,
        #    'extra_3':,
        #    'extra_4':,
        #    'extra_5':,
        #    'extra_l2':,
        'iso_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'iso_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'iso_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'isoprene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'methanol': 'MOH',
        'mp_xylene': 'XYLE',
        'n_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_heptane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_hexane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_octane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'o_xylene': 'XYLE',
        'pent_1_ene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'propane': 'C3H8',
        'propene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'toluene': 'TOLU',
        #    'toluenechb':,
        'trans_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'trans_2_pentene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
    }
    # Invert the dictionary? (no need to materialise items() into a list)
    if invert:
        d = {v: k for k, v in d.items()}
    return d[var]
def get_ARNA_flight_log_as_df():
    """
    Combine all ARNA flight summaries into one time-sorted DataFrame.
    """
    flight_nums = [
        # 216,
        217, 218, 219, 220, 221, 222, 223, 224, 225
    ]
    flight_IDs = ['C{}'.format(i) for i in flight_nums]
    # Fetch each flight's summary, then combine and sort by time
    summaries = [get_summary4flight(flight_ID=ID) for ID in flight_IDs]
    return pd.concat(summaries).sort_index()
def get_summary4flight(flight_ID='C217'):
    """
    Retrieve a FAAM flight summary (CEDA csv) as a DataFrame.

    Parameters
    ----------
    flight_ID (str): flight ID, e.g. 'C217'

    Returns
    -------
    (pd.DataFrame): flight-summary events indexed by their start time,
        with an added 'flight_ID' column
    """
    folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'CEDA/v2020_06')
    filename = 'flight-sum_faam_*_*_{}.csv'.format(flight_ID.lower())
    file2use = glob.glob(folder+filename)
    ass_str = 'WARNING: {} flight summaries found present for flight {}!'
    # Exactly one summary file must match the glob
    assert len(file2use) == 1, ass_str.format(file2use, flight_ID)
    # Add Gotcha for missing header in FAAM file archived at CEDA
    columns = [
        'Event', 'Start', 'Start Hdg / °', 'Start Hgt / kft', 'Start Lat / °',
        'Start Long / °', 'Stop', 'Stop Hdg / °', 'Stop Hgt / kft',
        'Stop Lat / °', ' Stop Long / °', 'Comment',
    ]
    # C217's file lacks a header row, so supply the column names manually
    if flight_ID == 'C217':
        header = None
        names = columns
    else:
        header = 0
        names = None
    # Read file
    df = pd.read_csv(file2use[0], header=header, names=names)
    # Add a flight ID column
    df['flight_ID'] = flight_ID
    # Update start column to be in datetime format
    var2use = 'Start'
    format = '%Y-%m-%d %H:%M:%S'
    df.index = pd.to_datetime(df[var2use].values, format=format)
    return df
def get_filters_data4flight(flight_ID='C217', all_flights=True):
"""
Retrieve filters data from ARNA flights
"""
# Where is the data?
folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'Filters')
# What is the name of the sheet in the excel file?
# filename = 'Overview_all_filters_ACSIS_5_and_ARNA-1.xlsx'
# filename = 'Overview_filters_ARNA_2.xlsx'
filename = 'Overview_filters_ARNA_2_TMS_edits.xlsx'
sheet_name = 'Sheet1'
dfFULL = pd.read_excel(folder + filename, sheet_name=sheet_name)
# Now Just look at core data of interest
CoreCols = [
'Day', 'Flight', 'Filter', 'height', 'Time on', 'Time off',
'Airflow (stL)',
]
# - Select nitrate data
# Yes, GEOS-CF has sulfate variables output - 'NIT', 'NITs'
NO3cols = [i for i in dfFULL.columns if 'NO3' in i]
dfN = dfFULL[CoreCols + NO3cols]
#
# NO3_var = 'NO3.total'
NO3_var2use = ['NO3.2', 'NO3.5']
units = 'nanomoles/m3'
# NO3_idx = [list(dfFULL.columns).index(i) for i in var2inc]
# - Also save sulfate?
# Yes, GEOS-CF has sulfate variables output - 'SO4', 'SO4s'
# SO4cols = [i for i in dfFULL.columns if 'SO4' in i]
# dfS = dfFULL[CoreCols + SO4cols]
SO4_var2use = ['SO4.2', 'SO4.5']
units = 'nanomoles/m3'
# SO4_idx = [list(dfFULL.columns).index(i) for i in SO4_var2use]
# - Now chop off excess headers and make sure formats are correct
df = dfFULL.loc[dfFULL.index[2:], :]
#
# idx2use = [list(dfFULL.columns).index(i) for i in CoreCols]
# idx2use += NO3_idx + SO4_idx
# cols2use = [list(dfFULL.columns)[i] for i in idx2use ]
df = df[CoreCols + NO3_var2use + SO4_var2use]
# Replace values less than black/NaNs with np.NaN
df = df.replace('lower than blank', np.NaN)
df = df.replace('NaN', np.NaN)
# Remove blanks (as these are NaNs)
df = df.rename_axis(None)
df = df.loc[(df['height'] != 'blank').values, :]
# Update sampling times to date times
# Start time
TimeOnVar = 'Time on'
sdate_var = 'Sample Start'
df[sdate_var] = df['Day'].astype(str) + ' ' + df[TimeOnVar].astype(str)
format = '%Y-%m-%d %H:%M:%S'
df[sdate_var] = pd.to_datetime(df[sdate_var].values, format=format)
del df[TimeOnVar]
# End time
TimeOffVar = 'Time off'
edate_var = 'Sample End'
df[edate_var] = df['Day'].astype(str) + ' ' + df[TimeOffVar].astype(str)
format = '%Y-%m-%d %H:%M:%S'
df[edate_var] = pd.to_datetime(df[edate_var].values, format=format)
del df[TimeOffVar]
# calculate mid point of sampling and set this as index
interval_var = 'Sample interval'
df[interval_var] = df[edate_var] - df[sdate_var]
# Just use the middle of the timestep
df.index = df[sdate_var] + (df[interval_var]/2)
df = df.rename_axis(None)
# - Just consider totals for species of interest
NO3_var = 'NO3.total'
df[NO3_var] = df[NO3_var2use].sum(axis=1)
SO4_var = 'SO4.total'
df[SO4_var] = df[SO4_var2use].sum(axis=1)
del dfFULL
# Convert to ug/m3 from 'nanomoles/m3'
df[NO3_var] = df[NO3_var].values / 1E9 * AC.species_mass('NIT') * 1E6
df[SO4_var] = df[SO4_var].values / 1E9 * AC.species_mass('SO4') * 1E6
# Return all flights unless a specific flight | |
# /.venv/bin/python
import os
import shutil
import sys
import time
import webbrowser
from random import randint
from libs import metadata
from libs.testloop import Test
from libs.testloop import langDict, show_dirs, log, now, nonsense
from libs.testloop import TestExistsError, CorruptedTestError
# gui utilities
import tkinter as tk
from tkinter import messagebox
from PyQt5 import QtGui
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from libs.gui.about import Ui_About
from libs.gui.newtest import Ui_Dialog
from libs.gui.note import Ui_Note_Dialog
from libs.gui.result import ResultDialog
from libs.gui.resume import Ui_Resume
from libs.gui.settings import Ui_Settings
from libs.gui.splash import Ui_Splash
from libs.gui.window import Ui_MainWindow
from libs.gui.record import Ui_Record
from libs.gui.testlist import Ui_Testlist_DIalog
class GuiTest(Test):
    """Test subclass used by the GUI (currently identical to Test)."""
    pass
class RecordDialog(QDialog):
    """Dialog that records background noise via the module-global Test `t`.

    Both measurement variants share the same button/label choreography, so
    the common sequence is factored into _run_measurement().
    """

    def __init__(self):
        super(RecordDialog, self).__init__()
        self.ui = Ui_Record()
        self.ui.setupUi(self)
        # The button starts out as the "record" trigger; _run_measurement()
        # later rewires it into an OK/close button.
        self.ui.pushButton.pressed.connect(lambda: self.measure_noise())
        self.setWindowIcon(QtGui.QIcon('libs/gui/ico.ico'))
        self.setWindowTitle("VoRTEx")

    def _run_measurement(self, measure, make_label):
        """Run one measurement round-trip.

        Disables the button while `measure()` runs, shows the text produced
        by `make_label()` (called AFTER the measurement, so it can read the
        freshly-set result attributes on `t`), then turns the button into an
        OK button that closes the dialog.
        """
        self.ui.pushButton.setEnabled(False)
        self.ui.pushButton.setText("Recording...")
        app.processEvents()  # keep the GUI responsive while recording
        measure()
        self.ui.label.setText(make_label())
        self.ui.pushButton.setEnabled(True)
        self.ui.pushButton.setText("OK")
        self.ui.pushButton.pressed.disconnect()
        self.ui.pushButton.pressed.connect(lambda: self.close())

    def measure_noise(self):
        """Measure plain background noise and report it in dBA."""
        self._run_measurement(
            t.measure_noise,
            lambda: "Done! Background noise: %0.2fdBA" % t.noise)

    def measure_noise_radio(self):
        """Measure background noise with the radio on and report it in dBA."""
        self._run_measurement(
            t.measure_noise_radio,
            lambda: "Done! Background noise + radio: %0.2fdBA" % t.noise_radio)
# main window
class MyMain(QMainWindow):
    def __init__(self):
        """Build the main window and wire menu actions / buttons to the test loop.

        Uses the module globals ``app`` (QApplication), ``t`` (current test)
        and ``stylesheet``.
        """
        super(MyMain, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.raise_()
        self.setWindowIcon(QtGui.QIcon('libs/gui/ico.ico'))
        self.setStyleSheet(stylesheet)
        self.autosave = True
        # actions
        self.ui.actionNew_2.triggered.connect(self.on_newbutton_triggered)
        self.ui.actionSave.triggered.connect(lambda: self.save_pressed())
        self.ui.actionResume.triggered.connect(lambda: self.resume_pressed())
        self.ui.actionQuit.triggered.connect(lambda: self.close())
        self.ui.actionOnline_guide.triggered.connect(lambda: self.open_guide())
        self.ui.actionAudio_device.triggered.connect(lambda: self.open_settings())
        self.ui.actionAbout.triggered.connect(lambda: self.about())
        # buttons
        self.ui.cancel_button.pressed.connect(lambda: self.cancel_pressed())
        self.ui.pushButton.pressed.connect(lambda: self.lombard_pressed())
        self.ui.pushButton_2.pressed.connect(lambda: self.measure_noise())
        self.ui.pushButton_3.pressed.connect(lambda: self.measure_noise_radio())
        # NOTE: three slots fire on every play press, in connection order.
        self.ui.playButton.pressed.connect(lambda: self.do_test())
        self.ui.playButton.pressed.connect(lambda: self.update())
        self.ui.playButton.pressed.connect(lambda: self.update_screens())
        self.ui.repeatButton.pressed.connect(lambda: self.repeat())
        self.ui.printButton.setText("Export csv")
        self.ui.printButton.clicked.connect(lambda: self.print_csv())
        self.ui.commandsBox.doubleClicked.connect(lambda: self.double_clicked_command())
        # condition == -1 means "test not started yet" (recap state in do_test()).
        self.condition = -1
        self.update_screens()
        self.update()
        self.ww_waiting = False
        print("Current test: %s" % t.current_test)
        app.processEvents()
    def cancel_pressed(self):
        # Jump past the last condition so do_test() takes its IndexError path
        # and finalises/aborts the current test.
        self.condition = len(t.sequence[t.testlist[t.current_test]])
        self.do_test()
    @staticmethod
    def recap_test():
        """Show a summary message box with the test's language, progress and score."""
        messagebox.showinfo("%s" % t.testName, "Test progress: \n"
                                               "language: %s\n"
                                               "status: %d/%d\n"
                                               "results: %d/%d passed" %
                            (langDict[t.lang], t.current_test, len(t.testlist), t.passes, len(t.testlist)))
    def print_csv(self):
        # Export the report, then offer to open the generated CSV.
        t.print_report()
        if messagebox.askyesno("VoRTEx", "CSV file saved. Do you want to open it?"):
            self.open_csv()
    @staticmethod
    def open_csv():
        # Windows-only: launches the CSV with its associated application.
        os.system(t.report_file.replace("/", "\\"))
    @staticmethod
    def open_settings():
        sett = Settings()
        sett.exec_()
    def double_clicked_command(self):
        # Jump directly to the double-clicked command in the list and play it.
        self.condition = self.ui.commandsBox.currentRow()
        self.do_test()
    def repeat(self):
        """Step back one condition and replay it (cid "000" marks the wakeword)."""
        self.condition -= 1
        cid = t.sequence[t.testlist[t.current_test]][self.condition].split("\t")[0]
        if cid == "000":
            log("REPEATING WAKEWORD", t.logname)
        else:
            log("REPEATING COMMAND", t.logname)
        self.do_test()
    def measure_noise(self):
        """Open the RecordDialog to measure background noise (radio off).

        Requires the ear channel to be calibrated first.
        """
        if not t.recorder.calibrated[t.earChannel]:
            messagebox.showerror("VoRTEx", "You first have to calibrate the ear")
            return
        r = RecordDialog()
        r.ui.label.setText("Turn OFF the radio and press OK to measure the background noise")
        # Rebind the dialog button from its default to the plain measurement.
        r.ui.pushButton.pressed.disconnect()
        r.ui.pushButton.pressed.connect(lambda: r.measure_noise())
        r.exec_()
        self.update()
    def measure_noise_radio(self):
        """Open the RecordDialog to measure background noise with the radio on."""
        if not t.recorder.calibrated[t.earChannel]:
            messagebox.showerror("VoRTEx", "You first have to calibrate the ear")
            return
        r = RecordDialog()
        r.ui.label.setText("Turn ON the radio and press OK to measure the background noise")
        r.ui.pushButton.pressed.disconnect()
        r.ui.pushButton.pressed.connect(lambda: r.measure_noise_radio())
        r.exec_()
        self.update()
    def do_review(self):
        """Start a new test containing only the failed/flagged items (t.redo)."""
        if messagebox.askyesno("VoRTEx", "Do you really want to start a new test with just the errors of this one?"):
            # Reuse the Note dialog to ask for the new test's name.
            newname = Note()
            newname.ui.label.setText("Choose a name for the test")
            newname.exec_()
            print("REDOING :")
            print(t.redo)
            t.new(testname=newname.text, l_index=None, testlist=t.redo)
            print("REDOING :")
            print(t.testlist)
            self.update()
    def do_test(self):
        """Advance the test state machine by one step.

        ``self.condition`` indexes the current line of the active test sequence:
        -1 means "not started" (recap + optional calibration), 0 is the wakeword,
        and subsequent values play the next command.  Walking past the end of the
        sequence raises IndexError, which is used as the "test finished" path:
        collect a result, log it, and move on to the next test in ``t.testlist``.
        """
        print(self.condition)
        if self.condition == -1:
            # Recap test progress
            self.recap_test()
            # First step
            if t.isLombardEnabled:
                if (not t.recorder.calibrated[t.micChannel]) or (not t.recorder.calibrated[t.earChannel]) or (
                        not t.isMouthCalibrated):
                    want_to_calibrate = messagebox.askyesno("VoRTEx", "You first have to calibrate the microphones and "
                                                                      "the mouth in order to apply the Lombard effect. "
                                                                      "Do you want to do it now?")
                    if not want_to_calibrate:
                        t.isLombardEnabled = False
                        self.update()
                    else:
                        if not t.recorder.calibrated[t.micChannel]:
                            messagebox.showinfo("VoRTEx",
                                                "Please place the measurement mirophone into the calibrator and "
                                                "press OK")
                            t.calibrate_mic()
                            # NOTE(review): reports correction[t.mouthChannel] right after
                            # calibrating the mic — looks like it should be t.micChannel; confirm.
                            messagebox.showinfo("VoRTEx", "Mic calibration completed: dBSPL/dBFS = %0.2f"
                                                % t.recorder.correction[t.mouthChannel])
                        if not t.recorder.calibrated[t.earChannel]:
                            messagebox.showinfo("VoRTEx", "Please place the calibrator into the ear and press OK")
                            t.calibrate_ear()
                            messagebox.showinfo("VoRTEx", "Mic calibration completed: dBSPL/dBFS = %0.2f"
                                                % t.recorder.correction[t.earChannel])
                        if not t.isMouthCalibrated:
                            messagebox.showinfo("VoRTEx",
                                                "Please place the measurement microphone at the MRP and press OK")
                            t.calibrate_mouth()
                            messagebox.showinfo("VoRTEx", "Mouth calibration completed: gain = %0.2f"
                                                % t.gain)
                        self.measure_noise()
                        self.measure_noise_radio()
                else:
                    self.measure_noise()
                    self.measure_noise_radio()
            if t.status == 0:
                # start test from 0
                log("MAY THE FORCE BE WITH YOU", t.logname)  # the first line of the log file
                t.results = {}
                t.status = 1
            else:
                # resume the test
                log("WELCOME BACK", t.logname)
            # takes just the commands for the chosen language
            log("SELECTED LANGUAGE: %s - %s" % (t.lang, langDict[t.lang]), t.logname)
            self.condition += 1
            self.update()
        else:
            # Wakeword bookkeeping: condition 0 issues it, condition 1 confirms it.
            if self.condition == 0:
                t.issued_ww += 1
                self.ww_waiting = True
                print("WW issued")
            if self.condition == 1:
                if self.ww_waiting:
                    print("WW recognized")
                    t.recognized_ww += 1
                    self.ww_waiting = False
            self.update_screens()
            self.ui.commandsBox.setCurrentRow(self.condition)
            self.ui.expectedBox.setCurrentRow(self.condition)
            try:
                # Sequence lines are "cid<TAB>command text"; cid "000" is the wakeword.
                previous_cid = t.sequence[t.testlist[t.current_test]][self.condition - 1].split("\t")[0]
                command = t.sequence[t.testlist[t.current_test]][self.condition].split("\t")[1].replace("\n", "")
                cid = t.sequence[t.testlist[t.current_test]][self.condition].split("\t")[0]
                if self.condition == 0:
                    log("=========================== TEST #%03d ==========================="
                        % (t.testlist[t.current_test] + 1), t.logname)
                if cid == "000":
                    log("<NAME>", t.logname)
                else:
                    if previous_cid == "000":
                        log("MIC ACTIVATED", t.logname)
                # Peek at the next command (falling back to the next test's first
                # line, then to a synthetic "End") to label the play button.
                try:
                    next_command = t.sequence[t.testlist[t.current_test]][self.condition + 1].split("\t")[1].replace(
                        "\n", "")
                    next_cid = t.sequence[t.testlist[t.current_test]][self.condition + 1].split("\t")[0]
                except IndexError:
                    try:
                        next_command = t.sequence[t.testlist[t.current_test + 1]][0].split("\t")[1].replace("\n", "")
                        next_cid = t.sequence[t.testlist[t.current_test + 1]][0].split("\t")[0]
                    except IndexError:
                        next_cid = "000"
                        next_command = "End"
                # Play wave file
                filename = t.phrasesPath + "/" + t.lang + "_" + str(next_cid) + ".wav"
                app.processEvents()
                self.ui.wavLabel.setText("Wave file: %s" % filename)
                self.ui.gainLabel.setText("Gain adjust: %0.2fdB" % t.gain)
                log("OSCAR: <<%s>> (%s)" % (command, filename), t.logname)
                t.play_command(cid)
                # play button text
                if next_cid == "000":
                    self.ui.playButton.setText("PTT")
                else:
                    self.ui.playButton.setText("Play command: %s" % next_command)
                if self.condition + 1 == len(t.sequence[t.testlist[t.current_test]]):
                    self.ui.playButton.setText("End test")
                    pass
                self.condition += 1
            except IndexError:
                # Past the end of the sequence: the test is over — collect a verdict.
                t.cancel()
                r = self.results()  # "1" = pass, "0" = fail, "r" = repeat
                r_time = now()
                if r != "r":
                    if r == "1":
                        print("PASS")
                        log("END_TEST #%03d: PASS" % (t.current_test + 1), t.logname)
                    elif r == "0":
                        print("FAIL")
                        log("END_TEST #%03d: FAILED" % (t.current_test + 1), t.logname)
                    n = Note()
                    if r == '0':
                        # Failed tests default to "review later".
                        n.ui.checkBox.setChecked(True)
                    n.exec_()
                    note = n.text
                    if len(note) > 0:
                        log("NOTE #%03d: %s" % ((t.current_test + 1), note), t.logname)
                    result = "%s\t%s\t%s\t" % (r, note, r_time)
                    if n.is_checked:
                        print("To be reviewed!")
                        t.redo.append(t.current_test)
                        print("REDO's list: ")
                        print(t.redo)
                    try:
                        t.results[str(t.testlist[t.current_test] + 1)] = result
                    except KeyError:
                        t.results[str(t.testlist[t.current_test] + 1)] = []
                        t.results[str(t.testlist[t.current_test] + 1)] = result
                    self.update()
                else:
                    print("REPEATING")
                    log("REPEATING TEST", t.logname)
                    # Step back so the +1 below replays the same test.
                    t.current_test -= 1
                    self.ui.playButton.setText("PTT")
                t.current_test += 1
                print("Current test: %s" % t.current_test)
                if t.current_test == len(t.testlist):
                    messagebox.showinfo("VoRTEx", "Congratulations! You just completed another test!\n"
                                                  "It wasn't easy, you deserve a cup of coffee ;)")
                    t.status = 2
                    t.current_test = 0
                    self.completed()
                t.isSaved = False
                self.update_table()
                self.update_screens()
                self.condition = 0
        # Repeat/cancel only make sense once at least one step was played.
        if self.condition > 0:
            self.ui.cancel_button.setEnabled(True)
            self.ui.repeatButton.setEnabled(True)
        else:
            self.ui.repeatButton.setEnabled(False)
            self.ui.cancel_button.setEnabled(False)
        # calibrate
        # elif self.condition == 1:
        #     print(t.sequence[t.testlist[t.status]])
        #     print("Condition = 1")
    def lombard_pressed(self):
        """Toggle the Lombard effect; enabling it forces a full calibration pass.

        Mirrors the calibration branch at the start of do_test().
        """
        t.isLombardEnabled = not t.isLombardEnabled
        if t.isLombardEnabled:
            if (not t.recorder.calibrated[t.micChannel]) or (not t.recorder.calibrated[t.earChannel]) or (
                    not t.isMouthCalibrated):
                want_to_calibrate = messagebox.askyesno("VoRTEx", "You first have to calibrate the microphones and "
                                                                  "the mouth in order to apply the Lombard effect. "
                                                                  "Do you want to do it now?")
                if not want_to_calibrate:
                    t.isLombardEnabled = False
                    self.update()
                else:
                    if not t.recorder.calibrated[t.micChannel]:
                        messagebox.showinfo("VoRTEx", "Please place the measurement mirophone into the calibrator and "
                                                      "press OK")
                        t.calibrate_mic()
                        # NOTE(review): reports correction[t.mouthChannel] after calibrating
                        # the mic — looks like it should be t.micChannel; confirm.
                        messagebox.showinfo("VoRTEx", "Mic calibration completed: dBSPL/dBFS = %0.2f"
                                            % t.recorder.correction[t.mouthChannel])
                    if not t.recorder.calibrated[t.earChannel]:
                        messagebox.showinfo("VoRTEx", "Please place the calibrator into the ear and press OK")
                        t.calibrate_ear()
                        messagebox.showinfo("VoRTEx", "Mic calibration completed: dBSPL/dBFS = %0.2f"
                                            % t.recorder.correction[t.earChannel])
                    if not t.isMouthCalibrated:
                        messagebox.showinfo("VoRTEx", "Please place the measurement microphone at the MRP and press OK")
                        t.calibrate_mouth()
                        messagebox.showinfo("VoRTEx", "Mouth calibration completed: gain = %0.2f"
                                            % t.gain)
                    self.measure_noise()
                    self.measure_noise_radio()
            else:
                self.measure_noise()
                self.measure_noise_radio()
        self.update()
    @staticmethod
    def results():
        """Show the verdict dialog and return its result code ("1", "0" or "r")."""
        # NOTE(review): TestResultDialog is not among this file's visible imports
        # (libs.gui.result exports ResultDialog) — confirm where it is defined.
        result_box = TestResultDialog()
        result_box.exec()
        result = result_box.value
        return result
    @staticmethod
    def note():
        """Open the note dialog; return (note text, review-checkbox state)."""
        n = Note()
        n.exec_()
        return n.text, n.is_checked
    @staticmethod
    def open_log():
        # Windows-only: open the current log file in Notepad.
        os.system("notepad %s" % t.logname.replace("/", "\\"))
    def completed(self):
        # Repurpose the play button: after a full run it starts the error review.
        self.ui.playButton.disconnect()
        self.ui.playButton.setText("Review errors")
        self.ui.playButton.pressed.connect(lambda: self.do_review())
    @staticmethod
    def about():
        a = About()
        a.exec_()
    @staticmethod
    def open_guide():
        # `guidelink` is a module-level constant defined elsewhere in this file.
        webbrowser.open(guidelink)
    def resume_pressed(self):
        """Open the resume dialog, or tell the user there is nothing to resume."""
        # `tests` is a module-level list of saved tests defined elsewhere.
        if len(tests) == 0:
            messagebox.showinfo("VoRTEx", "No tests found! Better to start a new one")
        else:
            resume = Resume()
            resume.exec_()
            self.update()
    def save_pressed(self):
        # Persist both the test state and the application settings.
        t.save()
        t.save_settings()
        self.update()
    def on_newbutton_triggered(self):
        # Launch the "new test" wizard dialog.
        newdialog = NewDialog()
        newdialog.exec_()
        self.update()
def clear_screens(self):
self.ui.commandsBox.clear()
self.ui.precBox.clear()
self.ui.expectedBox.clear()
def update_score(self):
passed = 0
for i in list(t.results.keys()):
if (t.results[i].split("\t")[0]) == "1":
passed += 1
t.passes = passed
    def update_screens(self):
        """Refresh the command/expected/precondition widgets for the current test."""
        self.clear_screens()
        self.ui.completedLabel.setText("Completed: %d test(s)" % t.current_test)
        self.ui.groupBox_2.setTitle("Test %s of %s" % (t.current_test % len(t.testlist) + 1, len(t.testlist)))
        if t.status == 1:
            if self.condition >= 0:
                for i in range(len(t.sequence[t.testlist[t.current_test]])):
                    self.ui.commandsBox.addItem(t.sequence[t.testlist[t.current_test]][i].replace("\t", "->").
                                                replace("\n", ""))
                self.ui.expectedBox.clear()
                try:
                    # NOTE(review): the loop bound comes from t.expected but the items
                    # added come from t.sequence — looks like a copy-paste slip; confirm
                    # whether t.expected[...] was intended here.
                    for i in range(len(t.expected[t.testlist[t.current_test]])):
                        self.ui.expectedBox.addItem(t.sequence[t.testlist[t.current_test]][i].replace("\t", "->").
                                                    replace("\n", ""))
                except IndexError:
                    pass
                self.ui.precBox.clear()
                try:
                    self.ui.precBox.setText(t.preconditions[t.testlist[t.current_test]])
                except NameError:
                    self.ui.precBox.setText("No preconditions available")
                except IndexError:
                    self.ui.precBox.setText("No preconditions available")
    def review_test(self, n_test):
        """Jump to test index ``n_test`` (0-based) so it can be re-run."""
        print("Reviewing test %s" % (n_test + 1))
        t.current_test = n_test
        self.update()
def update_table(self):
horizontal_labels = ["ID", "Result", "Timestamp", "Review", "Note"]
self.ui.tableWidget.setRowCount(len(t.testlist))
self.ui.tableWidget.setColumnCount(len(horizontal_labels))
self.ui.tableWidget.setHorizontalHeaderLabels(horizontal_labels)
btns = []
for i in range(len(t.testlist)):
result = 'TO BE DONE'
note = ""
timestamp = | |
one2many is represented as a list of commands so we used res_model & res_id
displayed_image_id = fields.Many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Cover Image')
legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False)
legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False)
legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False)
is_closed = fields.Boolean(related="stage_id.is_closed", string="Closing Stage", readonly=True, related_sudo=False)
parent_id = fields.Many2one('project.task', string='Parent Task', index=True)
child_ids = fields.One2many('project.task', 'parent_id', string="Sub-tasks", context={'active_test': False})
subtask_project_id = fields.Many2one('project.project', related="project_id.subtask_project_id", string='Sub-task Project', readonly=True)
allow_subtasks = fields.Boolean(string="Allow Sub-tasks", related="project_id.allow_subtasks", readonly=True)
subtask_count = fields.Integer("Sub-task count", compute='_compute_subtask_count')
email_from = fields.Char(string='Email From', help="These people will receive email.", index=True,
compute='_compute_email_from', store="True", readonly=False)
allowed_user_ids = fields.Many2many('res.users', string="Visible to", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False)
project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string="Project Visibility")
# Computed field about working time elapsed between record creation and assignation/closing.
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator="avg")
working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator="avg")
working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator="avg")
working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator="avg")
# customer portal: include comment and incoming emails in communication history
website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])])
# recurrence fields
allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks')
recurring_task = fields.Boolean(string="Recurrent")
recurring_count = fields.Integer(string="Tasks in Recurrence", compute='_compute_recurring_count')
recurrence_id = fields.Many2one('project.task.recurrence', copy=False)
recurrence_update = fields.Selection([
('this', 'This task'),
('subsequent', 'This and following tasks'),
('all', 'All tasks'),
], default='this', store=False)
recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message')
repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False)
repeat_unit = fields.Selection([
('day', 'Days'),
('week', 'Weeks'),
('month', 'Months'),
('year', 'Years'),
], default='week', compute='_compute_repeat', readonly=False)
repeat_type = fields.Selection([
('forever', 'Forever'),
('until', 'End Date'),
('after', 'Number of Repetitions'),
], default="forever", string="Until", compute='_compute_repeat', readonly=False)
repeat_until = fields.Date(string="End Date", compute='_compute_repeat', readonly=False)
repeat_number = fields.Integer(string="Repetitions", default=1, compute='_compute_repeat', readonly=False)
repeat_on_month = fields.Selection([
('date', 'Date of the Month'),
('day', 'Day of the Month'),
], default='date', compute='_compute_repeat', readonly=False)
repeat_on_year = fields.Selection([
('date', 'Date of the Year'),
('day', 'Day of the Year'),
], default='date', compute='_compute_repeat', readonly=False)
mon = fields.Boolean(string="Mon", compute='_compute_repeat', readonly=False)
tue = fields.Boolean(string="Tue", compute='_compute_repeat', readonly=False)
wed = fields.Boolean(string="Wed", compute='_compute_repeat', readonly=False)
thu = fields.Boolean(string="Thu", compute='_compute_repeat', readonly=False)
fri = fields.Boolean(string="Fri", compute='_compute_repeat', readonly=False)
sat = fields.Boolean(string="Sat", compute='_compute_repeat', readonly=False)
sun = fields.Boolean(string="Sun", compute='_compute_repeat', readonly=False)
repeat_day = fields.Selection([
(str(i), str(i)) for i in range(1, 32)
], compute='_compute_repeat', readonly=False)
repeat_week = fields.Selection([
('first', 'First'),
('second', 'Second'),
('third', 'Third'),
('last', 'Last'),
], default='first', compute='_compute_repeat', readonly=False)
repeat_weekday = fields.Selection([
('mon', 'Monday'),
('tue', 'Tuesday'),
('wed', 'Wednesday'),
('thu', 'Thursday'),
('fri', 'Friday'),
('sat', 'Saturday'),
('sun', 'Sunday'),
], string='Day Of The Week', compute='_compute_repeat', readonly=False)
repeat_month = fields.Selection([
('january', 'January'),
('february', 'February'),
('march', 'March'),
('april', 'April'),
('may', 'May'),
('june', 'June'),
('july', 'July'),
('august', 'August'),
('september', 'September'),
('october', 'October'),
('november', 'November'),
('december', 'December'),
], compute='_compute_repeat', readonly=False)
repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility')
@api.model
def _get_recurrence_fields(self):
return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number',
'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat',
'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday']
@api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year')
def _compute_repeat_visibility(self):
for task in self:
task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date')
task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day')
task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week'
task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year'
    @api.depends('recurring_task')
    def _compute_repeat(self):
        """Seed the repeat_* fields: from the linked recurrence record if any,
        from model defaults when the task just became recurring, or False."""
        rec_fields = self._get_recurrence_fields()
        defaults = self.default_get(rec_fields)
        for task in self:
            for f in rec_fields:
                if task.recurrence_id:
                    task[f] = task.recurrence_id[f]
                else:
                    if task.recurring_task:
                        task[f] = defaults.get(f)
                    else:
                        task[f] = False
    def _get_weekdays(self, n=1):
        """Return weekday objects for the recurrence (via the module-level DAYS map).

        Weekly recurrence: one entry per ticked mon..sun flag; otherwise a single
        entry for repeat_weekday. ``n`` is the occurrence index forwarded to the
        weekday constructor (e.g. first/second/... weekday of the month).
        """
        self.ensure_one()
        if self.repeat_unit == 'week':
            return [fn(n) for day, fn in DAYS.items() if self[day]]
        return [DAYS.get(self.repeat_weekday)(n)]
    @api.depends(
        'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until',
        'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri',
        'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday')
    def _compute_recurrence_message(self):
        """Build an HTML preview (<ul>) of the next few recurrence dates."""
        self.recurrence_message = False
        for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()):
            date = fields.Date.today()
            # At most 5 previewed occurrences, fewer if 'after' caps it lower.
            number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5)
            delta = task.repeat_interval if task.repeat_unit == 'day' else 1
            recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates(
                date + timedelta(days=delta),
                task.repeat_interval,
                task.repeat_unit,
                task.repeat_type,
                task.repeat_until,
                task.repeat_on_month,
                task.repeat_on_year,
                task._get_weekdays(WEEKS.get(task.repeat_week)),
                task.repeat_day,
                task.repeat_week,
                task.repeat_month,
                count=number_occurrences)
            # Render dates in the user's locale format.
            date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format
            task.recurrence_message = '<ul>'
            for date in recurring_dates[:5]:
                task.recurrence_message += '<li>%s</li>' % date.strftime(date_format)
            # Ellipsis when the preview is truncated or the recurrence is open-ended.
            if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5:
                task.recurrence_message += '<li>...</li>'
            task.recurrence_message += '</ul>'
            if task.repeat_type == 'until':
                task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)}
    def _is_recurrence_valid(self):
        """Return True when the repeat_* settings describe a usable recurrence:
        positive interval, at least one weekday when weekly, a repetition count
        for 'after', and a future end date for 'until'."""
        self.ensure_one()
        return self.repeat_interval > 0 and\
                (not self.repeat_show_dow or self._get_weekdays()) and\
                (self.repeat_type != 'after' or self.repeat_number) and\
                (self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today())
    @api.depends('recurrence_id')
    def _compute_recurring_count(self):
        """Count sibling tasks sharing the same recurrence (one read_group call)."""
        self.recurring_count = 0
        recurring_tasks = self.filtered(lambda l: l.recurrence_id)
        count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id')
        tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count}
        for task in recurring_tasks:
            task.recurring_count = tasks_count.get(task.recurrence_id.id, 0)
    @api.depends('partner_id.email')
    def _compute_partner_email(self):
        # Mirror the customer's email onto the task when it diverges.
        # NOTE(review): no assignment happens when the condition is false — the
        # compute relies on the stored value being kept; confirm this is intended.
        for task in self:
            if task.partner_id and task.partner_id.email != task.partner_email:
                task.partner_email = task.partner_id.email
    def _inverse_partner_email(self):
        # Propagate a task-level email edit back to the customer record.
        for task in self:
            if task.partner_id and task.partner_email != task.partner_id.email:
                task.partner_id.email = task.partner_email
    @api.depends('partner_id.phone')
    def _compute_partner_phone(self):
        # Same mirroring as email, for the phone number.
        for task in self:
            if task.partner_id and task.partner_phone != task.partner_id.phone:
                task.partner_phone = task.partner_id.phone
    def _inverse_partner_phone(self):
        for task in self:
            if task.partner_id and task.partner_phone != task.partner_id.phone:
                task.partner_id.phone = task.partner_phone
@api.depends('partner_email', 'partner_phone', 'partner_id')
def _compute_ribbon_message(self):
for task in self:
will_write_email = task.partner_id and task.partner_email != task.partner_id.email
will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone
if will_write_email and will_write_phone:
task.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.')
elif will_write_email:
task.ribbon_message = _('By saving this change, the customer email will also be updated.')
elif will_write_phone:
task.ribbon_message = _('By saving this change, the customer phone number will also be updated.')
else:
task.ribbon_message = False
    @api.constrains('parent_id')
    def _check_parent_id(self):
        # Forbid cycles in the sub-task hierarchy.
        if not self._check_recursion():
            raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.'))
    @api.constrains('allowed_user_ids')
    def _check_no_portal_allowed(self):
        # Portal (share) users may only be granted visibility on 'portal' projects.
        for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'):
            portal_users = task.allowed_user_ids.filtered('share')
            if portal_users:
                user_names = ', '.join(portal_users[:10].mapped('name'))
                raise ValidationError(_("The project visibility setting doesn't allow portal users to see the project's tasks. (%s)", user_names))
    def _compute_attachment_ids(self):
        """Attachments linked directly to the task, excluding those that arrived
        through chatter messages (mail_thread)."""
        for task in self:
            attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids
            message_attachment_ids = task.mapped('message_ids.attachment_ids').ids  # from mail_thread
            task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))]
    @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility')
    def _compute_allowed_user_ids(self):
        """Keep the task's visibility list consistent with the project's privacy:
        'followers' projects get internal allowed users only; 'portal' projects
        additionally get the project's portal users."""
        for task in self:
            portal_users = task.allowed_user_ids.filtered('share')
            internal_users = task.allowed_user_ids - portal_users
            if task.project_id.privacy_visibility == 'followers':
                task.allowed_user_ids |= task.project_id.allowed_internal_user_ids
                task.allowed_user_ids -= portal_users
            elif task.project_id.privacy_visibility == 'portal':
                task.allowed_user_ids |= task.project_id.allowed_portal_user_ids
            # Strip whichever group the current visibility does not admit.
            if task.project_id.privacy_visibility != 'portal':
                task.allowed_user_ids -= portal_users
            elif task.project_id.privacy_visibility != 'followers':
                task.allowed_user_ids -= internal_users
    @api.depends('create_date', 'date_end', 'date_assign')
    def _compute_elapsed(self):
        """Working hours/days between creation and assignment/closing, using the
        project's resource calendar; tasks without a calendar get 0.0."""
        task_linked_to_calendar = self.filtered(
            lambda task: task.project_id.resource_calendar_id and task.create_date
        )
        for task in task_linked_to_calendar:
            dt_create_date = fields.Datetime.from_string(task.create_date)
            if task.date_assign:
                dt_date_assign = fields.Datetime.from_string(task.date_assign)
                duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True)
                task.working_hours_open = duration_data['hours']
                task.working_days_open = duration_data['days']
            else:
                task.working_hours_open = 0.0
                task.working_days_open = 0.0
            if task.date_end:
                dt_date_end = fields.Datetime.from_string(task.date_end)
                duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True)
                task.working_hours_close = duration_data['hours']
                task.working_days_close = duration_data['days']
            else:
                task.working_hours_close = 0.0
                task.working_days_close = 0.0
        # Tasks without calendar/create_date: zero out all four measures at once.
        (self - task_linked_to_calendar).update(dict.fromkeys(
            ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0))
@api.depends('stage_id', 'kanban_state')
def _compute_kanban_state_label(self):
for task in self:
if task.kanban_state == 'normal':
task.kanban_state_label = task.legend_normal
elif task.kanban_state == 'blocked':
task.kanban_state_label = task.legend_blocked
else:
task.kanban_state_label = task.legend_done
    def _compute_access_url(self):
        # Portal URL for the task record.
        super(Task, self)._compute_access_url()
        for task in self:
            task.access_url = '/my/task/%s' % task.id
    def _compute_access_warning(self):
        # Warn when sharing is blocked by the project's privacy setting.
        super(Task, self)._compute_access_warning()
        for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'):
            task.access_warning = _(
                "The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy of the project to 'Visible by following customers' in order to make it accessible by the recipient(s).")
    @api.depends('child_ids.planned_hours')
    def _compute_subtask_planned_hours(self):
        # Recursive total: each child contributes its own hours plus its subtree's.
        for task in self:
            task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids)
    @api.depends('child_ids')
    def _compute_subtask_count(self):
        # Counts the whole sub-task subtree, not just direct children.
        for task in self:
            task.subtask_count = len(task._get_all_subtasks())
    @api.onchange('company_id')
    def _onchange_task_company(self):
        # A task cannot stay on a project from a different company.
        if self.project_id.company_id != self.company_id:
            self.project_id = False
    @api.depends('project_id.company_id')
    def _compute_company_id(self):
        # Tasks follow their project's company; tasks without a project keep theirs.
        for task in self.filtered(lambda task: task.project_id):
            task.company_id = task.project_id.company_id
    @api.depends('project_id')
    def _compute_stage_id(self):
        """Pick a default stage when the task's project changes: keep the current
        stage if it belongs to the new project, else the first open, non-closing
        stage; tasks without a project have no stage."""
        for task in self:
            if task.project_id:
                if task.project_id not in task.stage_id.project_ids:
                    task.stage_id = task.stage_find(task.project_id.id, [
                        ('fold', '=', False), ('is_closed', '=', False)])
            else:
                task.stage_id = False
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default is None:
| |
= read_string(self.conn)
if not expressions:
expressions = set('*')
handlers.append((line_start, line_end, expressions))
BREAK_ON.handler_cache[filename] = handlers
finally:
BREAK_ON.handler_lock.release()
    def command_clear_stepping(self):
        # Cancel any pending step operation on the thread named on the wire.
        tid = read_int(self.conn)
        thread = get_thread_from_id(tid)
        if thread is not None:
            thread.stepping = STEPPING_NONE
def command_set_lineno(self):
tid = read_int(self.conn)
fid = read_int(self.conn)
lineno = read_int(self.conn)
try:
THREADS_LOCK.acquire()
THREADS[tid].cur_frame.f_lineno = lineno
newline = THREADS[tid].cur_frame.f_lineno
THREADS_LOCK.release()
with _SendLockCtx:
write_bytes(self.conn, SETL)
write_int(self.conn, 1)
write_int(self.conn, tid)
write_int(self.conn, newline)
except:
with _SendLockCtx:
write_bytes(self.conn, SETL)
write_int(self.conn, 0)
write_int(self.conn, tid)
write_int(self.conn, 0)
    def command_execute_code(self):
        # execute given text in specified frame
        text = read_string(self.conn)
        tid = read_int(self.conn)  # thread id
        fid = read_int(self.conn)  # frame id
        eid = read_int(self.conn)  # execution id
        frame_kind = read_int(self.conn)
        repr_kind = read_int(self.conn)
        thread, cur_frame = self.get_thread_and_frame(tid, fid, frame_kind)
        if thread is not None and cur_frame is not None:
            # Evaluation happens on the target thread; results are reported
            # asynchronously under the given execution id.
            thread.run_on_thread(text, cur_frame, eid, frame_kind, repr_kind)
    def execute_code_no_report(self, text, tid, fid, frame_kind):
        # execute given text in specified frame, without sending back the results
        thread, cur_frame = self.get_thread_and_frame(tid, fid, frame_kind)
        if thread is not None and cur_frame is not None:
            thread.run_locally_no_report(text, cur_frame, frame_kind)
    def command_enum_children(self):
        # Evaluate ``text`` in the given frame and enumerate the children of the
        # resulting object (for expanding a node in the IDE's variables view).
        text = read_string(self.conn)
        tid = read_int(self.conn)  # thread id
        fid = read_int(self.conn)  # frame id
        eid = read_int(self.conn)  # execution id
        frame_kind = read_int(self.conn)  # frame kind
        thread, cur_frame = self.get_thread_and_frame(tid, fid, frame_kind)
        if thread is not None and cur_frame is not None:
            thread.enum_child_on_thread(text, cur_frame, eid, frame_kind)
    def get_thread_and_frame(self, tid, fid, frame_kind):
        """Resolve (thread, frame) for a wire request: walk ``fid`` levels up the
        thread's current call stack. Returns (None, None)-ish pairs when the
        thread id is unknown.
        """
        thread = get_thread_from_id(tid)
        cur_frame = None
        if thread is not None:
            cur_frame = thread.cur_frame
            # xrange: this module supports Python 2 as well (defined/aliased elsewhere).
            for i in xrange(fid):
                cur_frame = cur_frame.f_back
        return thread, cur_frame
    def command_detach(self):
        """Detach the debugger: untrace threads, unload the helper DLL (Windows),
        acknowledge to the IDE, and tear down via DebuggerExitException."""
        detach_threads()
        # unload debugger DLL
        global debugger_dll_handle
        if debugger_dll_handle is not None:
            k32 = ctypes.WinDLL('kernel32')
            k32.FreeLibrary.argtypes = [ctypes.c_void_p]
            k32.FreeLibrary(debugger_dll_handle)
            debugger_dll_handle = None
        with _SendLockCtx:
            # NOTE(review): writes to the module-level ``conn``, not ``self.conn`` —
            # confirm whether they are the same socket here.
            write_bytes(conn, DETC)
            detach_process()
        for callback in DETACH_CALLBACKS:
            callback()
        # Unwinds the command loop; callers treat this as a clean shutdown.
        raise DebuggerExitException()
    def command_last_ack(self):
        # Signal that the IDE's final acknowledgement arrived (shutdown handshake).
        last_ack_event.set()
# Callbacks invoked when the debugger detaches from the process (see command_detach).
DETACH_CALLBACKS = []
def new_thread_wrapper(func, posargs, kwargs):
    """Entry point for threads started while the debugger is attached: register
    the thread, install its trace function, run ``func``, then deregister and
    report the exit to the IDE.

    Fix: the original released THREADS_LOCK outside any ``finally``, so a
    raising ``del THREADS[...]`` (entry already removed elsewhere) left the
    lock held and deadlocked the debugger. The release is now guaranteed.
    """
    cur_thread = new_thread()
    try:
        sys.settrace(cur_thread.trace_func)
        func(*posargs, **kwargs)
    finally:
        THREADS_LOCK.acquire()
        try:
            if not cur_thread.detach:
                del THREADS[cur_thread.id]
        finally:
            THREADS_LOCK.release()
        if not DETACHED:
            report_thread_exit(cur_thread)
def report_new_thread(new_thread):
    # Announce a newly created thread to the IDE (NEWT packet).
    ident = new_thread.id
    with _SendLockCtx:
        write_bytes(conn, NEWT)
        write_int(conn, ident)
def report_all_threads():
    # Snapshot the registry under the lock, then report outside of it.
    THREADS_LOCK.acquire()
    all_threads = list(THREADS.values())
    THREADS_LOCK.release()
    for cur_thread in all_threads:
        report_new_thread(cur_thread)
def report_thread_exit(old_thread):
    # Announce a thread exit to the IDE (EXTT packet).
    ident = old_thread.id
    with _SendLockCtx:
        write_bytes(conn, EXTT)
        write_int(conn, ident)
def report_exception(frame, exc_info, tid, break_type):
    """Send an exception notification (name, thread, break type, message)."""
    etype = exc_info[0]
    evalue = exc_info[1]
    if type(evalue) is tuple:
        # The exception object hasn't been instantiated yet; build it now so
        # the rendered message text is correct.
        evalue = etype(*evalue)
    name = get_exception_name(etype)
    message = str(evalue)
    with _SendLockCtx:
        write_bytes(conn, EXCP)
        write_string(conn, name)
        write_int(conn, tid)
        write_int(conn, break_type)
        write_string(conn, message)
def new_module(frame):
    """Create and register a Module record for *frame*'s code object.

    Returns (code_object, module) so the caller can cache the mapping.
    """
    code = frame.f_code
    mod = Module(get_code_filename(code))
    # Registry is keyed by the raw co_filename, not the absolute path.
    MODULES.append((code.co_filename, mod))
    return code, mod
def report_module_load(mod):
    """Send MODL announcing *mod* (module id + filename) to the debugger."""
    with _SendLockCtx:
        write_bytes(conn, MODL)
        write_int(conn, mod.module_id)
        write_string(conn, mod.filename)
def report_step_finished(tid):
    """Send STPD: the step operation on thread *tid* has completed."""
    with _SendLockCtx:
        write_bytes(conn, STPD)
        write_int(conn, tid)
def report_breakpoint_bound(id):
    """Send BRKS: breakpoint *id* was successfully bound."""
    with _SendLockCtx:
        write_bytes(conn, BRKS)
        write_int(conn, id)
def report_breakpoint_failed(id):
    """Send BRKF: breakpoint *id* could not be bound."""
    with _SendLockCtx:
        write_bytes(conn, BRKF)
        write_int(conn, id)
def report_breakpoint_hit(id, tid):
    """Send BRKH: breakpoint *id* was hit on thread *tid*."""
    with _SendLockCtx:
        write_bytes(conn, BRKH)
        write_int(conn, id)
        write_int(conn, tid)
def report_process_loaded(tid):
    """Send LOAD: the target process is loaded; *tid* is the reporting thread."""
    with _SendLockCtx:
        write_bytes(conn, LOAD)
        write_int(conn, tid)
def report_execution_error(exc_text, execution_id):
    """Send EXCE: evaluation *execution_id* failed with message *exc_text*."""
    with _SendLockCtx:
        write_bytes(conn, EXCE)
        write_int(conn, execution_id)
        write_string(conn, exc_text)
def report_execution_exception(execution_id, exc_info):
    """Report a failed evaluation, falling back to a generic message when
    the exception itself cannot be stringified."""
    try:
        message = str(exc_info[1])
    except:
        # User-defined __str__ can raise anything; never let it escape.
        message = 'An exception was thrown'
    report_execution_error(message, execution_id)
def safe_hex_repr(obj):
    """Return ``hex(obj)``, or None when *obj* has no hex representation.

    Used when reporting evaluation results so integral values can also be
    shown in hexadecimal.
    """
    try:
        return hex(obj)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # not silently swallowed; non-integral objects land here.
        return None
def get_object_len(obj):
    """Return ``len(obj)``, or None for objects with no length.

    The length is reported to the debugger and also decides whether a value
    is shown as expandable.
    """
    try:
        return len(obj)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # not silently swallowed; unsized objects land here.
        return None
def report_execution_result(execution_id, result, repr_kind = PYTHON_EVALUATION_RESULT_REPR_KIND_NORMAL):
    """Send the value produced by an evaluation request to the debugger.

    Args:
        execution_id: Id of the evaluation request being answered.
        result: The evaluated object.
        repr_kind: NORMAL sends safe_repr/hex/len of the object itself; the
            RAW kinds send a raw repr from TYPES_WITH_RAW_REPR instead, and
            RAWLEN sends only that repr's length.
    """
    if repr_kind == PYTHON_EVALUATION_RESULT_REPR_KIND_NORMAL:
        flags = 0
        obj_repr = safe_repr(result)
        obj_len = get_object_len(result)
        hex_repr = safe_hex_repr(result)
    else:
        flags = PYTHON_EVALUATION_RESULT_RAW
        hex_repr = None
        # Bug fix: obj_repr was previously unbound (NameError at the
        # get_object_len call below) when result matched none of the
        # raw-repr types; default it to None instead.
        obj_repr = None
        for cls, raw_repr in TYPES_WITH_RAW_REPR.items():
            if isinstance(result, cls):
                try:
                    obj_repr = raw_repr(result)
                except:
                    obj_repr = None
                break
        obj_len = get_object_len(obj_repr)
        if repr_kind == PYTHON_EVALUATION_RESULT_REPR_KIND_RAWLEN:
            # Length-only request: suppress the (possibly huge) repr text.
            obj_repr = None
    res_type = type(result)
    type_name = type(result).__name__
    with _SendLockCtx:
        write_bytes(conn, EXCR)
        write_int(conn, execution_id)
        write_object(conn, res_type, obj_repr, hex_repr, type_name, obj_len, flags)
def report_children(execution_id, children):
    """Send the enumerated children of an evaluation result (CHLD message)."""
    # Precompute every wire field before taking the send lock.
    rows = [
        (name, expression, flags, safe_repr(result), safe_hex_repr(result),
         type(result), type(result).__name__, get_object_len(result))
        for name, expression, result, flags in children
    ]
    with _SendLockCtx:
        write_bytes(conn, CHLD)
        write_int(conn, execution_id)
        write_int(conn, len(rows))
        for name, expression, flags, obj_repr, hex_repr, res_type, type_name, obj_len in rows:
            write_string(conn, name)
            write_string(conn, expression)
            write_object(conn, res_type, obj_repr, hex_repr, type_name, obj_len, flags)
def get_code_filename(code):
    """Return the absolute path of the file a code object was compiled from."""
    filename = code.co_filename
    return path.abspath(filename)
# Types whose values are never marked expandable in the debugger UI.
# NOTE: `unicode` exists only on Python 2 (this module targets Python 2
# throughout); `long` is guarded below for the same reason.
NONEXPANDABLE_TYPES = [int, str, bool, float, object, type(None), unicode]
try:
    NONEXPANDABLE_TYPES.append(long)
except NameError: pass
def write_object(conn, obj_type, obj_repr, hex_repr, type_name, obj_len, flags = 0):
    """Serialize one evaluation result onto the wire.

    Field order (fixed protocol): repr, hex repr, type name, length, flags.
    Expandability and raw-repr availability are folded into *flags* here.
    """
    write_string(conn, obj_repr)
    write_string(conn, hex_repr)
    if obj_type is SynthesizedValue:
        # Synthesized values have no meaningful type name to display.
        write_string(conn, '')
    else:
        write_string(conn, type_name)
    if obj_type not in NONEXPANDABLE_TYPES and obj_len != 0:
        flags |= PYTHON_EVALUATION_RESULT_EXPANDABLE
    try:
        for cls in TYPES_WITH_RAW_REPR:
            if issubclass(obj_type, cls):
                flags |= PYTHON_EVALUATION_RESULT_HAS_RAW_REPR
                break
    except: # guard against broken issubclass for types which aren't actually types, like vtkclass
        pass
    write_int(conn, obj_len or 0)
    write_int(conn, flags)
# Thread id of the debugger's own loop thread (never traced; see new_thread).
debugger_thread_id = -1
# True while thread interception was installed for an attach (affects what
# detach_process/detach_threads restore).
_INTERCEPTING_FOR_ATTACH = False
def intercept_threads(for_attach = False):
    """Redirect thread-creation entry points through thread_creator so every
    new thread is registered with the debugger and traced.

    Args:
        for_attach: True when called while attaching to a running process;
            recorded in _INTERCEPTING_FOR_ATTACH so detach knows what to
            restore.
    """
    thread.start_new_thread = thread_creator
    thread.start_new = thread_creator
    # If threading has already been imported (i.e. we're attaching), we must hot-patch threading._start_new_thread
    # so that new threads started using it will be intercepted by our code.
    #
    # On the other hand, if threading has not been imported, we must not import it ourselves, because it will then
    # treat the current thread as the main thread, which is incorrect when attaching because this code is executing
    # on an ephemeral debugger attach thread that will go away shortly. We don't need to hot-patch it in that case
    # anyway, because it will pick up the new thread.start_new_thread that we have set above when it's imported.
    global _threading
    if _threading is None and 'threading' in sys.modules:
        import threading
        _threading = threading
        _threading._start_new_thread = thread_creator
    global _INTERCEPTING_FOR_ATTACH
    _INTERCEPTING_FOR_ATTACH = for_attach
def attach_process(port_num, debug_id, debug_options, report = False, block = False):
    """Connect back to a debugger listening on 127.0.0.1:*port_num* and hand
    the socket to attach_process_from_socket.

    Retries the connection for up to ~2.5s (50 attempts x 50ms) before
    raising.
    """
    global conn
    for i in xrange(50):
        try:
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.connect(('127.0.0.1', port_num))
            write_string(conn, debug_id)
            write_int(conn, 0)  # success
            break
        except:
            # Debugger may not be listening yet; back off briefly and retry.
            import time
            time.sleep(50./1000)
    else:
        # for/else: all 50 attempts failed.
        raise Exception('failed to attach')
    attach_process_from_socket(conn, debug_options, report, block)
def attach_process_from_socket(sock, debug_options, report = False, block = False):
    """Initialize debugging over an established socket.

    Applies *debug_options* flags to module globals, installs an excepthook
    honoring the Wait-On-Exit options, starts the DebuggerLoop thread,
    registers already-imported modules, optionally reports existing
    threads/modules, and installs thread interception and output redirection.

    Args:
        sock: Connected socket to the debugger.
        debug_options: Collection of option-name strings.
        report: When True, announce existing threads and modules.
        block: When True, block the main thread until the debugger resumes it.
    """
    global conn, attach_sent_break, DETACHED, DEBUG_STDLIB, BREAK_ON_SYSTEMEXIT_ZERO, DJANGO_DEBUG
    BREAK_ON_SYSTEMEXIT_ZERO = 'BreakOnSystemExitZero' in debug_options
    DJANGO_DEBUG = 'DjangoDebugging' in debug_options
    if '' in PREFIXES:
        # If one or more of the prefixes are empty, we can't reliably distinguish stdlib
        # from user code, so override stdlib-only mode and allow to debug everything.
        DEBUG_STDLIB = True
    else:
        DEBUG_STDLIB = 'DebugStdLib' in debug_options
    wait_on_normal_exit = 'WaitOnNormalExit' in debug_options
    wait_on_abnormal_exit = 'WaitOnAbnormalExit' in debug_options
    def _excepthook(exc_type, exc_value, exc_tb):
        # Display the exception and wait on exit
        if exc_type is SystemExit:
            if (wait_on_abnormal_exit and exc_value.code) or (wait_on_normal_exit and not exc_value.code):
                print_exception(exc_type, exc_value, exc_tb)
                do_wait()
        else:
            print_exception(exc_type, exc_value, exc_tb)
            if wait_on_abnormal_exit:
                do_wait()
    sys.excepthook = sys.__excepthook__ = _excepthook
    conn = sock
    attach_sent_break = False
    # start the debugging loop
    global debugger_thread_id
    debugger_thread_id = _start_new_thread(DebuggerLoop(conn).loop, ())
    # Register every module imported before we attached.
    for mod_name, mod_value in sys.modules.items():
        try:
            filename = getattr(mod_value, '__file__', None)
            if filename is not None:
                try:
                    fullpath = path.abspath(filename)
                except:
                    pass
                else:
                    MODULES.append((filename, Module(fullpath)))
        except:
            traceback.print_exc()
    if report:
        THREADS_LOCK.acquire()
        all_threads = list(THREADS.values())
        if block:
            main_thread = THREADS[thread.get_ident()]
        THREADS_LOCK.release()
        for cur_thread in all_threads:
            report_new_thread(cur_thread)
        for filename, module in MODULES:
            report_module_load(module)
    DETACHED = False
    # NOTE(review): main_thread is only bound above when report is also true;
    # presumably block implies report at every call site -- confirm.
    if block:
        main_thread.block(lambda: report_process_loaded(thread.get_ident()))
    # intercept all new thread requests
    if not _INTERCEPTING_FOR_ATTACH:
        intercept_threads()
    if 'RedirectOutput' in debug_options:
        enable_output_redirection()
# Try to detach cooperatively, notifying the debugger as we do so.
def detach_process_and_notify_debugger():
    """Attempt a cooperative detach through the debugger loop; fall back to a
    forced detach_process() if the cooperative path did not complete."""
    loop = DebuggerLoop.instance
    if loop:
        try:
            loop.command_detach()
        except DebuggerExitException:
            # Cooperative detach completed.
            return
        except:
            # Swallow anything else and forcibly detach below.
            pass
    detach_process()
def detach_process():
    """Mark the debugger as detached and undo process-level hooks.

    Restores the original stdout/stderr (if redirected) and the original
    thread-creation entry points, but only when interception was not set up
    purely for an attach in progress.
    """
    global DETACHED
    DETACHED = True
    # Cleanup: the original code tested `not _INTERCEPTING_FOR_ATTACH` twice
    # back-to-back; merged into a single guard (same order of operations).
    if not _INTERCEPTING_FOR_ATTACH:
        # Restore original output streams if we redirected them.
        if isinstance(sys.stdout, _DebuggerOutput):
            sys.stdout = sys.stdout.old_out
        if isinstance(sys.stderr, _DebuggerOutput):
            sys.stderr = sys.stderr.old_out
        # Restore the un-intercepted thread-creation functions.
        thread.start_new_thread = _start_new_thread
        thread.start_new = _start_new_thread
def detach_threads():
    """Stop tracing on every tracked thread and unblock any paused ones.

    On a full detach (not attach-time interception) the thread and
    breakpoint registries are cleared as well.
    """
    # tell all threads to stop tracing...
    THREADS_LOCK.acquire()
    all_threads = list(THREADS.items())
    THREADS_LOCK.release()
    for tid, pyThread in all_threads:
        if not _INTERCEPTING_FOR_ATTACH:
            pyThread.detach = True
        # STEPPING_BREAK forces the thread out of its trace-driven stepping.
        pyThread.stepping = STEPPING_BREAK
        if pyThread._is_blocked:
            pyThread.unblock()
    if not _INTERCEPTING_FOR_ATTACH:
        THREADS_LOCK.acquire()
        THREADS.clear()
        THREADS_LOCK.release()
        BREAKPOINTS.clear()
def new_thread(tid = None, set_break = False, frame = None):
    """Register a thread with the debugger and return its Thread record.

    Args:
        tid: Thread id passed explicitly during attach; None otherwise.
        set_break: When True, the thread breaks as soon as it is traced.
        frame: Optional frame to seed the thread's frame stack with.

    Returns:
        The new Thread, or None when *tid* is the debugger's own loop
        thread (which must never be traced).
    """
    # called during attach w/ a thread ID provided.
    if tid == debugger_thread_id:
        return None
    cur_thread = Thread(tid)
    THREADS_LOCK.acquire()
    THREADS[cur_thread.id] = cur_thread
    THREADS_LOCK.release()
    cur_thread.push_frame(frame)
    if set_break:
        cur_thread.stepping = STEPPING_ATTACH_BREAK
    if not DETACHED:
        report_new_thread(cur_thread)
    return cur_thread
def new_external_thread():
thread = new_thread()
if not attach_sent_break:
# we are still doing the attach, make this thread break.
thread.stepping = STEPPING_ATTACH_BREAK
elif SEND_BREAK_COMPLETE:
# user requested break all, make this | |
import cupy
from cupy.random import generator
from cupy import util
# TODO(beam2d): Implement many distributions
def beta(a, b, size=None, dtype=float):
    """Sample from the beta distribution.

    Args:
        a (float): Shape parameter alpha of the distribution.
        b (float): Shape parameter beta of the distribution.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the beta distribution.

    .. seealso:: :func:`numpy.random.beta`
    """
    return generator.get_random_state().beta(a, b, size, dtype)
def binomial(n, p, size=None, dtype=int):
    """Sample from the binomial distribution.

    Args:
        n (int): Number of trials.
        p (float): Success probability of each trial.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.int32` and :class:`numpy.int64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the binomial distribution.

    .. seealso:: :func:`numpy.random.binomial`
    """
    return generator.get_random_state().binomial(n, p, size, dtype)
def chisquare(df, size=None, dtype=float):
    """Sample from the chi-square distribution.

    Args:
        df (int or array_like of ints): Degrees of freedom.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the chi-square distribution.

    .. seealso:: :func:`numpy.random.chisquare`
    """
    return generator.get_random_state().chisquare(df, size, dtype)
def dirichlet(alpha, size=None, dtype=float):
    """Sample from the Dirichlet distribution.

    Args:
        alpha (array): Concentration parameters of the distribution.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the Dirichlet distribution.

    .. seealso:: :func:`numpy.random.dirichlet`
    """
    return generator.get_random_state().dirichlet(alpha, size, dtype)
def exponential(scale, size=None, dtype=float):
    """Sample from the exponential distribution.

    Args:
        scale (float or array_like of floats): Scale parameter beta.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the exponential distribution.

    .. seealso:: :func:`numpy.random.exponential`
    """
    return generator.get_random_state().exponential(scale, size, dtype)
def f(dfnum, dfden, size=None, dtype=float):
    """Sample from the F distribution.

    Args:
        dfnum (float or array_like of floats): Numerator degrees of
            freedom.
        dfden (float or array_like of floats): Denominator degrees of
            freedom.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the F distribution.

    .. seealso:: :func:`numpy.random.f`
    """
    return generator.get_random_state().f(dfnum, dfden, size, dtype)
def gamma(shape, scale=1.0, size=None, dtype=float):
    """Sample from the gamma distribution.

    Args:
        shape (array): Shape parameter k of the distribution.
        scale (array): Scale parameter theta of the distribution.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the gamma distribution.

    .. seealso:: :func:`numpy.random.gamma`
    """
    return generator.get_random_state().gamma(shape, scale, size, dtype)
def geometric(p, size=None, dtype=int):
    """Sample from the geometric distribution.

    Args:
        p (float): Success probability of each trial.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.int32` and :class:`numpy.int64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the geometric distribution.

    .. seealso::
        :func:`cupy.random.RandomState.geometric`
        :func:`numpy.random.geometric`
    """
    return generator.get_random_state().geometric(p, size, dtype)
def gumbel(loc=0.0, scale=1.0, size=None, dtype=float):
    """Sample from a Gumbel distribution with location *loc* and scale
    *scale*.

    Args:
        loc (float): Location of the mode.
        scale (float): Scale parameter.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the Gumbel distribution.

    .. seealso:: :func:`numpy.random.gumbel`
    """
    return generator.get_random_state().gumbel(loc, scale, size, dtype)
def laplace(loc=0.0, scale=1.0, size=None, dtype=float):
    """Sample from the Laplace distribution.

    Args:
        loc (float): Location of the mode.
        scale (float): Scale parameter.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the Laplace distribution.

    .. seealso:: :func:`numpy.random.laplace`
    """
    return generator.get_random_state().laplace(loc, scale, size, dtype)
def lognormal(mean=0.0, sigma=1.0, size=None, dtype=float):
    """Sample from a log-normal distribution whose underlying normal has
    mean *mean* and standard deviation *sigma*.

    Args:
        mean (float): Mean of the underlying normal distribution.
        sigma (float): Standard deviation of the underlying normal
            distribution.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Samples drawn from the log-normal distribution.

    .. seealso:: :func:`numpy.random.lognormal`
    """
    return generator.get_random_state().lognormal(mean, sigma, size=size, dtype=dtype)
def normal(loc=0.0, scale=1.0, size=None, dtype=float):
    """Return normally distributed samples.

    Args:
        loc (float or array_like of floats): Mean of the distribution.
        scale (float or array_like of floats): Standard deviation of the
            distribution.
        size (int or tuple of ints): Output shape; ``None`` produces a
            zero-dimensional array.
        dtype: Only :class:`numpy.float32` and :class:`numpy.float64` are
            allowed.

    Returns:
        cupy.ndarray: Normally distributed samples.

    .. seealso:: :func:`numpy.random.normal`
    """
    state = generator.get_random_state()
    samples = state.normal(0, 1, size, dtype)
    # Scale and shift the standard-normal draw in place.
    cupy.multiply(samples, scale, out=samples)
    cupy.add(samples, loc, out=samples)
    return samples
def multivariate_normal(mean, cov, size=None, check_valid='ignore', tol=1e-8,
dtype=float):
"""(experimental) Multivariate normal distribution.
Returns an array of samples drawn from the multivariate normal
distribution. Its probability density function is defined as
.. math::
f(x) = \\frac{1}{(2\\pi|\\Sigma|)^(n/2)} \
\\exp\\left(-\\frac{1}{2} \
(x-\\mu)^{\\top}\\Sigma^{-1}(x-\\mu)\\right),
Args:
mean (1-D array_like, of length N): Mean of the multivariate normal
distribution :math:`\\mu`.
cov (2-D array_like, of shape (N, N)): Covariance matrix
:math:`\\Sigma` of the multivariate normal distribution. It must be
symmetric and positive-semidefinite for proper sampling.
size (int or tuple of ints): The shape of the array. If ``None``, a
zero-dimensional array is generated.
check_valid ('warn', 'raise', 'ignore'): Behavior when the covariance
matrix is not positive semidefinite.
tol (float): Tolerance when checking the singular values in
covariance matrix.
dtype: Data type specifier. Only :class:`numpy.float32` and
:class:`numpy.float64` | |
observed_sf_units = int(parcel_observed_du['single'])
else:
observed_sf_units = 0
#Look up observed MF DU
if 'multi' in parcel_observed_du.keys():
observed_mf_units = int(parcel_observed_du['multi'])
else:
observed_mf_units = 0
print ' Observed %s single-family DU.' % (observed_sf_units)
print ' Observed %s multi-family DU.' % (observed_mf_units)
#Calculate difference between target and observed
sf_difference = target_sf_units - observed_sf_units
mf_difference = target_mf_units - observed_mf_units
print ' Target difference: %s single-family DU.' % (sf_difference)
print ' Target difference: %s multi-family DU.' % (mf_difference)
####Impute as needed to cover difference
# Imputation is needed somewhere
if (sf_difference != 0) | (mf_difference != 0):
# One of the residential categories requires no imputation
if (sf_difference == 0) | (mf_difference == 0):
if sf_difference != 0:
add_or_remove_sf_units(zone_id, sf_difference, observed_sf_units)
if mf_difference != 0:
add_or_remove_mf_units(zone_id, mf_difference, observed_mf_units)
# Both residential categories require imputation
else:
net_difference = sf_difference + mf_difference
# Both residential categories require additions, or both require subtractions
if ((sf_difference > 0) & (mf_difference > 0)) | ((sf_difference < 0) & (mf_difference < 0)):
add_or_remove_sf_units(zone_id, sf_difference, observed_sf_units)
add_or_remove_mf_units(zone_id, mf_difference, observed_mf_units)
# Single-family requires subtractions, and multi-family requires additions
elif (sf_difference < 0) & (mf_difference > 0):
# If |decrease| in single-family units exceeds |increase| in multi-family units
if abs(sf_difference) > abs(mf_difference):
# Take subset of the single-family decrease corresponding to the mult-family increase, and flip the type
remainder = flip_from_sf_to_mf_type(zone_id, mf_difference, observed_mf_units)
# Take rest of the single-family decrease, and remove these units, but only from among non-type-flipped parcels
add_or_remove_sf_units(zone_id, net_difference - remainder, observed_sf_units - (mf_difference - remainder))
# If |decrease| in single-family units is less than or equal to |increase| in multi-family units
elif abs(sf_difference) <= abs(mf_difference):
# Take all of the single-family decrease, and flip their type
remainder = flip_from_sf_to_mf_type(zone_id, abs(sf_difference), observed_mf_units)
# Take rest of the multifamily-increase, if any remains, and add units
if net_difference > 0:
add_or_remove_mf_units(zone_id, net_difference, observed_mf_units)
# If not all singlefamily could be type-flipped, then remove units
if remainder > 0:
allocated = abs(sf_difference) - remainder
add_or_remove_sf_units(zone_id, -remainder, observed_sf_units - allocated)
# Multi-family requires subtractions, and single-family requires additions
elif (mf_difference < 0) & (sf_difference > 0):
# If |decrease| in multi-family units exceeds |increase| in single-family units
if abs(mf_difference) > abs(sf_difference):
# Take subset of the multi-family decrease corresponding to the single-family increase, and flip the type
remainder = flip_from_mf_to_sf_type(zone_id, sf_difference, observed_sf_units)
# Take rest of the multi-family decrease, and remove these units, but only from among non-type-flipped parcels
add_or_remove_mf_units(zone_id, net_difference - remainder, observed_mf_units - (sf_difference - remainder))
# If |decrease| in multi-family units is less than or equal to |increase| in single-family units
elif abs(mf_difference) <= abs(sf_difference):
# Take all of the multi-family decrease, and flip their type
remainder = flip_from_mf_to_sf_type(zone_id, abs(mf_difference), observed_sf_units)
# Take rest of the single-increase, if any remains, and add units
if net_difference > 0:
add_or_remove_sf_units(zone_id, net_difference, observed_sf_units)
# If not all multifamily could be type-flipped, then remove units
if remainder > 0:
allocated = abs(mf_difference) - remainder
add_or_remove_mf_units(zone_id, -remainder, observed_mf_units - allocated)
else:
#TAZ currently has ZERO parcels, add an artificial parcel
print ' No parcels in this zone. Adding synthetic parcel and needed residential units by type.'
new_pid = add_parcel(zone_id)
if target_sf_units > 0:
new_res_buildings.append([new_pid, 'single', target_sf_units])
if target_mf_units > 0:
new_res_buildings.append([new_pid, 'multi', target_mf_units])
# County totals before applying the zone-target adjustments (diagnostic).
print parcels.groupby('county_id').residential_units.sum()
# Apply the accumulated residential-unit deltas back onto the parcels table
# and flag the touched rows as imputed from zone targets.
# NOTE(review): `.loc` on a column Series is chained indexing; presumably it
# writes through to `parcels` here, but pandas may emit SettingWithCopy
# warnings -- confirm.
parcels.residential_units.loc[res_parcel_updates.index] = parcels.residential_units.loc[res_parcel_updates.index].fillna(0) + res_parcel_updates.values
parcels.imputation_flag.loc[res_parcel_updates.index] = parcels.imputation_flag.loc[res_parcel_updates.index] + ', du_zonetarget' ## Populate imputation flag for affected records
# County totals after the adjustments, for comparison with the line above.
print parcels.groupby('county_id').residential_units.sum()
# Turn the accumulated [parcel_id, type, quantity] lists into DataFrames and
# attach county/TAZ attributes from the parcels table for reporting.
new_res_buildings_df = pd.DataFrame(new_res_buildings, columns = ['gid', 'res_type', 'residential_units'])
new_nonres_buildings_df = pd.DataFrame(new_nonres_buildings, columns = ['gid', 'type', 'non_residential_sqft'])
new_res_buildings_df = pd.merge(new_res_buildings_df, parcels[['county_id', 'taz']], left_on = 'gid', right_index=True)
new_nonres_buildings_df = pd.merge(new_nonres_buildings_df, parcels[['county_id', 'taz']], left_on = 'gid', right_index=True)
# Diagnostics: imputed units and sqft by county.
print new_res_buildings_df.groupby('county_id').residential_units.sum()
print new_nonres_buildings_df.groupby('county_id').non_residential_sqft.sum()
##Deal with parcels where development type id is unknown, imputing using Costar/Redfin
problematic = parcels[parcels.development_type_id.isnull() & (parcels.res_type=='other')][['county_id','improvement_value','year_built','stories','sqft_per_unit','residential_units','non_residential_sqft','building_sqft','res_type','land_use_type_id','development_type_id','redfin_home_type', 'costar_property_type','costar_secondary_type']]
##Where no dev type, but we can get dev type from costar, then use costar for the dev type!
##Tag these as nonresidential dev_type based on costar type designation
# Costar type present AND some evidence of improvements on the parcel.
problematic_nonres = problematic[(~(problematic.costar_property_type == '')) & ((problematic.year_built>0)|(problematic.improvement_value>0)|(problematic.stories>0)|(problematic.building_sqft>0)|(problematic.non_residential_sqft>0))]
##Tag these as residential res_type/dev_type based on redfin type designation
# Redfin type present, no Costar type, and some evidence of improvements.
problematic_res = problematic[(~(problematic.redfin_home_type == '')) & (problematic.costar_property_type == '') & ((problematic.year_built>0)|(problematic.improvement_value>0)|(problematic.stories>0)|(problematic.sqft_per_unit>0)|(problematic.building_sqft>0))] ##2810
##After the above, export the county_id/land_use_type_id's that remain as a diagnostic output for Mike to investigate, then assume the rest have dev_type "unknown"
##Double check that all records with res_type 'single' or 'multi' have the appropriate development type
##Double check that all records with res_type 'single' or 'multi' have the appropriate development type
#Map between costar types and development_type_id categories
costar_devtype_map = {'Retail':'RT',
'Office':'OF',
'Industrial':'IW',
'Flex':'IW',
'Specialty':'OF',
'Retail (Strip Center)':'RT',
'Retail (Neighborhood Center)':'RT',
'Hospitality':'HO',
'Health Care':'HP',
'Retail (Community Center)':'RT',
'Sports & Entertainment':'RT',
'Retail (Power Center)':'RT',
'Retail (Regional Mall)':'RT',
'Retail (Lifestyle Center)':'RT',
'Retail (Super Regional Mall)':'RT',
'Office (Strip Center)':'OF',
'Retail (Theme/Festival Center)':'RT',
'Office (Neighborhood Center)':'OF',
'Office (Lifestyle Center)':'OF',
'Retail (Outlet Center)':'RT',
'Specialty (Neighborhood Center)':'RT',
'Industrial (Lifestyle Center)':'IW',
'Office (Regional Mall)':'OF',
'Flex (Strip Center)':'OF',
'General Retail':'RT',
'General Retail (Strip Center)':'RT',
'Hospitality (Neighborhood Center)':'HO',
'Office (Super Regional Mall)':'OF'}
for costar_type in costar_devtype_map.keys():
idx = problematic_nonres[problematic_nonres.costar_property_type == costar_type].index.values
parcels.development_type_id.loc[idx] = costar_devtype_map[costar_type]
parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', costar_type'
#Map between redfin types and res_type categories
redfin_devtype_map1 = {'Single Family Residential':'single',
                       'Condo/Coop':'multi',
                       'Townhouse':'multi',
                       'Vacant Land':'other',
                       'Multi-Family (2-4 Unit)':'multi',
                       'Unknown':'other',
                       'Other':'other',
                       'Mobile/Manufactured Home':'other',
                       'Multi-Family (5+ Unit)':'multi',
                       'Ranch':'single'}
#Map between redfin types and development_type_id categories
# NOTE(review): 'Mobile/Manufactured Home' maps to res_type 'other' above but
# development_type_id 'SF' here -- confirm this asymmetry is intentional.
redfin_devtype_map2 = {'Single Family Residential':'SF',
                       'Condo/Coop':'MF',
                       'Townhouse':'MF',
                       'Vacant Land':'other',
                       'Multi-Family (2-4 Unit)':'MF',
                       'Unknown':'other',
                       'Other':'other',
                       'Mobile/Manufactured Home':'SF',
                       'Multi-Family (5+ Unit)':'MF',
                       'Ranch':'SF'}
# Impute res_type from Redfin category and flag the rows.
for redfin_type in redfin_devtype_map1.keys():
    idx = problematic_res[problematic_res.redfin_home_type == redfin_type].index.values
    parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', redfin_type'
    parcels.res_type.loc[idx] = redfin_devtype_map1[redfin_type]
# Impute development_type_id from Redfin category (same row selection).
for redfin_type in redfin_devtype_map2.keys():
    idx = problematic_res[problematic_res.redfin_home_type == redfin_type].index.values
    parcels.development_type_id.loc[idx] = redfin_devtype_map2[redfin_type]
# Backfill still-null dev types from res_type; `*` is elementwise AND here.
parcels.development_type_id[parcels.development_type_id.isnull()*(parcels.res_type=='single')] = 'SF'
parcels.development_type_id[parcels.development_type_id.isnull()*(parcels.res_type=='multi')] = 'MF'
parcels.development_type_id[parcels.development_type_id.isnull()] = 'other' ##These are the parcels to print out lu_type diagnostics for...
##Note these 'other' parcels will most typically be vacant
#Standardize the development_type_id coding
# Collapses legacy/county-specific codes onto the standard scheme
# (e.g. HM/HT -> MF, HS -> SF, vacant variants -> LD).
devcode_map = {"HM":"MF",
               "HS":"SF",
               "HT":"MF",
               "ME":"MR",
               "RB":"RT",
               "RC":"BR",
               "REC":"BR",
               "RS":"RT",
               " ":"other",
               "VT":"LD",
               "VAC":"LD",
               "VA":"LD",
               }
for devcode in devcode_map.keys():
    parcels.development_type_id[parcels.development_type_id == devcode] = devcode_map[devcode]
parcels['proportion_undevelopable'] = 0.0 ##Populate this with spatial function
parcels.land_use_type_id[parcels.land_use_type_id.isnull()] = ' '
parcels.development_type_id.value_counts()
parcels.county_id[parcels.county_id==' '] = 0
parcels.county_id = parcels.county_id.astype('int')
# SF-specific devtype correction due to residential sometimes being mistakenly
# coded as RT (county_id 75 is San Francisco).
parcels.development_type_id[(parcels.county_id == 75) & (parcels.res_type == 'single') & (parcels.development_type_id == 'RT')] = 'SF'
parcels.development_type_id[(parcels.county_id == 75) & (parcels.res_type == 'multi') & (parcels.development_type_id == 'RT')] = 'MF'
# Assign development type id based on gov_type status for non-residential,
# unit-less parcels. gov_type codes: 12 -> HP, 17 -> SC, 18 -> SH, 19 -> GV
# (presumably hospital/school/shelter/government — TODO confirm code meanings).
parcels.gov_type = parcels.gov_type.fillna(0).astype('int')
parcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 12)] = 'HP'
parcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 17)] = 'SC'
parcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 18)] = 'SH'
parcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 19)] = 'GV'
# Set SCL (Santa Clara, county_id 85) common areas as undevelopable; those
# parcels are identified by an APN starting with '-'.
scl_parcels = parcels[parcels.county_id == 85]
scl_parcels = scl_parcels[scl_parcels.apn.str.startswith('-')]
parcels.proportion_undevelopable.loc[scl_parcels.index.values] = 1.0
##############
###BUILDINGS##
##############
# A parcel is considered "built" (gets a building record) when any improvement
# evidence exists: value, year built, sqft, units, stories, or a costar/redfin
# type that is not a vacant-land/unknown category.
idx = (parcels.improvement_value > 0) | (parcels.year_built > 0) | (parcels.building_sqft > 0) | (parcels.non_residential_sqft > 0) | (parcels.residential_units > 0) | (parcels.stories > 0) | (parcels.sqft_per_unit > 0) | ((parcels.costar_property_type.str.len()> 1) & (~parcels.costar_property_type.isin(['Land','Land (Community Center)']))) | ((parcels.redfin_home_type.str.len()> 1) & (~parcels.redfin_home_type.isin(['Vacant Land','Other','Unknown'])))
buildings = parcels[idx]
# NOTE: Python 2 print statement — this script predates Python 3.
print len(buildings)
buildings = buildings[['county_id', 'land_use_type_id', 'res_type', 'improvement_value', 'year_assessed', 'year_built', 'building_sqft', 'non_residential_sqft', 'residential_units', 'sqft_per_unit', 'stories', 'development_type_id', 'taz', 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'costar_elevators', 'costar_property_type', 'costar_secondary_type', 'costar_building_name', 'costar_rent']].copy(deep=True)
# Sequential 1-based building ids; parcel_id is preserved as a column.
buildings['building_id'] = np.arange(len(buildings)) + 1
buildings.index.name = 'parcel_id'
buildings = buildings.reset_index()
## Incorporate the synthetic buildings added as necessary in some situations to match aggregate unit targets
new_res_buildings_df = new_res_buildings_df.rename(columns = {'gid':'parcel_id'})
new_res_buildings_df['land_use_type_id'] = 'from_imputation'
new_res_buildings_df['development_type_id'] = ''
new_res_buildings_df.development_type_id[new_res_buildings_df.res_type == 'single'] = 'SF'
new_res_buildings_df.development_type_id[new_res_buildings_df.res_type == 'multi'] = 'MF'
# Continue the building_id sequence after the observed buildings...
new_res_buildings_df['building_id'] = np.arange(buildings.building_id.max() + 1, buildings.building_id.max() + len(new_res_buildings_df) + 1)
new_nonres_buildings_df = new_nonres_buildings_df.rename(columns = {'gid':'parcel_id'})
new_nonres_buildings_df['land_use_type_id'] = 'from_imputation'
new_nonres_buildings_df['development_type_id'] = 'OF'
# ...and after the synthetic residential buildings.
new_nonres_buildings_df['building_id'] = np.arange(new_res_buildings_df.building_id.max() + 1, new_res_buildings_df.building_id.max() + len(new_nonres_buildings_df) + 1)
# Tag the associated parcels with "add_synth_bldg" in imputation flag
idx = np.unique(new_res_buildings_df.parcel_id)
parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', add_synth_res_bldg'
idx = np.unique(new_nonres_buildings_df.parcel_id)
parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', add_synth_nonres_bldg'
# Merge the synthetic buildings with the rest
buildings = pd.concat([buildings, new_res_buildings_df])
buildings = pd.concat([buildings, new_nonres_buildings_df])
## Building column cleaning/imputation
# Null unit/sqft counts mean "none observed": treat as zero.
buildings.residential_units[buildings.residential_units.isnull()] = 0
buildings.non_residential_sqft[buildings.non_residential_sqft.isnull()] = 0
# Upper and lower bound configuration (move settings to separate config file?)
year_built_lower_bound = 1790
year_built_upper_bound = 2015
sqft_per_unit_lower_bound = 200
sqft_per_unit_upper_bound = 30000
# Zonal (TAZ) average year built for non-residential buildings, computed only
# from in-bounds observations; used later for imputation.
targetvalues['year_built_av_nonres'] = buildings[(~buildings.res_type.isin(['single', 'multi'])) & (buildings.year_built > year_built_lower_bound) & (buildings.year_built < year_built_upper_bound)].groupby('taz').year_built.mean()
# Attach the zonal target values to every building via its TAZ.
buildings = pd.merge(buildings, targetvalues, left_on = 'taz', right_index = True, how = 'left')
buildings = buildings.set_index('building_id')
## YEAR BUILT
# Residential building with out-of-bounds or null year_built- replace with observed zonal average of good data points
idx = (buildings.res_type.isin(['single', 'multi'])) & ((buildings.year_built < year_built_lower_bound) | (buildings.year_built > year_built_upper_bound) | (buildings.year_built.isnull()))
buildings.year_built[idx] = | |
<filename>backend/src/contaxy/managers/deployment/kubernetes.py
import os
import time
from datetime import datetime, timezone
from typing import Any, List, Literal, Optional
from kubernetes import client as kube_client
from kubernetes import config as kube_config
from kubernetes.client.models import (
V1Deployment,
V1DeploymentList,
V1Job,
V1JobList,
V1JobSpec,
V1ServiceList,
V1Status,
)
from kubernetes.client.rest import ApiException
from loguru import logger
from contaxy.config import settings
from contaxy.managers.deployment.kube_utils import (
build_deployment_metadata,
build_kube_deployment_config,
build_kube_service_config,
build_pod_template_spec,
build_project_network_policy_spec,
check_or_create_project_network_policy,
create_pvc,
create_service,
get_deployment_selection_labels,
get_label_selector,
get_pod,
map_kube_job,
map_kube_service,
wait_for_deletion,
wait_for_deployment,
wait_for_job,
)
from contaxy.managers.deployment.utils import (
DEFAULT_DEPLOYMENT_ACTION_ID,
NO_LOGS_MESSAGE,
Labels,
get_deployment_id,
split_image_name_and_tag,
)
from contaxy.operations import AuthOperations, DeploymentOperations, SystemOperations
from contaxy.operations.components import ComponentOperations
from contaxy.schema import Job, JobInput, ResourceAction, Service, ServiceInput
from contaxy.schema.deployment import DeploymentType, ServiceUpdate
from contaxy.schema.exceptions import (
ClientBaseError,
ClientValueError,
ResourceNotFoundError,
ServerBaseError,
)
from contaxy.utils.auth_utils import parse_userid_from_resource_name
class KubernetesDeploymentManager(DeploymentOperations):
    def __init__(
        self,
        component_manager: ComponentOperations,
        kube_namespace: Optional[str] = None,
    ):
        """Initializes the Kubernetes Deployment Manager.

        Args:
            component_manager: Instance of the component manager that grants access to the other managers.
            kube_namespace: The Kubernetes namespace to use. If it is not given,
                the manager reads the namespace of the pod it runs in from the
                service-account mount.

        Raises:
            ServerBaseError: If no namespace was given and none could be detected.
        """
        self._global_state = component_manager.global_state
        self._request_state = component_manager.request_state
        self._component_manager = component_manager
        try:
            # In-cluster config is the config given by a service account and its role permissions.
            kube_config.load_incluster_config()
        except kube_config.ConfigException:
            # Not running inside a cluster (e.g. local development): fall back to kubeconfig.
            kube_config.load_kube_config(context=os.getenv("CTXY_K8S_CONTEXT", None))
        self.core_api = kube_client.CoreV1Api()
        self.apps_api = kube_client.AppsV1Api()
        self.batch_api = kube_client.BatchV1Api()
        self.networking_api = kube_client.NetworkingV1Api()
        if kube_namespace is None:
            try:
                # At this path the namespace the container is in is stored in Kubernetes deployment (see https://stackoverflow.com/questions/31557932/how-to-get-the-namespace-from-inside-a-pod-in-openshift)
                with open(
                    "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
                ) as namespace_file:
                    self.kube_namespace = namespace_file.read()
            except FileNotFoundError:
                # TODO: fix arguments
                raise ServerBaseError("Could not detect the Kubernetes Namespace")
        else:
            self.kube_namespace = kube_namespace
        # TODO: when we have performance problems in the future, replicate the watch logic from JupyterHub KubeSpawner to keep Pod & other resource information in memory? (see https://github.com/jupyterhub/kubespawner/blob/941585f0f7acb0f366c9979b6274b7f47356a630/kubespawner/reflector.py#L238)
    @property
    def _system_manager(self) -> SystemOperations:
        """System manager, resolved on each access via the component manager."""
        return self._component_manager.get_system_manager()
    @property
    def _auth_manager(self) -> AuthOperations:
        """Auth manager, resolved on each access via the component manager."""
        return self._component_manager.get_auth_manager()
def list_services(
self,
project_id: str,
deployment_type: Literal[
DeploymentType.SERVICE, DeploymentType.EXTENSION
] = DeploymentType.SERVICE,
) -> List[Service]:
label_selector = get_deployment_selection_labels(
project_id=project_id, deployment_type=deployment_type
)
try:
deployments: V1DeploymentList = self.apps_api.list_namespaced_deployment(
namespace=self.kube_namespace, label_selector=label_selector
)
return [map_kube_service(deployment) for deployment in deployments.items]
except ApiException:
return []
    def deploy_service(
        self,
        project_id: str,
        service: ServiceInput,
        action_id: Optional[str] = None,
        deployment_type: Literal[
            DeploymentType.SERVICE, DeploymentType.EXTENSION
        ] = DeploymentType.SERVICE,
        wait: bool = False,
    ) -> Service:
        """Deploy a service for a project as a Kubernetes Service + Deployment (+ PVC).

        Resources are created in this order: network policy (if missing), PVC,
        Kubernetes Service, Kubernetes Deployment. If the Deployment creation or
        the final mapping fails, the already-created Service (and Deployment) are
        cleaned up again; PVC cleanup is still a TODO.

        Args:
            project_id: Project the service should belong to.
            service: Input spec (image, display name, compute config, ...).
            action_id: Unused here; kept for interface compatibility.
            deployment_type: Deploy as a regular service or an extension.
            wait: If True, block until the Deployment reports ready.

        Returns:
            Service: Metadata of the newly deployed service.

        Raises:
            ClientValueError: If no display name was provided.
            ClientBaseError: If the Kubernetes Deployment could not be created.
            ServerBaseError: If the created Deployment could not be mapped.
        """
        image_name, image_tag = split_image_name_and_tag(service.container_image)
        # Reject images that are not on the system's allow-list.
        self._system_manager.check_allowed_image(image_name, image_tag)
        if service.display_name is None:
            raise ClientValueError(
                message=f"Could not create a service id for service with display name {service.display_name}",
                explanation="A display name for the service must be provided.",
            )
        # The service id doubles as the name of all Kubernetes resources below.
        service_id = get_deployment_id(
            project_id=project_id,
            deployment_name=service.display_name,
            deployment_type=deployment_type,
        )
        kube_service_config = build_kube_service_config(
            service_id=service_id,
            service=service,
            project_id=project_id,
            kube_namespace=self.kube_namespace,
        )
        kube_deployment_config, kube_deployment_pvc = build_kube_deployment_config(
            service_id=service_id,
            service=service,
            project_id=project_id,
            kube_namespace=self.kube_namespace,
            auth_manager=self._auth_manager,
            user_id=parse_userid_from_resource_name(
                self._request_state.authorized_subject
            ),
        )
        # Ensure the project-scoped network policy exists before any workload starts.
        check_or_create_project_network_policy(
            network_policy=build_project_network_policy_spec(
                project_id=project_id, kube_namespace=self.kube_namespace
            ),
            networking_api=self.networking_api,
        )
        create_pvc(
            pvc=kube_deployment_pvc,
            kube_namespace=self.kube_namespace,
            core_api=self.core_api,
        )
        create_service(
            service_config=kube_service_config,
            kube_namespace=self.kube_namespace,
            core_api=self.core_api,
        )
        try:
            deployment: V1Deployment = self.apps_api.create_namespaced_deployment(
                namespace=self.kube_namespace, body=kube_deployment_config
            )
            if wait:
                wait_for_deployment(
                    deployment_name=deployment.metadata.name,
                    kube_namespace=self.kube_namespace,
                    apps_api=self.apps_api,
                )
        except (ApiException, Exception) as e:
            # Delete service again as the belonging deployment could not be created,
            # but only when status code is not 409 as 409 indicates that the
            # deployment already exists.
            if not hasattr(e, "status") or e.status != 409:  # type: ignore
                try:
                    self.core_api.delete_namespaced_service(
                        namespace=self.kube_namespace, name=service_id
                    )
                except ApiException:
                    pass
            # TODO: delete pvc here
            raise ClientBaseError(
                status_code=500,
                message=f"Could not create namespaced deployment '{service.display_name}' with reason: {e}",
            )
        try:
            transformed_service = map_kube_service(deployment)
        except Exception as e:
            # Delete already created resources upon an error
            try:
                self.core_api.delete_namespaced_service(
                    namespace=self.kube_namespace, name=service_id
                )
            except ApiException:
                pass
            try:
                self.apps_api.delete_namespaced_deployment(
                    namespace=self.kube_namespace, name=service_id
                )
            except ApiException:
                pass
            # TODO: delete pvc here
            raise ServerBaseError(
                f"Could not transform deployment '{service.display_name}' with reason: {e}"
            )
        return transformed_service
def update_service(
self, project_id: str, service_id: str, service: ServiceUpdate
) -> Service:
# Service update is only implemented on DeploymentManagerWithDB wrapper
raise NotImplementedError()
def list_deploy_service_actions(
self, project_id: str, service: ServiceInput
) -> List[ResourceAction]:
# TODO: make some cluster checks?
return [
ResourceAction(
action_id=DEFAULT_DEPLOYMENT_ACTION_ID,
display_name=DEFAULT_DEPLOYMENT_ACTION_ID,
)
]
def get_service_metadata(self, project_id: str, service_id: str) -> Service:
try:
deployment: V1Deployment = self.apps_api.read_namespaced_deployment(
name=service_id, namespace=self.kube_namespace
)
# Make sure that the service belongs to the same contaxy namespace as the core-backend. Also, double check that this service really belongs to the project (even though the serviceId should be unique)
if (
deployment.metadata.labels[Labels.NAMESPACE.value]
!= settings.SYSTEM_NAMESPACE
or deployment.metadata.labels[Labels.PROJECT_NAME.value] != project_id
):
raise ResourceNotFoundError(
f"Could not get metadata of service '{service_id}' for project {project_id}."
)
return map_kube_service(deployment)
except ApiException:
raise ResourceNotFoundError(
f"Could not get metadata of service '{service_id}' for project {project_id}."
)
    def delete_service(
        self,
        project_id: str,
        service_id: str,
        delete_volumes: bool = False,
        retries: int = 0,
    ) -> None:
        """Delete the Kubernetes Service, Deployment and (optionally) PVC of a service.

        Args:
            project_id: Project the service belongs to (not used for lookup here;
                the service_id is the unique Kubernetes resource name).
            service_id: Id of the service to delete.
            delete_volumes: If True, also delete the persistent volume claim.
            retries: Currently unused; kept for the commented-out retry logic below.

        Raises:
            ClientBaseError: If any of the deletions failed. Note that all
                failures — including the ServerBaseErrors raised for "Failure"
                statuses inside the try block — are converted to this generic
                500 error by the handler at the bottom.
        """
        try:
            status: V1Status = self.core_api.delete_namespaced_service(
                name=service_id,
                namespace=self.kube_namespace,
                propagation_policy="Foreground",
            )
            if status.status == "Failure":
                raise ServerBaseError(
                    f"Could not delete Kubernetes service for service-id {service_id}"
                )
            status = self.apps_api.delete_namespaced_deployment(
                name=service_id,
                namespace=self.kube_namespace,
                propagation_policy="Foreground",
            )
            if status.status == "Failure":
                raise ServerBaseError(
                    f"Could not delete Kubernetes deployment for service-id {service_id}"
                )
            if delete_volumes:
                status = self.core_api.delete_namespaced_persistent_volume_claim(
                    namespace=self.kube_namespace, name=service_id
                )
                if status.status == "Failure":
                    # TODO: if we work with a queue system, then add it to a deletion queue
                    # log(
                    #     f"Could not delete Kubernetes Persistent Volume Claim for service-id {service_id}"
                    # )
                    raise ServerBaseError(
                        f"Could not delete Kubernetes Persistent Volume Claim for service-id {service_id}"
                    )
            # wait some time for the deployment to be deleted
            wait_for_deletion(
                self.apps_api, self.kube_namespace, deployment_id=service_id
            )
        except Exception:
            # TODO: add resources to delete to a queue instead of deleting directly? This would have the advantage that even if an operation failes, it is repeated. Also, if the delete endpoint is called multiple times, it is only added once to the queue
            # if retries < max_retries:
            #     try:
            #         return self.delete_service(
            #             project_id=project_id,
            #             service_id=service_id,
            #             delete_volumes=delete_volumes,
            #             retries=retries + 1,
            #         )
            #     except Exception:
            #         pass
            raise ClientBaseError(
                status_code=500,
                message=f"Could not delete service '{service_id}'.",
            )
    def delete_services(
        self,
        project_id: str,
    ) -> None:
        """Delete all Kubernetes resources of all services of a project.

        Services are deleted one by one; deployments and volume claims are
        deleted via collection deletes filtered by the project label selector.

        Args:
            project_id: Project whose services should be removed.

        Raises:
            ClientBaseError: If any deletion failed (all errors, including the
                ServerBaseErrors raised inside the try block, are logged and
                converted to this generic 500 error).
        """
        label_selector = get_deployment_selection_labels(
            project_id=project_id, deployment_type=DeploymentType.SERVICE
        )
        try:
            services: V1ServiceList = self.core_api.list_namespaced_service(
                namespace=self.kube_namespace, label_selector=label_selector
            )
            # No collection-delete exists for Services: remove them individually.
            for service in services.items:
                self.core_api.delete_namespaced_service(
                    name=service.metadata.name, namespace=self.kube_namespace
                )
            status = self.apps_api.delete_collection_namespaced_deployment(
                namespace=self.kube_namespace,
                label_selector=label_selector,
                propagation_policy="Foreground",
            )
            if status.status == "Failure":
                raise ServerBaseError(
                    f"Could not delete Kubernetes deployments for project '{project_id}'"
                )
            status = self.core_api.delete_collection_namespaced_persistent_volume_claim(
                namespace=self.kube_namespace,
                label_selector=label_selector,
                propagation_policy="Foreground",
            )
            if status.status == "Failure":
                raise ServerBaseError(
                    f"Could not delete Kubernetes volumes for project '{project_id}'"
                )
        except Exception as e:
            logger.error(f"Error in Kubernetes->delete_services. Reason: {e}")
            raise ClientBaseError(
                500, f"Could not delete services for project '{project_id}'"
            )
    def get_service_logs(
        self,
        project_id: str,
        service_id: str,
        lines: Optional[int] = None,
        since: Optional[datetime] = None,
    ) -> str:
        """Read the pod logs of a service deployment.

        Args:
            project_id: Project the service belongs to.
            service_id: Id of the service whose pod logs should be read.
            lines: If set, only the last `lines` log lines are returned.
            since: If set, only logs newer than this timestamp are returned
                (compared against UTC now, so it is assumed to be timezone-aware
                UTC — TODO confirm against callers).

        Returns:
            str: The log output, or NO_LOGS_MESSAGE if the logs could not be
            read (e.g. the pod never left the Pending state within the timeout).

        Raises:
            ResourceNotFoundError: If no pod for the service could be found.
        """
        try:
            pod = get_pod(
                project_id=project_id,
                service_id=service_id,
                kube_namespace=self.kube_namespace,
                core_api=self.core_api,
            )
        except Exception:
            pod = None
        if pod is None:
            raise ResourceNotFoundError(
                f"Could not find service {service_id} to read logs from."
            )
        # TODO: remove as this should not be a concern of the get_logs function
        try:
            # Give some time to let the container within the pod start
            start = time.time()
            timeout = 60
            while True:
                # NOTE(review): "ContainerCreating" is a container-state reason,
                # not a pod phase, so only "Pending" can match here — confirm intent.
                if pod.status.phase in ["Pending", "ContainerCreating"]:
                    try:
                        pod = get_pod(
                            project_id=project_id,
                            service_id=service_id,
                            kube_namespace=self.kube_namespace,
                            core_api=self.core_api,
                        )
                        time.sleep(1)
                    except Exception:
                        pass
                else:
                    break
                if time.time() - start > timeout:
                    # Swallowed by the outer `except Exception` below, so a timeout
                    # surfaces to the caller as NO_LOGS_MESSAGE.
                    raise ServerBaseError(
                        f"Could not read logs from service {service_id} due to status error."
                    )
            since_seconds = None
            if since:
                # +1 second so the boundary second itself is included.
                since_seconds = (
                    int((datetime.now(timezone.utc) - since).total_seconds()) + 1
                )
            try:
                return self.core_api.read_namespaced_pod_log(
                    name=pod.metadata.name,
                    namespace=self.kube_namespace,
                    pretty="true",
                    tail_lines=lines if lines else None,
                    since_seconds=since_seconds,
                )
            except ApiException:
                # Also swallowed by the outer handler below.
                raise ServerBaseError(f"Could not read logs of service {service_id}.")
        except Exception:
            return NO_LOGS_MESSAGE
def list_jobs(self, project_id: str) -> List[Job]:
label_selector = get_label_selector(
[
(Labels.NAMESPACE.value, settings.SYSTEM_NAMESPACE),
(Labels.PROJECT_NAME.value, project_id),
(Labels.DEPLOYMENT_TYPE.value, DeploymentType.JOB.value),
]
)
try:
jobs: V1JobList = self.batch_api.list_namespaced_job(
namespace=self.kube_namespace, label_selector=label_selector
)
except ApiException:
return []
return [map_kube_job(job) for job in jobs.items]
def deploy_job(
self,
project_id: str,
job: JobInput,
action_id: Optional[str] = None,
wait: bool = False,
) -> Job:
image_name, image_tag = split_image_name_and_tag(job.container_image)
self._system_manager.check_allowed_image(image_name, image_tag)
if job.display_name is None:
raise ClientValueError(
message=f"Could not create service id for job {job.display_name}",
explanation="The display name for a service must be set.",
)
deployment_id = get_deployment_id(
project_id=project_id,
deployment_name=job.display_name,
deployment_type=DeploymentType.JOB,
)
metadata = build_deployment_metadata(
kube_namespace=self.kube_namespace,
project_id=project_id,
deployment_id=deployment_id,
display_name=job.display_name.replace(" ", "__"),
labels=job.metadata,
compute_resources=job.compute,
endpoints=job.endpoints,
deployment_type=DeploymentType.JOB,
user_id=parse_userid_from_resource_name(
self._request_state.authorized_subject
),
)
| |
"%(minute)d%(minute_label)s, %(second)d%(second_label)s"
>>> print(f.toString((2*86400 + 3*3600 + 5*60 + 2, "second", "group_deltatime"),
... useThisFormat=delta_format))
2 days, 3 hours, 5 minutes, 2 seconds
"""
    def __init__(self, unit_format_dict = default_unit_format_dict,
                 unit_label_dict = default_unit_label_dict,
                 time_format_dict = default_time_format_dict,
                 ordinate_names = default_ordinate_names):
        """Initialize an instance of Formatter.

        unit_format_dict: Key is unit type (eg, 'inHg'), value is a
        string format ("%.1f")

        unit_label_dict: Key is unit type (eg, 'inHg'), value is a
        label (" inHg")

        time_format_dict: Key is a context (eg, 'week'), value is a
        strftime format ("%d-%b-%Y %H:%M").

        ordinate_names: A list of names for the compass ordinates; the last
        entry is used when no direction is available.
        """
        self.unit_format_dict = unit_format_dict
        self.unit_label_dict = unit_label_dict
        # Make a copy of the time format dictionary. This will stop the
        # unwanted interpolation of key delta_time.
        self.time_format_dict = dict(time_format_dict)
        self.ordinate_names = ordinate_names
        # Add new keys for backwards compatibility on old skin dictionaries:
        self.time_format_dict.setdefault('ephem_day', "%H:%M")
        self.time_format_dict.setdefault('ephem_year', "%d-%b-%Y %H:%M")
@staticmethod
def fromSkinDict(skin_dict):
"""Factory static method to initialize from a skin dictionary."""
try:
unit_format_dict = skin_dict['Units']['StringFormats']
except KeyError:
unit_format_dict = default_unit_format_dict
try:
unit_label_dict = skin_dict['Units']['Labels']
except KeyError:
unit_label_dict = default_unit_label_dict
try:
time_format_dict = skin_dict['Units']['TimeFormats']
except KeyError:
time_format_dict = default_time_format_dict
try:
ordinate_names = weeutil.weeutil.option_as_list(
skin_dict['Units']['Ordinates']['directions'])
except KeyError:
ordinate_names = default_ordinate_names
return Formatter(unit_format_dict,
unit_label_dict,
time_format_dict,
ordinate_names)
def get_format_string(self, unit):
"""Return a suitable format string."""
# First, try my internal format dict
if unit in self.unit_format_dict:
return self.unit_format_dict[unit]
# If that didn't work, try the default dict:
elif unit in default_unit_format_dict:
return default_unit_format_dict[unit]
else:
# Can't find one. Return a generic formatter:
return '%f'
def get_label_string(self, unit, plural=True):
"""Return a suitable label.
This function looks up a suitable label in the unit_label_dict. If the
associated value is a string, it returns it. If it is a tuple or a list,
then it is assumed the first value is a singular version of the label
(e.g., "foot"), the second a plural version ("feet"). If the parameter
plural=False, then the singular version is returned. Otherwise, the
plural version.
"""
# First, try my internal label dictionary:
if unit in self.unit_label_dict:
label = self.unit_label_dict[unit]
# If that didn't work, try the default label dictionary:
elif unit in default_unit_label_dict:
label = default_unit_label_dict[unit]
else:
# Can't find a label. Just return an empty string:
return u''
# Is the label a tuple or list?
if isinstance(label, (tuple, list)):
# Yes. Return the singular or plural version as requested
return label[1] if plural else label[0]
else:
# No singular/plural version. It's just a string. Return it.
return label
    def toString(self, val_t, context='current', addLabel=True,
                 useThisFormat=None, None_string=None,
                 localize=True):
        """Format the value as a unicode string.

        val_t: The value to be formatted as a value tuple
        (datum, unit type, unit group).

        context: A time context (eg, 'day').
        [Optional. If not given, context 'current' will be used.]

        addLabel: True to add a unit label (eg, 'mbar'), False to not.
        [Optional. If not given, a label will be added.]

        useThisFormat: An optional string or strftime format to be used.
        [Optional. If not given, the format given in the initializer will
        be used.]

        None_string: A string to be used if the value val is None.
        [Optional. If not given, the string given by unit_format_dict['NONE']
        will be used.]

        localize: True to localize the results. False otherwise
        """
        if val_t is None or val_t[0] is None:
            # Missing datum: show the "NONE" placeholder (or the caller's override).
            if None_string is None:
                val_str = self.unit_format_dict.get('NONE', u'N/A')
            else:
                # Make sure the "None_string" is, in fact, a string
                if isinstance(None_string, six.string_types):
                    val_str = None_string
                else:
                    # Coerce to a string.
                    val_str = str(None_string)
            addLabel = False
        elif val_t[1] == "unix_epoch":
            # Different formatting routines are used if the value is a time.
            if useThisFormat is None:
                val_str = time.strftime(self.time_format_dict.get(context, "%d-%b-%Y %H:%M"),
                                        time.localtime(val_t[0]))
            else:
                val_str = time.strftime(useThisFormat, time.localtime(val_t[0]))
            addLabel = False
        elif val_t[2] == "group_deltatime":
            # Get a delta-time format string. Use a default if the user did not supply one:
            if useThisFormat is None:
                format_string = self.time_format_dict.get("delta_time",
                                                          default_time_format_dict["delta_time"])
            else:
                format_string = useThisFormat
            # Now format the delta time, using the function delta_secs_to_string:
            val_str = self.delta_secs_to_string(val_t[0], format_string)
            addLabel = False
        else:
            # It's not a time. It's a regular value. Get a suitable format string:
            if useThisFormat is None:
                # No user-specified format string. Go get one:
                format_string = self.get_format_string(val_t[1])
            else:
                # User has specified a string. Use it.
                format_string = useThisFormat
            if localize:
                # Localization requested. Use locale with the supplied format:
                val_str = locale.format_string(format_string, val_t[0])
            else:
                # No localization. Just format the string.
                val_str = format_string % val_t[0]
        # Make sure the results are in unicode:
        val_ustr = six.ensure_text(val_str)
        # Add a label, if requested:
        if addLabel:
            # Make sure the label is in unicode before tacking it on to the end.
            # The singular label is used only when the datum is exactly 1.
            label = self.get_label_string(val_t[1], plural=(not val_t[0]==1))
            val_ustr += six.ensure_text(label)
        return val_ustr
def to_ordinal_compass(self, val_t):
if val_t[0] is None:
return self.ordinate_names[-1]
_sector_size = 360.0 / (len(self.ordinate_names)-1)
_degree = (val_t[0] + _sector_size/2.0) % 360.0
_sector = int(_degree / _sector_size)
return self.ordinate_names[_sector]
def delta_secs_to_string(self, secs, label_format):
"""Convert elapsed seconds to a string
Example:
>>> f = Formatter()
>>> print(f.delta_secs_to_string(3*86400+21*3600+7*60+11,
... default_time_format_dict["delta_time"]))
3 days, 21 hours, 7 minutes
"""
etime_dict = {}
for (label, interval) in (('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)):
amt = int(secs // interval)
etime_dict[label] = amt
etime_dict[label + '_label'] = self.get_label_string(label, not amt == 1)
secs %= interval
ans = locale.format_string(label_format, etime_dict)
return ans
#==============================================================================
# class Converter
#==============================================================================
class Converter(object):
"""Holds everything necessary to do conversions to a target unit system."""
    def __init__(self, group_unit_dict=USUnits):
        """Initialize an instance of Converter

        group_unit_dict: A dictionary holding the conversion information.
        Key is a unit_group (eg, 'group_pressure'), value is the target
        unit type ('mbar'). Defaults to the US unit system."""
        self.group_unit_dict = group_unit_dict
@staticmethod
def fromSkinDict(skin_dict):
"""Factory static method to initialize from a skin dictionary."""
try:
group_unit_dict = skin_dict['Units']['Groups']
except KeyError:
group_unit_dict = USUnits
return Converter(group_unit_dict)
    def convert(self, val_t):
        """Convert a value from a given unit type to the target type.

        val_t: A value tuple with the datum, a unit type, and a unit group

        returns: A value tuple in the new, target unit type. If the input
        value tuple contains an unknown unit type an exception of type KeyError
        will be thrown. If the input value tuple has either a unit
        type of None, or a group type of None (but not both), then an
        exception of type KeyError will be thrown. If both the
        unit and group are None, then the original val_t will be
        returned (i.e., no conversion is done).

        Examples:
        >>> p_m = (1016.5, 'mbar', 'group_pressure')
        >>> c = Converter()
        >>> print("%.3f %s %s" % c.convert(p_m))
        30.017 inHg group_pressure

        Try an unspecified unit type:
        >>> p2 = (1016.5, None, None)
        >>> print(c.convert(p2))
        (1016.5, None, None)

        Try a bad unit type:
        >>> p3 = (1016.5, 'foo', 'group_pressure')
        >>> try:
        ...     print(c.convert(p3))
        ... except KeyError:
        ...     print("Exception thrown")
        Exception thrown

        Try a bad group type:
        >>> p4 = (1016.5, 'mbar', 'group_foo')
        >>> try:
        ...     print(c.convert(p4))
        ... except KeyError:
        ...     print("Exception thrown")
        Exception thrown
        """
        if val_t[1] is None and val_t[2] is None:
            # Nothing known about the unit or the group: pass through unchanged.
            return val_t
        # Determine which units (eg, "mbar") this group should be in.
        # If the user has not specified anything, then fall back to US Units.
        new_unit_type = self.group_unit_dict.get(val_t[2], USUnits[val_t[2]])
        # Now convert to this new unit type:
        new_val_t = convert(val_t, new_unit_type)
        return new_val_t
def convertDict(self, obs_dict):
"""Convert an observation dictionary into the target unit system.
The source dictionary must include the key 'usUnits' in order for the
converter to figure out what unit system it is in.
The output dictionary will contain no information about the unit
system (that is, it will not contain a 'usUnits' entry). This is
because the conversion is general: it may not result in a standard
unit system.
Example: convert a dictionary which is in the metric unit system
into US units
>>> # Construct a default converter, which will be to US units
>>> c = | |
for the SQLSession class
:param database_url: string with unique resource identifier to database
:param verbose: [optional] boolean to enable database logging to stdout
NOTE: to create a new database, use a tool SQL Workbench
https://data36.com/install-sql-workbench-postgresql/
SET AUTOCOMMIT = ON
create database mydb
'''
title = '%s.__init__' % self.__class__.__name__
# construct fields
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# validate inputs
input_fields = {
'database_url': database_url
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate database url
import re
url_split = re.compile('://+')
url_error = '%s(database_url="..." must have the format: dialect://user:password@host:port or dialect:///../path/to/local.db' % title
if not url_split.findall(database_url):
raise ValueError(url_error)
sql_dialect, db_path = url_split.split(database_url)
if sql_dialect == 'sqlite':
from os import path
try:
db_root, db_file = path.split(db_path)
except:
raise ValueError(url_error)
if db_root:
if not path.exists(db_root):
from os import makedirs
makedirs(db_root)
else:
db_cred, db_file = db_path.split('@')
# TODO add method to create database if not there
# https://stackoverflow.com/questions/6506578/how-to-create-a-new-database-using-sqlalchemy
# construct database session
self.engine = create_engine(database_url, echo=verbose)
self.session = self.engine.connect()
self.database_name = db_file
self.database_url = database_url
self.verbose = verbose
self.database_dialect = sql_dialect
class SQLTable(object):
    ''' a class to store json valid records in a sql database

    REFERENCES:
        https://docs.sqlalchemy.org/en/13/core/tutorial.html
    '''

    # jsonmodel validation schema for this class's method arguments.
    # 'schema' gives an example value for each argument field; 'components'
    # declares the per-field validation criteria (lengths, ranges, enums).
    _class_fields = {
        'schema': {
            'int': 0,
            'table_name': 'User_Data',
            'record_schema': {
                'schema': {}
            },
            'limit': 1,
            'cursor': 1,
            'original': {},
            'updated': {},
            'merge_rule': 'overwrite',
            'filter': {},
            'sort': [{}]
        },
        'components': {
            'table_name': {
                'max_length': 255,
                # Table names must be sql-safe: no slashes, dots, dashes,
                # leading digits, leading double underscores, or whitespace.
                'must_not_contain': ['/', '\\.', '-', '^\d', '^__', '\s']
            },
            'merge_rule': {
                'discrete_values': ['overwrite', 'skip', 'update']
            },
            'record_schema': {
                'extra_fields': True
            },
            'limit': {
                'greater_than': 0,
                'integer_data': True,
                'max_value': 1000
            },
            'cursor': {
                'greater_than': 0,
                'integer_data': True
            },
            'int': {
                # Largest signed 64-bit integer (SQL BIGINT upper bound).
                'max_value': 9223372036854775807,
                'integer_data': True
            }
        }
    }
def __init__(self, sql_session, table_name, record_schema, rebuild=True, default_values=False, verbose=False):
'''
the initialization method for the SQLTable class
:param sql_session: sql.SQLSession object
:param table_name: string with name for table of records
:param record_schema: dictionary with jsonmodel valid schema for records
:param rebuild: [optional] boolean to rebuild table with schema changes
:param default_values: [optional] boolean to add default values to records
:param verbose: [optional] boolean to enable database logging to stdout
NOTE: init will automatically update the table schema if the record schema
differs from the existing table. in order to change the name of a field
without losing the data associated with the old name, add the old key
name to the field's metadata in the schema declaration:
components['.new_field']['field_metadata']['replace_key'] = '.old_field'
NOTE: as datatypes are required to define columns, null values are not
allowed in the record schema
'''
title = '%s.__init__' % self.__class__.__name__
# construct fields
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# validate inputs
input_fields = {
'table_name': table_name,
'record_schema': record_schema
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate sql client input
if not isinstance(sql_session, SQLSession):
raise ValueError('%s(sql_client) must be an instance of the SQLSession class.')
# construct record model
self.record_schema = record_schema
self.model = jsonModel(record_schema)
self.default = self.model.ingest(**{})
self.default_values = default_values
# define item key pattern
import re
self.item_key = re.compile('\[0\]')
# construct database session
self.engine = sql_session.engine
self.session = sql_session.session
self.table_name = table_name
self.database_name = sql_session.database_name
self.database_url = sql_session.database_url
self.verbose = verbose
self.database_dialect = sql_session.database_dialect
self.table_name_safe = table_name.replace('-','_').replace(' ','_').lower()
# construct verbose method
self.printer_on = True
def _printer(msg, flush=False):
if self.verbose and self.printer_on:
if flush:
print(msg, end='', flush=True)
else:
print(msg)
self.printer = _printer
# verify schema criteria
self.empty_maps = set()
for key, value in self.model.keyMap.items():
if not self.database_dialect in ('sqlite', 'postgres'):
# verify max length for string fields for certain sql dialects
if value['value_datatype'] == 'string':
if not 'max_length' in value.keys():
raise ValueError(
'%s database requires a "max_length" be declared for string field %s in record_schema.' % (
self.database_dialect, key))
# verify no null datatype declarations
if value['value_datatype'] == 'null':
raise ValueError('%s(record_schema={...}) field %s cannot have the null datatype.' % (title, key))
# verify id key is a string or number
if key == '.id':
if not value['value_datatype'] in ('string','number'):
raise ValueError('%s(record_schema={...}) field %s must be a string or number datatype.' % (title, key))
if len(key) > 1 and value['value_datatype'] == 'map':
if value['extra_fields']:
default = self.model._walk(key, self.default)
if not default[0]:
self.empty_maps.add(key)
# # ORM construct
# from sqlalchemy.orm import sessionmaker
# from sqlalchemy.ext.declarative import declarative_base
# self.engine = create_engine(database_url, echo=verbose)
# dbSession = sessionmaker(bind=self.engine)
# self.session = dbSession()
# self.base = declarative_base()
# class RecordObject(self.base):
# __tablename__ = table_name
# id = Column(String, primary_key=True)
# self.base.metadata.create_all(self.engine)
# construct lab id class
from labpack.records.id import labID
self.labID = labID
# construct table metadata
from sqlalchemy import Table, MetaData
metadata = MetaData()
# determine if there is a prior table
migration_complete = True
prior_name = self.table_name
table_pattern = '%s_old_\w{24}' % self.table_name
table_regex = re.compile(table_pattern)
self.tables = self.engine.table_names()
for table in self.tables:
if table_regex.findall(table):
prior_name = table
migration_complete = False
break
prior_columns = self._extract_columns(prior_name)
# construct new table object
add_sequence = False
current_columns = self._parse_columns()
if not 'id' in current_columns.keys():
current_columns['id'] = ['id', 'string', '', None]
if not self.database_dialect in ('sqlite', 'postgres'):
current_columns['id'] = ['id', 'string', '', 24]
self.record_schema['schema']['id'] = ''
self.model = jsonModel(self.record_schema)
elif isinstance(current_columns['id'], int):
if not current_columns['id']:
add_sequence = True
table_args = [self.table_name, metadata]
# TODO add sequence for integer ids
# http://docs.sqlalchemy.org/en/latest/core/defaults.html#sqlalchemy.schema.Sequence
column_args = self._construct_columns(current_columns)
table_args.extend(column_args)
self.table = Table(*table_args)
# process table updates
if prior_columns:
# determine columns to add, remove, rename and change properties
add_columns, remove_columns, rename_columns, retype_columns, resize_columns = self._compare_columns(
current_columns, prior_columns)
# define update functions
# https://stackoverflow.com/questions/7300948/add-column-to-sqlalchemy-table
def _add_column(column_key):
column = getattr(self.table.c, column_key)
column_name = column.key
if column_name.find('.') > -1:
column_name = '"%s"' % column_name
column_type = column.type.compile(self.engine.dialect)
self.engine.execute('ALTER TABLE %s ADD COLUMN %s %s' % (self.table_name, column_name, column_type))
def _remove_column(column):
column_name = column.compile(dialect=self.engine.dialect)
self.engine.execute('ALTER TABLE %s DROP COLUMN %s' % (self.table_name, column_name))
# update table schema
if remove_columns or rename_columns or retype_columns or resize_columns:
if not rebuild:
raise ValueError(
'%s table in %s database must be rebuilt in order to update to desired record_schema. try: rebuild=True' % (
self.table_name, self.database_name))
else:
new_name = self.table_name
old_name = '%s_old_%s' % (self.table_name, self.labID().id24.replace('-', '_').lower())
if not migration_complete:
old_name = prior_name
self._rebuild_table(new_name, old_name, current_columns, prior_columns)
elif not migration_complete:
self.printer('Update of %s table previously interrupted...' % self.table_name)
self._rebuild_table(self.table_name, prior_name, current_columns, prior_columns)
elif add_columns:
column_names = []
for column_key in add_columns.keys():
_add_column(column_key)
column_names.append(column_key)
from labpack.parsing.grammar import join_words
plural = ''
if len(column_names) > 1:
plural = 's'
self.printer('%s column%s added to table %s' % (join_words(column_names), plural, self.table_name))
# or create new table
else:
self.table.create(self.engine)
self.printer('%s table created in %s database.' % (self.table_name, self.database_name))
# add tables property with list of tables in database
self.tables = self.engine.table_names()
def _extract_columns(self, table_name):
''' a method to extract the column properties of an existing table '''
import re
from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA, BIGINT
# retrieve list of tables
metadata_object = MetaData()
table_list = self.engine.table_names()
# determine columns
prior_columns = {}
if table_name in table_list:
metadata_object.reflect(self.engine)
existing_table = metadata_object.tables[table_name]
for column in existing_table.columns:
column_type = None
column_length = None
if column.type.__class__ == FLOAT().__class__:
column_type = 'float'
elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres
column_type = 'float'
elif column.type.__class__ == INTEGER().__class__:
column_type = 'integer'
elif column.type.__class__ == BIGINT().__class__:
column_type = 'integer'
elif column.type.__class__ == VARCHAR().__class__:
column_length = getattr(column.type, 'length', None)
if column_length == 1:
if column.primary_key:
column_length = None
column_type = 'string'
elif column.type.__class__ == BLOB().__class__:
column_type = 'list'
elif column.type.__class__ in (BIT().__class__, BYTEA().__class__):
column_type = 'list'
elif column.type.__class__ == BOOLEAN().__class__:
column_type = 'boolean'
prior_columns[column.key] = (column.key, column_type, '', column_length)
return prior_columns
def _parse_columns(self):
''' a helper method for parsing the column properties from the record schema '''
# construct column list
column_map = {}
for key, value in self.model.keyMap.items():
record_key = key[1:]
if record_key:
if self.item_key.findall(record_key):
pass
else:
if value['value_datatype'] == 'map' and not key in self.empty_maps:
| |
import torch
from transboost.weak_learner import *
from transboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder
from transboost.callbacks import CallbacksManagerIterator, Step, ModelCheckpoint, CSVLogger,\
Progression, BestRoundTrackerCallback,BreakOnMaxStepCallback, \
BreakOnPerfectTrainAccuracyCallback, BreakOnPlateauCallback, BreakOnZeroRiskCallback
from transboost.utils import FiltersGenerator
from torch.nn import functional as F
from transboost.aggregation_mechanism import TransformInvariantFeatureAggregation as Tifa
class TransBoost:
    def __init__(self, filters_generator, weak_learner, aggregation_mechanism,
                 encoder=None, n_filters_per_layer=100, n_layers=3, f0=None,
                 patience=None, break_on_perfect_train_acc=False, callbacks=None):
        """
        Args:
            filters_generator (FiltersGenerator object): Objects that generates filters from a bank of examples.
            weak_learner (Object that defines the 'fit' method and the 'predict' method): Weak learner that generates weak predictors to be boosted on.
            aggregation_mechanism (callable): Callable mapping (examples, filters) to the attribute matrix fed to the weak learner.
            encoder (LabelEncoder object, optional): Object that encodes the labels to provide an easier separation problem. If None, a one-hot encoding is built lazily in 'fit'.
            n_filters_per_layer (int or sequence of ints, optional, default=100): Number of filters generated for each layer. An int is broadcast to all 'n_layers' layers.
            n_layers (int, optional, default=3): Number of layers of filters.
            f0 (Array of shape (encoding_dim,), optional, default=None): Initial prediction function. If None, f0 is set to 0.
            patience (int, optional, default=None): Number of boosting rounds before terminating the algorithm when the training accuracy shows no improvements. If None, the boosting rounds will continue until max_round_number iterations (if not None).
            break_on_perfect_train_acc (Boolean, optional, default=False): If True, it will stop the iterations if a perfect train accuracy of 1.0 is achieved.
            callbacks (Iterable of Callback objects, optional, default=None): Callbacks objects to be called at some specific step of the training procedure to execute something. Ending conditions of the boosting iteration are handled with BreakCallbacks. If callbacks contains BreakCallbacks and terminating conditions (max_round_number, patience, break_on_perfect_train_acc) are not None, all conditions will be checked at each round and the first that is not verified will stop the iteration.
        """
        self.filters_generator = filters_generator
        self.weak_learner = weak_learner
        self.aggregation_mechanism = aggregation_mechanism
        self.encoder = encoder
        self.weak_predictors = []
        self.filters = []
        self.best_round = None

        self.n_layers = n_layers
        if isinstance(n_filters_per_layer, int):
            self.n_filters_per_layer = [n_filters_per_layer]*n_layers
        else:
            self.n_filters_per_layer = n_filters_per_layer

        if f0 is not None:
            self.f0 = f0
        elif self.encoder is not None:
            self.f0 = np.zeros(self.encoder.encoding_dim)
        else:
            # Bug fix: with the default arguments (encoder=None, f0=None) the
            # previous code dereferenced self.encoder.encoding_dim and raised
            # an AttributeError. The encoder is only built in fit(), so the
            # initialization of f0 is deferred there as well.
            self.f0 = None

        # Callbacks: always ensure a Progression callback is present, then add
        # the break conditions requested by the constructor arguments.
        if callbacks is None:
            callbacks = [Progression()]
        elif not any(isinstance(callback, Progression) for callback in callbacks):
            callbacks.append(Progression())
        if break_on_perfect_train_acc:
            callbacks.append(BreakOnPerfectTrainAccuracyCallback())
        if patience:
            callbacks.append(BreakOnPlateauCallback(patience=patience))
        self.callbacks = callbacks

    def algorithm(self, *args, **kwargs):
        """Factory returning the algorithm object used by '_fit'. Subclasses may override it."""
        return TransBoostAlgorithm(*args, **kwargs)

    def fit(self, X, Y, X_val=None, Y_val=None, **weak_learner_fit_kwargs):
        """
        Function that fits the model to the data.
        The function is split into two parts: the first prepare the data, the second, done in _fit, actually executes the algorithm. The iteration and the callbacks are handled by a CallbacksManagerIterator.
        Args:
            X (Array of shape (n_examples, ...)): Examples.
            Y (Iterable of 'n_examples' elements): Labels for the examples X. Y is encoded with the encode_labels method if one is provided, else it is transformed as one-hot vectors.
            X_val (Array of shape (n_val, ...), optional, default=None): Validation examples. If not None, the validation accuracy will be evaluated at each boosting round.
            Y_val (Iterable of 'n_val' elements, optional, default=None): Validation labels for the examples X_val. If not None, the validation accuracy will be evaluated at each boosting round.
            weak_learner_fit_kwargs: Keyword arguments to pass to the fit method of the weak learner.
        Returns self.
        """
        # Initialization
        self.weak_predictors = []
        if not any(isinstance(callback, BestRoundTrackerCallback) for callback in self.callbacks):
            # Track the best round on validation accuracy when a validation
            # set is supplied, otherwise on training accuracy.
            if X_val is not None and Y_val is not None:
                self.callbacks.append(BestRoundTrackerCallback(quantity='valid_acc'))
            else:
                self.callbacks.append(BestRoundTrackerCallback(quantity='train_acc'))

        # Encodes the labels
        if self.encoder is None:
            self.encoder = OneHotEncoder(Y)
        if self.f0 is None:
            # Deferred from __init__ (see comment there): the encoding
            # dimension is only known once an encoder exists.
            self.f0 = np.zeros(self.encoder.encoding_dim)
        encoded_Y, weights = self.encoder.encode_labels(Y)
        residue = encoded_Y - self.f0

        self._fit(X, Y, residue, weights, X_val, Y_val, **weak_learner_fit_kwargs)
        return self

    def _fit(self, X, Y, residue, weights, X_val, Y_val, **weak_learner_fit_kwargs):
        """
        Runs the boosting iterations. Should not be used as is but wrapped in the 'fit' method.
        """
        encoded_Y_pred = self.predict_encoded(X)
        encoded_Y_val_pred = self.predict_encoded(X_val) if X_val is not None else None

        starting_round = BoostingRound(len(self.weak_predictors))
        boost_manager = CallbacksManagerIterator(self, self.callbacks, starting_round)

        algo = self.algorithm(boost_manager, self.encoder, self.weak_learner,
                              self.filters_generator, self.aggregation_mechanism,
                              X, Y, residue, weights, encoded_Y_pred,
                              X_val, Y_val, encoded_Y_val_pred,
                              self.n_filters_per_layer, self.n_layers)

        algo.fit(self.weak_predictors, self.filters, **weak_learner_fit_kwargs)

    def predict(self, X, mode='best'):
        """
        predict the labels of the examples X
        :param X: examples
        :param mode: 'best' will only use the weights used by the best round and 'last' will use all the weights
        :return: (Tensor) decoded labels
        """
        return self.encoder.decode_labels(self.predict_encoded(X, mode))

    def predict_encoded(self, X, mode='last'):
        """
        predict the encoded labels of the examples X
        :param X: (Tensor) examples
        :param mode: 'best' will only use the weights used by the best round and 'last' will use all the weights
        :return: (Iterable) encoded labels, including the initial prediction f0
        """
        encoded_Y_pred = np.zeros((X.shape[0], self.encoder.encoding_dim)) + self.f0
        if mode == 'best':
            best = self.best_round.step_number + 1
            wps = self.weak_predictors[:best]
            filters = self.filters[:best]
        else:
            wps = self.weak_predictors
            filters = self.filters
        for wp, f in zip(wps, filters):
            S = self.aggregation_mechanism(X, f)
            encoded_Y_pred += wp.predict(S)
        return encoded_Y_pred

    def evaluate(self, X, Y, return_risk=False, mode='best'):
        """
        evaluate the accuracy of the trained algorithm
        :param X: examples to predict
        :param Y: labels
        :param return_risk: if True, the function will also calculate the quadratic risk of this prediction
        :param mode: 'best' will only use the weights used by the best round and 'last' will use all the weights
        :return: accuracy (double) or (accuracy, risk)
        """
        encoded_Y_pred = self.predict_encoded(X, mode)
        Y_pred = self.encoder.decode_labels(encoded_Y_pred)
        accuracy = accuracy_score(y_true=Y, y_pred=Y_pred)
        if return_risk:
            encoded_Y, W = self.encoder.encode_labels(Y)
            # Bug fix: predict_encoded already includes f0 in encoded_Y_pred,
            # so subtracting self.f0 again double-counted the initial
            # prediction. The residue is encoded_Y - encoded_Y_pred, which is
            # consistent with the risk computed in _evaluate_round.
            risk = np.sum(W * (encoded_Y - encoded_Y_pred)**2)
            return accuracy, risk
        return accuracy
class TransBoostAlgorithm:
    """
    This is an implementation of the TransBoost algorithm. It is intended to be used inside the TransBoost class API and not as is.
    """
    def __init__(self, boost_manager, encoder, weak_learner, filters_generator,
                 aggregation_mechanism,
                 X, Y, residue, weights, encoded_Y_pred,
                 X_val, Y_val, encoded_Y_val_pred,
                 n_filters_per_layer=(), n_layers=3):
        """
        Args:
            boost_manager (CallbacksManagerIterator): Iterator handling the callbacks and stopping conditions.
            encoder (LabelEncoder): Encoder used to encode/decode the labels.
            weak_learner (callable): Factory producing a weak learner that exposes 'fit' and 'predict'.
            filters_generator (FiltersGenerator): Generator of filters from a bank of examples.
            aggregation_mechanism (callable): Maps (examples, filters) to the attribute matrix fed to the weak learner.
            X, Y: Training examples and their labels.
            residue: Current boosting residue (encoded labels minus accumulated predictions).
            weights: Weights of the encoded labels.
            encoded_Y_pred: Accumulated encoded predictions on X.
            X_val, Y_val: Validation examples and labels, or None.
            encoded_Y_val_pred: Accumulated encoded predictions on X_val, or None.
            n_filters_per_layer (sequence of ints, optional, default=()): Number of filters for each layer.
                Bug fix: the default used to be the mutable 'list()'; an empty tuple is equivalent for every use here and cannot be shared-and-mutated across calls.
            n_layers (int, optional, default=3): Number of layers of filters.
        """
        self.boost_manager = boost_manager
        self.encoder = encoder
        self.weak_learner = weak_learner
        self.aggregation_mechanism = aggregation_mechanism

        self.X, self.Y, self.residue, self.weights = X, Y, residue, weights
        self.X_val, self.Y_val = X_val, Y_val
        self.encoded_Y_pred = encoded_Y_pred
        self.encoded_Y_val_pred = encoded_Y_val_pred

        self.filters_generator = filters_generator
        self.n_filters_per_layer = n_filters_per_layer
        self.n_layers = n_layers

    def fit(self, weak_predictors, filters, **weak_learner_fit_kwargs):
        """
        Executes the algorithm.
        Appends the weak_predictors list with the fitted weak learners.
        Args:
            weak_predictors (list): Reference to the list of weak_predictors of the model.
            filters (list of Filters object): Filters used in the aggregation mechanism.
            weak_learner_fit_kwargs: Keyword arguments needed to fit the weak learner.
        Returns None.
        """
        with self.boost_manager:  # boost_manager handles callbacks and terminating conditions
            for boosting_round in self.boost_manager:
                this_round_filters = get_multi_layers_filters(self.filters_generator, self.n_filters_per_layer)
                S = self.aggregation_mechanism(self.X, this_round_filters)
                weak_predictor = self.weak_learner().fit(S, self.residue, self.weights, **weak_learner_fit_kwargs)
                weak_prediction = weak_predictor.predict(S)
                # Remove this round's contribution from the residue before the
                # next round fits on what is left.
                self.residue -= weak_prediction
                weak_predictors.append(weak_predictor)
                filters.append(this_round_filters)
                self._evaluate_round(boosting_round, weak_prediction, weak_predictor, this_round_filters)

    def _evaluate_round(self, boosting_round, weak_prediction, weak_predictor, filters):
        """
        Compute the metrics for the current boosting round.
        Args:
            boosting_round (BoostingRound object): Object used to store the metrics of the round.
            weak_prediction (Array): Encoded predictions of this round's weak predictor on the training set.
            weak_predictor (weak learner object): Trained weak learner for the current round.
            filters (Filters object): Filters used to compute the attributes for this round.
        Returns None.
        """
        self.encoded_Y_pred += weak_prediction
        Y_pred = self.encoder.decode_labels(self.encoded_Y_pred)
        boosting_round.train_acc = accuracy_score(y_true=self.Y, y_pred=Y_pred)
        boosting_round.risk = np.sum(self.weights * self.residue**2)

        # Validation metrics only when a complete validation setup is present.
        if not (self.X_val is None or self.Y_val is None or self.encoded_Y_val_pred is None):
            S = self.aggregation_mechanism(self.X_val, filters)
            self.encoded_Y_val_pred += weak_predictor.predict(S)
            Y_val_pred = self.encoder.decode_labels(self.encoded_Y_val_pred)
            boosting_round.valid_acc = accuracy_score(y_true=self.Y_val, y_pred=Y_val_pred)
def advance_to_the_next_layer(X, filters):
    """
    Convolve the examples with the given filters to obtain their
    representation in the next layer, then center the activations over the
    batch and rescale each output position into [-1, 1].

    Args:
        X (Array of shape (n_examples, ...)): Examples to advance to the next layers
        filters (Filters object): Filters to be convoluted with the example
    Returns the transformed examples (Tensor of shape (n_examples, n_filters, ...)).
    """
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    kernel = filters.weights.to(device=X.device)
    activations = F.conv2d(X, kernel)

    # Center each output position across the batch.
    activations = activations - torch.mean(activations, dim=0)

    # Position-wise scale: the largest magnitude observed over the batch,
    # floored at 1 so values already inside [-1, 1] are left untouched.
    batch_max = torch.max(activations, dim=0, keepdim=True)[0]
    batch_min_magnitude = torch.abs(torch.min(activations, dim=0, keepdim=True)[0])
    scale = torch.max(batch_max, batch_min_magnitude).clamp(min=1)

    return activations / scale
def get_multi_layers_filters(filters_generator: FiltersGenerator, n_filters_per_layer):
"""
generate filters for each layers by convolving filters together:
filters of the second layers are obtaine by convolving filters of the first layer together
filters of the third layers are obtained by convolving filter of the second layer together
Args:
filters_generator | |
media types are selected, that it's just a normal form type and proceed
newFormType.type = 0;
#Update the form type's group
#If it's a new group
if post_data.get('ft_group') == 'NEW':
#Create a new formtype group
newFormTypeGroup = FormTypeGroup(name=post_data.get('ft_group_new'), project=PROJECT)
newFormTypeGroup.save()
newFormType.form_type_group = newFormTypeGroup
#Otherwise it's not a new group and not being removed so use the provided value
elif post_data.get('ft_group') != 'NONE':
newFormType.form_type_group = FormTypeGroup.objects.get(pk=post_data.get('ft_group'))
#update the formtypes status as hierarchical
if 'is_hierarchical' in post_data:
newFormType.is_hierarchical = True
else:
newFormType.is_hierarchical = False
#set privacy of form type
newFormType.is_public = False;
#save the FormType to give it a new pk in the database
newFormType.save()
#Each row in the CSV file represents a new 'Form' of the 'newFormType'
#Let's make a 'row' counter to help with indexing through the CSV file
row_index = 0
#Let's make an incremental counter for record type orders
order_counter = 1;
#I'm also going to make a List() of AttributeTypes/ReferenceTypes. This is done so that
#after 1 iteration of the importer loop, the reference types/ attribute types are already created. We
#don't need to create them for every row--so after the first row, we reference this list for the reference
# and attribute values
typeList = {}
print >> sys.stderr, "Just making sure things are working still....where's the stop point?"
keepAliveTimer = time.clock()
#print >>sys.stderr, "Starting row loop: " + str(timerB) + " Time elapsed = " + str(timerB-timerA)
#For each row of the CSV
for row in csv_json:
#print >> sys.stderr, "222 Just making sure things are working still....where's the stop point?"
timerBeginRow = time.clock()
#print >>sys.stderr, "Starting a new row: " + str(timerBeginRow)
#If we are past index '0' then let's continue with the rest of the importer
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ CREATE NEW FORM @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-----------------------------------------------------------------------------------------------------------
#Create a new Form and attach the newly created 'FormType' to 'form_type' in the 'Form' model
newForm = Form()
newForm.form_type = newFormType
newForm.project = PROJECT
newForm.is_public = False
#we will worry about adding the form_name / form_number later
#save the Form to give it a pk value in the database. Now we can use it for variable assignments later
newForm.save()
#For each column in the CSV Row and the column headers (essentially all the dict/JSON key values
#We setup a bool test to determine if we find a primary id that is selected or not.
#--if we don't find a primary id by the time we end the list, set the form's name to the current row counter number
foundAMainID = False
for key, value in row.iteritems():
#timerJ = time.clock()
#print >>sys.stderr, "Starting col loop: " + str(timerJ)
#First check if this column is the unique ID for this form
#we'll see if it is by checking the POST_DATA if 'record__(n)__ismainID' exists
if 'record__'+str(key)+'__ismainID' in post_data:
#If it is, then add this column value to the current Form's "form_number" or "form_name"
#Try to add it as an int first, otherwise add it as the form name
foundAMainID = True
try:
newForm.form_number = int(value)
newForm.form_name = value
except:
newForm.form_name = value
#save the Form
newForm.save()
#If it is not the ID field:
#If the current column is the value to reference a hierarchy field then add it to our hierarchy Dict
#--we will process this later, because if we try now, not all of the self-referencing forms will be imported yet
#--and this will more than likely miss a number of them
elif 'record__'+str(key)+'__ishierarchy' in post_data:
#We add the current Form's pk value for the key, and the reference pk as the value
hierarchyDict[str(newForm.pk)] = value
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD REFERENCE TYPE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#------------------------------------------------------------------------------------------------------------------------
#Test if it is a reference type by checking the POST_DATA if 'record__(n)__isreference' exists
#If it is a reference Type:
elif 'record__'+str(key)+'__isreference' in post_data:
#We want to make sure we only create the ReferenceType's once--otherwise we populate the database with several
#unecessary copies and relations that muddy everything. So if we're past the first row/iteration of the JSON, the reference types are
#already created and stored in a list to reference after
if row_index < 1:
#create a new FormRecordReferenceType and set "record_type" variable to the header column user-given name value
newFormRecordReferenceType = FormRecordReferenceType()
newFormRecordReferenceType.project = PROJECT
newFormRecordReferenceType.is_public = False
newFormRecordReferenceType.record_type = post_data.get('record__'+str(key)+'__name')
#also set "form_type_parent" to the current formType we are importing
newFormRecordReferenceType.form_type_parent = newFormType
#now set "form_type_reference" to the selected FormTypeReference value in the current importer Column
#if the value == 'default' then set reference to this same FormType
if post_data.get('record__'+str(key)+'__reftype') == 'default':
newFormRecordReferenceType.form_type_reference = newFormType
#otherwise set it to the given pk value of a FormType object
else:
newFormRecordReferenceType.form_type_reference = FormType.objects.get(pk=post_data.get('record__'+str(key)+'__reftype'))
#Set an arbitrary initial order for the type
newFormRecordReferenceType.order_number = order_counter
order_counter += 1
#save the Record Reference Type
newFormRecordReferenceType.save()
#add it to the list so that the reference value can reference it
typeList[key] = newFormRecordReferenceType
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD REFERENCE VALUE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-------------------------------------------------------------------------------------------------------------------------
#Create a new RecordReferenceValue
newFormRecordReferenceValue = FormRecordReferenceValue()
newFormRecordReferenceValue.project = PROJECT
newFormRecordReferenceValue.is_public = False
#set the "external_key_reference" to the column value of the csv row
newFormRecordReferenceValue.external_key_reference = value
#set the "form_parent" to the current row's Form
newFormRecordReferenceValue.form_parent = newForm
#set the "record_reference_type" to the current RecordReferenceType
logging.info("line626 " + str(typeList[key].form_type_reference) + " :: " + newFormRecordReferenceValue.external_key_reference)
newFormRecordReferenceValue.record_reference_type = typeList[key]
#save the value to give it a pk value
newFormRecordReferenceValue.save()
#logging.info("We are about to check the reference for: " + str(newFormRecordReferenceValue))
#If this reference is self-referencing to the same form formtype we're importing, then similar to the hierchy references,
#--we need to store a list of the reference value objects to load once the entire form type has been imported. We don't need key values because
#--the external key reference is already saved for the lookup on the model.
#--I'm using the objects rather pk values because that will save us time on SQL queries later
if post_data.get('record__'+str(key)+'__reftype') == 'default':
selfReferenceList.append(newFormRecordReferenceValue)
else:
#Now we need to set the value for "record_reference" which will involve a query
#And since the external key could contain multiple values, we need to split them by the comma delimeter
#logging.info(newFormRecordReferenceValue.external_key_reference + " : BEFORE SPLIT")
possibleRefValues = newFormRecordReferenceValue.external_key_reference.split(",")
#logging.info(str(possibleRefValues) + " : SPLIT")
#for all forms in the selected FormType reference
for aForm in newFormRecordReferenceValue.record_reference_type.form_type_reference.form_set.all().prefetch_related():
#if the current external ID value == to the iterated forms "form_num"
#Make sure we convert the INT form-num to a STR first or it will fail the check
for refValue in possibleRefValues:
if refValue == str(aForm.form_number):
#remove this value from future matches to ensure we don't double add it
possibleRefValues.remove(refValue)
#set the current FormRecordReferenceValue.record_reference to the current form in the loop iteration
newFormRecordReferenceValue.record_reference.add(aForm)
#logging.info(newFormRecordReferenceValue.external_key_reference + " : AFTER SPLIT")
#if there are no matches by the last iteration of the loop,
#we can do nothing to leave the record_reference value as "None" (the user can set this later)
#This might happen if the user is importing a new form type that references itself, or references
#another form type that hasn't yet been imported. The external_key_reference's are still saved
#so the user can run another tool to match these keys later once all the Form Types and forms have been
#imported through this tool
#save the RecordReferenceValue
newFormRecordReferenceValue.save()
#timerE = time.clock()
#print >>sys.stderr, "Ending ref lookup: " + str(timerE) + " Time elapsed = " + str(timerE-timerD)
#If it is not a reference type, then we are adding an attribute type instead
else:
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD ATTRIBUTE TYPE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#------------------------------------------------------------------------------------------------------------------------
#We want to make sure we only create the AttributeType's once--otherwise we populate the database with several
#unecessary copies and relations that muddy everything. So if we're past the first row, the attribute types are
#already created and stored in | |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 11:54:36 2021
@author: Robert https://github.com/rdzudzar
"""
# Package imports
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import cmasher as cmr
import numpy as np
#from scipy import stats
import scipy.stats
import math
from bokeh.plotting import figure
from bokeh.models import Legend
#from bokeh.io import curdoc
# Helper function imports
# These are pre-computed so that they don't slow down the App
from helper_functions import distr_selectbox_names,creating_dictionaries
import time
import base64
import collections
def page_fit():
"""
The fit page in this app made with Streamlit is for a fitting a selected
distribution(s) to the User imported data.
"""
name_docstring_dict, name_eq_dict, name_proper_dict, \
all_dist_params_dict, name_url_dict = creating_dictionaries()
st.sidebar.info("""
Import your data as a **.csv** file
and follow instructions to fit a
continuous distribution(s) to your data.
""")
# Add a bit empy space before showing About
st.sidebar.text("")
st.sidebar.markdown("**Select Figure Mode:**")
plot_mode = st.sidebar.radio("Options", ('Dark Mode', 'Light Mode'))
st.sidebar.text("")
st.sidebar.text("")
st.markdown("<h1 style='text-align: center;'> Fit distribution(s) </h1>",
unsafe_allow_html=True)
#Streamlit Sharing if you set the config option in .streamlit/config.toml:
#[server]
#maxUploadSize=2
# Using cache as we perform only once 'loading' of the data
    @st.cache
    def load_csv():
        """ Get the loaded .csv into Pandas dataframe.

        Reads the 'input' upload handle (the st.file_uploader result defined
        below; resolved through the closure at call time). 'sep=None' with the
        python engine makes pandas sniff the delimiter automatically.
        """
        df_load = pd.read_csv(input, sep=None , engine='python',
                              encoding='utf-8')
        return df_load
# Streamlit - upload a file
input = st.file_uploader('')
# Ask for upload if not yet.
if input is None:
st.write('Upload your data, or:')
download_sample = st.checkbox("Download sample data")
# Get the Sample file from App GitHub repository
try:
if download_sample:
st.markdown(""" [Download](https://github.com/rdzudzar/DistributionAnalyser/blob/main/sample_data/sample_da.csv)""")
st.markdown("""**after you download data,
upload them above.*""")
except:
# If the user imports file - parse it, and ask User to select a column.
if input:
with st.spinner('Loading data...'):
# Pass to a function above so we can use st.cache
df = load_csv()
columns = list(df.columns)
# User can see which columns are present in the imported file
st.write("Available columns (expand to see):")
st.write(columns)
# Select menu for User to pick the data
# This 'data_col' selection is parsed as selected dataframe
data_col = st.selectbox("Select input",
options = columns,
format_func=lambda x: x)
# Get the selected column
# Replace inf/-inf with NaN and remove NaN if present
df = df[data_col].replace([np.inf, -np.inf], np.nan).dropna()
def plot(df, data_stat):
    """
    Histogram of the input data, optionally annotated with percentiles,
    the empirical (histogram-derived) PDF/CDF, mean and median.
    Styling follows the active mode (module-level `plot_mode`).
    """
    # Style must be configured BEFORE the figure is created.
    if plot_mode == 'Light Mode':
        hist_edge_color = 'black'
        hist_color= 'white'
        quant_color = 'black'
        median_color = 'black'
        pdf_color = '#08519c'
        cdf_color = 'black'
        plt.style.use('classic')
        plt.rcParams['figure.facecolor'] = 'white'
    if plot_mode == 'Dark Mode':
        hist_edge_color = 'black'
        hist_color= 'white'
        median_color = 'magenta'
        quant_color = 'white'
        pdf_color = '#fec44f'
        cdf_color = 'white'
        plt.style.use('dark_background')
        plt.rcParams['figure.facecolor'] = 'black'
    fig, ax = plt.subplots(1,1)
    # Density histogram with sqrt(N) bins.
    ax.hist(df, bins=round(math.sqrt(len(df))),
            density=True, color=hist_color,
            ec=hist_edge_color, alpha=0.3)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.tick_params(top=False, bottom=True, left=True, right=False,
            labelleft=True, labelbottom=True)
    ax.set_xlabel(f'{data_col}')
    ax.set_ylabel('Density')
    # If user selects data_stat: overlay empirical statistics.
    if data_stat:
        # Hist contains tuple: n bins, (n+1) bin boundaries
        hist = np.histogram(df, bins=round(math.sqrt(len(df))))
        # Generates a distribution given by a histogram.
        hist_dist = scipy.stats.rv_histogram(hist)
        x_plot = np.linspace(min(df), max(df), len(df))
        # Percentile markers placed on the empirical CDF.
        q = [0.05, 0.25, 0.50, 0.75, 0.95]
        n = ['5th','25th','50th','75th','95th']
        quantiles = df.quantile(q)
        q_max = hist_dist.cdf(quantiles)
        for i, qu in enumerate(quantiles):
            ax.plot(qu, q_max[i], alpha=0.5, color=quant_color,
                    markersize=10, marker='D')
            ax.text(qu, q_max[i]+(q_max[i]/10), f'{n[i]}', ha='center',
                    alpha=0.5)
        # Empty scatter: yields a single legend entry for all markers.
        ax.scatter([], [], alpha=0.5, color=quant_color, marker='D',
                   label='Percentiles')
        # The pdf is defined as a stepwise function from the provided histogram.
        # The cdf is a linear interpolation of the pdf.
        ax.plot(x_plot, hist_dist.pdf(x_plot), linewidth=2,
                color=pdf_color, label='PDF')
        ax.plot(x_plot, hist_dist.cdf(x_plot), linewidth=2,
                color=cdf_color, label='CDF')
        ax.vlines(np.mean(df), ymin=0, ymax=hist_dist.cdf(np.mean(df)),
                  color='red', linestyle='--', linewidth=2,
                  label=f'Mean {round(np.mean(df),2)}')
        ax.vlines(np.median(df), ymin=0, ymax=hist_dist.cdf(np.median(df)),
                  color=median_color, linestyle='--', linewidth=2,
                  label=f'Median {round(np.median(df),2)}')
        leg = plt.legend(loc=0)
        leg.get_frame().set_edgecolor("#525252")
    return fig
def bokeh_set_plot_properties(plot_mode, n):
    """
    Build a Bokeh figure skeleton styled for the active mode.

    Parameters
    ----------
    plot_mode : string; 'Dark Mode' or 'Light Mode'
    n : int; number of line colors to sample from the cmasher colormap

    Returns
    -------
    p : Bokeh Figure
    colors_cmr : list of hex colors assigned to the fitted-distribution lines
    """
    p = figure(height=450, width=700)
    p.add_layout(Legend(), 'right')
    p.legend.title = '15 Best Fits and their SSE'
    p.legend.background_fill_alpha = 1
    p.legend.label_text_font_size = '11pt'
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.xaxis.axis_label = f'{data_col}'
    p.yaxis.axis_label = 'Density'

    if plot_mode == 'Dark Mode':
        text_color, back_color, legend_color = 'white', 'black', 'yellow'
        # n colors from the reversed cmasher rainforest map (n capped to
        # 15 by the caller's legend logic).
        colors_cmr = cmr.take_cmap_colors('cmr.rainforest_r',
                                          n, cmap_range=(0.1, 0.7),
                                          return_fmt='hex')
    if plot_mode == 'Light Mode':
        text_color, back_color, legend_color = 'black', 'white', 'blue'
        colors_cmr = cmr.take_cmap_colors('cmr.rainforest',
                                          n, cmap_range=(0.2, 0.9),
                                          return_fmt='hex')

    # Apply the chosen palette to the axis/text elements.
    for axis in (p.xaxis, p.yaxis):
        axis.major_label_text_color = text_color
        axis.axis_label_text_color = text_color
        axis.major_tick_line_color = text_color
        axis.minor_tick_line_color = text_color
        axis.axis_line_color = text_color
    p.legend.title_text_color = text_color
    p.border_fill_color = back_color
    p.background_fill_color = back_color
    p.legend.background_fill_color = back_color
    p.legend.label_text_color = legend_color
    p.title.text_color = legend_color
    p.outline_line_color = back_color

    return p, colors_cmr
def bokeh_pdf_plot_results(df, results, n):
    """
    Draw the data histogram plus the best-fit PDFs on an interactive
    Bokeh figure (clicking legend entries toggles individual lines).

    Parameters
    ----------
    df : input data
    results : fitting results; not read here -- the fit dictionary is
        recomputed via fit_data(df). Kept for caller compatibility.
    n : integer; first n best-fit PDFs to show on the figure.

    Returns
    -------
    p : Bokeh interactive figure (data histogram + best-fit PDFs)
    """
    # Best-fit parameters per distribution, ordered best-first.
    fit_dict_res = fit_data(df)

    hist, edges = np.histogram(df, density=True,
                               bins=round(math.sqrt(len(df))))

    # Figure skeleton styled for the active mode.
    p, colors_cmr = bokeh_set_plot_properties(plot_mode, n)

    # Data histogram.
    p.quad(top=hist, bottom=0, left=edges[:-1],
           right=edges[1:], line_color="black",
           line_width = 0.3,
           fill_color='white', fill_alpha = 0.3)

    for i, (distribution, result) in enumerate(fit_dict_res.items()):
        # Only the best fit plus the next 14 runners-up are drawn.
        if i >= 15:
            continue

        sse = round(result[0], 2)
        arg, loc, scale = result[1], result[2], result[3]

        # Human-readable "name = value" summary of fitted parameters.
        flat_list = list(flatten(result[1:4]))
        param_names = (distribution.shapes + ', loc, scale').split(', ') if distribution.shapes else ['loc', 'scale']
        param_str = ', '.join([f'{k} = {round(v,2)}' for k, v
                               in zip(param_names, flat_list)])

        # Evaluate the fitted PDF on an even grid over the data range.
        x_plot = np.linspace(min(df), max(df), 400)
        y_plot = distribution.pdf(x_plot, loc=loc, scale=scale, *arg)

        legend_entry = str(distribution.name) + ": " + str(sse)
        if i == 0:
            # Best fit: thick solid line, visible by default.
            line = p.line(x_plot, y_plot, line_width=5,
                          line_color = colors_cmr[0],
                          legend_label=legend_entry
                          )
            line.visible = True
            p.legend.click_policy = "hide"
            p.title.text = f'Best fit {distribution.name}: {param_str}'

            if distribution.name in name_eq_dict.keys():
                st.markdown(f"""You can read more about best fit distribution:
                        [**{name_url_dict[distribution.name][1]}**]
                        ({name_url_dict[distribution.name][0]})
                        """)
        else:
            # Runners-up: dashed, hidden until toggled via the legend.
            lines = p.line(x_plot, y_plot, line_width=2.5,
                           line_dash="10 2",
                           line_color = colors_cmr[i],
                           legend_label=legend_entry
                           )
            lines.visible = False
            p.legend.click_policy = "hide"

    return p
def bokeh_cdf_plot_results(df, results, n):
"""
Process results and plot them on the Bokeh Figure. User can interact
with the legend (clicking on the items will enhance lines on Figure)
Parameters
----------
df : input data
results : nested list (contains tuples) with the data from the
fitting (contains [sse, arg, loc, scale])
n : integer; First n best fit CDFs to show on the Figure.
plot_mode : string; 'Dark Mode' or 'Light Mode' (connected with radio
button)
Returns
-------
p : Returns Bokeh interactive figure (data hostogram+best fit CDFs)
"""
# Hist contains tuple: n bins, (n+1) | |
#*****************************************************************************
# inventory.py
#
# XML I/O for Inventory (new schema)
#
# (c) 2006 <NAME>, GFZ Potsdam
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version. For more information, see http://www.gnu.org/
#*****************************************************************************
import xmlwrap as _xmlwrap
from seiscomp.db import DBError
try:
from xml.etree import cElementTree as ET # Python 2.5?
except ImportError:
import cElementTree as ET
# Namespace-qualified root element tag of an inventory XML document.
_root_tag = "{http://geofon.gfz-potsdam.de/ns/Inventory/1.0/}inventory"
class _UsedSensor(object):
def __init__(self):
self.calibration = set()
def reg_calibration(self, id):
self.calibration.add(id)
class _UsedDatalogger(object):
def __init__(self):
self.decimation = set()
self.calibration = set()
def reg_decimation(self, sampleRateNumerator, sampleRateDenominator):
self.decimation.add((sampleRateNumerator, sampleRateDenominator))
def reg_calibration(self, id):
self.calibration.add(id)
class _UsedAuxilliaryDevice(object):
def __init__(self):
self.source = set()
def reg_source(self, id):
self.source.add(id)
class _UsedInstruments(object):
    """Registry of instruments referenced by exported streams.

    Maps instrument names to their _Used* bookkeeping objects, creating
    an entry on first registration.
    """

    def __init__(self):
        self.sensor = {}
        self.datalogger = {}
        self.auxDevice = {}

    def reg_sensor(self, name):
        """Return the _UsedSensor for *name*, creating it if needed."""
        if name not in self.sensor:
            self.sensor[name] = _UsedSensor()
        return self.sensor[name]

    def reg_datalogger(self, name):
        """Return the _UsedDatalogger for *name*, creating it if needed."""
        if name not in self.datalogger:
            self.datalogger[name] = _UsedDatalogger()
        return self.datalogger[name]

    def reg_auxDevice(self, name):
        """Return the _UsedAuxilliaryDevice for *name*, creating it if needed."""
        if name not in self.auxDevice:
            self.auxDevice[name] = _UsedAuxilliaryDevice()
        return self.auxDevice[name]
class _UsedFilters(object):
def __init__(self):
self.filters = set()
def reg_filter(self, name):
self.filters.add(name)
#*****************************************************************************
# XML IN (instruments)
#*****************************************************************************
def _responseFIR_in(xresponseFIR, inventory):
    """Merge one XML <responseFIR> element into the inventory.

    action == "delete" removes the named response (missing entries are
    ignored); otherwise the entry is created -- or recreated when its
    publicID changed -- and the XML attributes are copied onto it.
    """
    if xresponseFIR.action == "delete":
        try:
            inventory.remove_responseFIR(xresponseFIR.name)
        except KeyError:
            pass  # already absent; deletion is idempotent
        return
    try:
        responseFIR = inventory.responseFIR[xresponseFIR.name]
        if responseFIR.publicID != xresponseFIR.publicID:
            # publicID changed: drop the old object, re-insert under new ID.
            inventory.remove_responseFIR(xresponseFIR.name)
            raise KeyError
    except KeyError:
        responseFIR = inventory.insert_responseFIR(xresponseFIR.name, publicID=xresponseFIR.publicID)
    xresponseFIR._copy_to(responseFIR)
def _responsePAZ_in(xresponsePAZ, inventory):
    """Merge one XML <responsePAZ> element into the inventory.

    Same merge semantics as _responseFIR_in: delete is idempotent, a
    changed publicID forces re-insertion.
    """
    if xresponsePAZ.action == "delete":
        try:
            inventory.remove_responsePAZ(xresponsePAZ.name)
        except KeyError:
            pass  # already absent
        return
    try:
        responsePAZ = inventory.responsePAZ[xresponsePAZ.name]
        if responsePAZ.publicID != xresponsePAZ.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_responsePAZ(xresponsePAZ.name)
            raise KeyError
    except KeyError:
        responsePAZ = inventory.insert_responsePAZ(xresponsePAZ.name, publicID=xresponsePAZ.publicID)
    xresponsePAZ._copy_to(responsePAZ)
def _responsePolynomial_in(xresponsePolynomial, inventory):
    """Merge one XML <responsePolynomial> element into the inventory.

    Same merge semantics as _responseFIR_in.
    """
    if xresponsePolynomial.action == "delete":
        try:
            inventory.remove_responsePolynomial(xresponsePolynomial.name)
        except KeyError:
            pass  # already absent
        return
    try:
        responsePolynomial = inventory.responsePolynomial[xresponsePolynomial.name]
        if responsePolynomial.publicID != xresponsePolynomial.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_responsePolynomial(xresponsePolynomial.name)
            raise KeyError
    except KeyError:
        responsePolynomial = inventory.insert_responsePolynomial(xresponsePolynomial.name, publicID=xresponsePolynomial.publicID)
    xresponsePolynomial._copy_to(responsePolynomial)
def _responseFAP_in(xresponseFAP, inventory):
    """Merge one XML <responseFAP> element into the inventory.

    Same merge semantics as _responseFIR_in.
    """
    if xresponseFAP.action == "delete":
        try:
            inventory.remove_responseFAP(xresponseFAP.name)
        except KeyError:
            pass  # already absent
        return
    try:
        responseFAP = inventory.responseFAP[xresponseFAP.name]
        if responseFAP.publicID != xresponseFAP.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_responseFAP(xresponseFAP.name)
            raise KeyError
    except KeyError:
        responseFAP = inventory.insert_responseFAP(xresponseFAP.name, publicID=xresponseFAP.publicID)
    xresponseFAP._copy_to(responseFAP)
def _decimation_in(xdecim, device):
if xdecim.action == "delete":
try:
device.remove_decimation(xdecim.sampleRateNumerator,
xdecim.sampleRateDenominator)
except KeyError:
pass
return
try:
decim = device.decimation[xdecim.sampleRateNumerator][xdecim.sampleRateDenominator]
except KeyError:
decim = device.insert_decimation(xdecim.sampleRateNumerator,
xdecim.sampleRateDenominator)
xdecim._copy_to(decim)
def _calibration_in(xcalib, device):
if xcalib.action == "delete":
try:
device.remove_calibration(xcalib.serialNumber, xcalib.channel, xcalib.start)
except KeyError:
pass
return
try:
calib = device.calibration[xcalib.serialNumber][xcalib.channel][xcalib.start]
except KeyError:
calib = device.insert_calibration(xcalib.serialNumber, xcalib.channel, xcalib.start)
xcalib._copy_to(calib)
def _datalogger_in(xlogger, inventory):
    """Merge one <datalogger> element (incl. nested decimations and
    calibrations) into the inventory."""
    if xlogger.action == "delete":
        try:
            inventory.remove_datalogger(xlogger.name)
        except KeyError:
            pass  # already absent
        return
    try:
        logger = inventory.datalogger[xlogger.name]
        if logger.publicID != xlogger.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_datalogger(xlogger.name)
            raise KeyError
    except KeyError:
        logger = inventory.insert_datalogger(xlogger.name, publicID=xlogger.publicID)
    xlogger._copy_to(logger)
    # Merge nested child elements.
    for xdecim in xlogger.decimation:
        _decimation_in(xdecim, logger)
    for xcalib in xlogger.calibration:
        _calibration_in(xcalib, logger)
def _sensor_in(xsensor, inventory):
    """Merge one <sensor> element (incl. nested calibrations) into the
    inventory."""
    if xsensor.action == "delete":
        try:
            inventory.remove_sensor(xsensor.name)
        except KeyError:
            pass  # already absent
        return
    try:
        sensor = inventory.sensor[xsensor.name]
        if sensor.publicID != xsensor.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_sensor(xsensor.name)
            raise KeyError
    except KeyError:
        sensor = inventory.insert_sensor(xsensor.name, publicID=xsensor.publicID)
    xsensor._copy_to(sensor)
    # Merge nested calibration epochs.
    for xcalib in xsensor.calibration:
        _calibration_in(xcalib, sensor)
def _aux_source_in(xaux_source, auxDevice):
if xaux_source.action == "delete":
try:
auxDevice.remove_auxSource(xaux_source.name)
except KeyError:
pass
return
try:
aux_source = auxDevice.aux_source[xaux_source.name]
except KeyError:
aux_source = auxDevice.insert_auxSource(xaux_source.name)
xaux_source._copy_to(aux_source)
def _auxDevice_in(xauxDevice, inventory):
    """Merge one <auxDevice> element (incl. nested sources) into the
    inventory."""
    if xauxDevice.action == "delete":
        try:
            inventory.remove_auxDevice(xauxDevice.name)
        except KeyError:
            pass  # already absent
        return
    try:
        auxDevice = inventory.auxDevice[xauxDevice.name]
        if auxDevice.publicID != xauxDevice.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_auxDevice(xauxDevice.name)
            raise KeyError
    except KeyError:
        auxDevice = inventory.insert_auxDevice(xauxDevice.name, publicID=xauxDevice.publicID)
    xauxDevice._copy_to(auxDevice)
    # Merge nested aux sources.
    for xaux_source in xauxDevice.source:
        _aux_source_in(xaux_source, auxDevice)
#*****************************************************************************
# XML IN (stations)
#*****************************************************************************
def _AuxStream_in(xaux, sl):
if xaux.action == "delete":
try:
sta.remove_auxStream(xaux.code, xaux.start)
except KeyError:
pass
return
try:
aux = sl.auxStream[xaux.code][xaux.start]
except KeyError:
aux = sl.insert_auxStream(xaux.code, xaux.start)
xaux._copy_to(aux)
def _Stream_in(xstrm, sta):
if xstrm.action == "delete":
try:
sta.remove_Stream(xstrm.code, xstrm.start)
except KeyError:
pass
return
try:
strm = sta.stream[xstrm.code][xstrm.start]
except KeyError:
strm = sta.insert_stream(xstrm.code, xstrm.start)
xstrm._copy_to(strm)
def _SensorLocation_in(xsl, sta):
    """Merge one sensorLocation epoch (and its streams/auxStreams) into
    station *sta*."""
    if xsl.action == "delete":
        try:
            sta.remove_sensorLocation(xsl.code, xsl.start)
        except KeyError:
            pass  # already absent
        return
    try:
        sl = sta.sensorLocation[xsl.code][xsl.start]
        if sl.publicID != xsl.publicID:
            # publicID changed: drop and re-insert under the new ID.
            sta.remove_sensorLocation(xsl.code, xsl.start)
            raise KeyError
    except KeyError:
        sl = sta.insert_sensorLocation(xsl.code, xsl.start, publicID=xsl.publicID)
    xsl._copy_to(sl)
    # Merge nested stream epochs.
    for xStream in xsl.stream:
        _Stream_in(xStream, sl)
    for xAuxStream in xsl.auxStream:
        _AuxStream_in(xAuxStream, sl)
def _Station_in(xsta, net):
    """Merge one station epoch (and its sensor locations) into network
    *net*."""
    if xsta.action == "delete":
        try:
            net.remove_station(xsta.code, xsta.start)
        except KeyError:
            pass  # already absent
        return
    try:
        sta = net.station[xsta.code][xsta.start]
        if sta.publicID != xsta.publicID:
            # publicID changed: drop and re-insert under the new ID.
            net.remove_station(xsta.code, xsta.start)
            raise KeyError
    except KeyError:
        sta = net.insert_station(xsta.code, xsta.start, publicID=xsta.publicID)
    xsta._copy_to(sta)
    # Merge nested sensor locations.
    for xsensorLocation in xsta.sensorLocation:
        _SensorLocation_in(xsensorLocation, sta)
def _Network_in(xnet, inventory):
    """Merge one network epoch (and its stations) into the inventory."""
    if xnet.action == "delete":
        try:
            inventory.remove_network(xnet.code, xnet.start)
        except KeyError:
            pass  # already absent
        return
    try:
        net = inventory.network[xnet.code][xnet.start]
        if net.publicID != xnet.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_network(xnet.code, xnet.start)
            raise KeyError
    except KeyError:
        net = inventory.insert_network(xnet.code, xnet.start, publicID=xnet.publicID)
    xnet._copy_to(net)
    # Merge nested stations.
    for xsta in xnet.station:
        _Station_in(xsta, net)
def _StationReference_in(xsref, gr):
if xsref.action == "delete":
try:
inventory.remove_stationReference(xsref.stationID)
except KeyError:
pass
return
try:
sref = gr.stationReference[xsref.stationID]
except KeyError:
sref = gr.insert_stationReference(xsref.stationID)
xsref._copy_to(sref)
def _StationGroup_in(xgr, inventory):
    """Merge one stationGroup (and its station references) into the
    inventory."""
    if xgr.action == "delete":
        try:
            inventory.remove_stationGroup(xgr.code)
        except KeyError:
            pass  # already absent
        return
    try:
        gr = inventory.stationGroup[xgr.code]
        if gr.publicID != xgr.publicID:
            # publicID changed: drop and re-insert under the new ID.
            inventory.remove_stationGroup(xgr.code)
            raise KeyError
    except KeyError:
        gr = inventory.insert_stationGroup(xgr.code, publicID=xgr.publicID)
    xgr._copy_to(gr)
    # Merge nested station references.
    for xsref in xgr.stationReference:
        _StationReference_in(xsref, gr)
#*****************************************************************************
# XML IN (doc)
#*****************************************************************************
def _xmldoc_in(xinventory, inventory):
    """Merge a parsed XML inventory document into *inventory*.

    Instrument/response elements are merged before networks and station
    groups, preserving the original processing order.
    """
    mergers = (
        (xinventory.responseFIR, _responseFIR_in),
        (xinventory.responsePAZ, _responsePAZ_in),
        (xinventory.responsePolynomial, _responsePolynomial_in),
        (xinventory.responseFAP, _responseFAP_in),
        (xinventory.sensor, _sensor_in),
        (xinventory.datalogger, _datalogger_in),
        (xinventory.auxDevice, _auxDevice_in),
        (xinventory.network, _Network_in),
        (xinventory.stationGroup, _StationGroup_in),
    )
    for elements, merge in mergers:
        for xelem in elements:
            merge(xelem, inventory)
#*****************************************************************************
# XML OUT (instruments)
#*****************************************************************************
def _responseFIR_out(xinventory, responseFIR, modified_after):
if modified_after is None or responseFIR.last_modified >= modified_after:
xresponseFIR = xinventory._new_responseFIR()
xresponseFIR._copy_from(responseFIR)
xinventory._append_child(xresponseFIR)
return True
return False
def _responsePAZ_out(xinventory, responsePAZ, modified_after):
if modified_after is None or responsePAZ.last_modified >= modified_after:
xresponsePAZ = xinventory._new_responsePAZ()
xresponsePAZ._copy_from(responsePAZ)
xinventory._append_child(xresponsePAZ)
return True
return False
def _responsePolynomial_out(xinventory, responsePolynomial, modified_after):
if modified_after is None or responsePolynomial.last_modified >= modified_after:
xresponsePolynomial = xinventory._new_responsePolynomial()
xresponsePolynomial._copy_from(responsePolynomial)
xinventory._append_child(xresponsePolynomial)
return True
return False
def _responseFAP_out(xinventory, responseFAP, modified_after):
if modified_after is None or responseFAP.last_modified >= modified_after:
xresponseFAP = xinventory._new_responseFAP()
xresponseFAP._copy_from(responseFAP)
xinventory._append_child(xresponseFAP)
return True
return False
def _decimation_out(xdevice, decim, modified_after, filters):
if filters is not None:
for f in str(decim.digitalFilterChain).split():
filters.reg_filter(f)
for f in str(decim.analogueFilterChain).split():
filters.reg_filter(f)
if modified_after is None or decim.last_modified >= modified_after:
xdecim = xdevice._new_decimation()
xdecim._copy_from(decim)
xdevice._append_child(xdecim)
return True
return False
def _gain_out(xcalib, gain, modified_after):
if modified_after is None or gain.last_modified >= modified_after:
xgain = xcalib._new_gain()
xgain._copy_from(gain)
xcalib._append_child(xgain)
return True
return False
def _dataloggerCalibration_out(xdevice, calib, modified_after):
xcalib = xdevice._new_calibration()
if modified_after is None or calib.last_modified >= modified_after:
xcalib._copy_from(calib)
retval = True
else:
xcalib.serialNumber = calib.serialNumber
retval = False
if retval:
xdevice._append_child(xcalib)
return retval
def _sensorCalibration_out(xdevice, calib, modified_after):
xcalib = xdevice._new_calibration()
if modified_after is None or calib.last_modified >= modified_after:
xcalib._copy_from(calib)
retval = True
else:
xcalib.serialNumber = calib.serialNumber
retval = False
if retval:
xdevice._append_child(xcalib)
return retval
def _datalogger_out(xinventory, logger, modified_after, used, filters):
    """Serialize one datalogger plus its used decimations/calibrations.

    The element is appended when the logger itself -- or any of its
    selected children -- was modified; otherwise only a publicID stub is
    built (and discarded). Returns True when an element was appended.
    NOTE: uses dict.itervalues(), i.e. Python 2 only.
    """
    xlogger = xinventory._new_datalogger()
    if modified_after is None or logger.last_modified >= modified_after:
        xlogger._copy_from(logger)
        retval = True
    else:
        # Stub: children may still force the element out below.
        xlogger.publicID = logger.publicID
        retval = False
    # decimation is keyed numerator -> denominator -> object.
    for i in logger.decimation.itervalues():
        for j in i.itervalues():
            if used is None or \
                    (j.sampleRateNumerator, j.sampleRateDenominator) in used.decimation:
                retval |= _decimation_out(xlogger, j, modified_after, filters)
    # calibration is keyed serialNumber -> channel -> start -> object.
    for c in [t for sn in logger.calibration.itervalues()\
            for ch in sn.itervalues()\
            for t in ch.itervalues()]:
        if used is None or c.serialNumber in used.calibration:
            retval |= _dataloggerCalibration_out(xlogger, c, modified_after)
    if retval:
        xinventory._append_child(xlogger)
    return retval
def _sensor_out(xinventory, sensor, modified_after, used, filters):
    """Serialize one sensor plus its used calibration epochs.

    The sensor's response filter is always registered so the caller
    exports it. Returns True when an element was appended.
    NOTE: uses dict.itervalues(), i.e. Python 2 only.
    """
    xsensor = xinventory._new_sensor()
    if filters is not None and sensor.response != "":
        filters.reg_filter(sensor.response)
    if modified_after is None or sensor.last_modified >= modified_after:
        xsensor._copy_from(sensor)
        retval = True
    else:
        # Stub: calibrations may still force the element out below.
        xsensor.publicID = sensor.publicID
        retval = False
    # calibration is keyed serialNumber -> channel -> start -> object.
    for c in [t for sn in sensor.calibration.itervalues()\
            for ch in sn.itervalues()\
            for t in ch.itervalues()]:
        if used is None or c.serialNumber in used.calibration:
            retval |= _sensorCalibration_out(xsensor, c, modified_after)
    if retval:
        xinventory._append_child(xsensor)
    return retval
def _aux_source_out(xauxDevice, aux_source, modified_after):
if modified_after is None or aux_source.last_modified >= modified_after:
xaux_source = xauxDevice._new_source()
xaux_source._copy_from(aux_source)
xauxDevice._append_child(xaux_source)
return True
return False
def _auxDevice_out(xinventory, auxDevice, modified_after, used):
    """Serialize one auxDevice plus its used sources.

    Returns True when an element was appended.
    NOTE: uses dict.itervalues(), i.e. Python 2 only.
    """
    xauxDevice = xinventory._new_auxDevice()
    if modified_after is None or auxDevice.last_modified >= modified_after:
        xauxDevice._copy_from(auxDevice)
        retval = True
    else:
        # Stub: sources may still force the element out below.
        xauxDevice.publicID = auxDevice.publicID
        retval = False
    for i in auxDevice.source.itervalues():
        if used is None or i.name in used.source:
            retval |= _aux_source_out(xauxDevice, i, modified_after)
    if retval:
        xinventory._append_child(xauxDevice)
    return retval
#*****************************************************************************
# XML OUT (stations)
#*****************************************************************************
def _Stream_out(xsl, strm, modified_after, used_instr):
if used_instr is not None:
used_sensor = used_instr.reg_sensor(strm.sensor)
if strm.sensorSerialNumber != "":
used_sensor.reg_calibration(strm.sensorSerialNumber)
used_logger = used_instr.reg_datalogger(strm.datalogger)
used_logger.reg_decimation(strm.sampleRateNumerator, strm.sampleRateDenominator)
if strm.dataloggerSerialNumber != "":
used_logger.reg_calibration(strm.dataloggerSerialNumber)
| |
# -*- coding: utf-8 -*-
import os
import sys
import random
import time
import re
import json
import chardet
import operator
from io import BytesIO
import shutil
import hashlib
import threading
import fnmatch
import platform
import traceback
import multiprocessing
import tcfcli.common.base_infor as infor
from multiprocessing import Process, queues
from queue import Queue
from zipfile import ZipFile, ZIP_DEFLATED
from tcfcli.help.message import DeployHelp as help
from tcfcli.common.template import Template
from tcfcli.common.user_exceptions import *
from tcfcli.libs.utils.scf_client import ScfClient, FunctionStatus, ResourceStatus
from tcfcli.common import tcsam
from tcfcli.common.user_config import UserConfig
from tcfcli.common.tcsam.tcsam_macro import TcSamMacro as tsmacro
from tcfcli.libs.utils.cos_client import CosClient
from tcfcli.common.operation_msg import Operation
from tcfcli.common.cam_role import list_scf_role
from tcfcli.cmds.function.information.cli import Information
from tcfcli.common.gitignore import IgnoreList, MATCH_IGNORE
# Working directory and build staging area for packaged function code.
_CURRENT_DIR = '.'
_BUILD_DIR = os.path.join(os.getcwd(), '.scf_build')
# Default SAM template filename looked up in the current directory.
DEF_TMP_FILENAME = 'template.yaml'
# Valid regions / runtimes shared with the rest of the CLI.
REGIONS = infor.REGIONS
SERVICE_RUNTIME = infor.SERVICE_RUNTIME
@click.command(short_help=help.SHORT_HELP)
@click.option('--template-file', '-t', default=DEF_TMP_FILENAME, type=click.Path(exists=True), help=help.TEMPLATE_FILE)
@click.option('--cos-bucket', '-c', type=str, help=help.COS_BUCKET)
@click.option('--name', '-n', type=str, help=help.NAME)
@click.option('--namespace', '-ns', type=str, help=help.NAMESPACE)
@click.option('--region', '-r', type=str, help=help.REGION)
@click.option('--forced', '-f', is_flag=True, default=False, help=help.FORCED)
@click.option('--skip-event', is_flag=True, default=False, help=help.SKIP_EVENT)
@click.option('--without-cos', is_flag=True, default=False, help=help.WITHOUT_COS)
@click.option('--history', is_flag=True, default=False, help=help.HISTORY)
@click.option('--update-event', '-ue', is_flag=True, default=False, help=help.UPDATE_EVENT)
@click.option('--no-color', '-nc', is_flag=True, default=False, help=help.NOCOLOR)
def deploy(template_file, cos_bucket, name, namespace, region, forced, skip_event, without_cos, history, update_event,
           no_color):
    '''
    \b
    Scf cli completes the function package deployment through the deploy subcommand. The scf command line tool deploys the code package, function configuration, and other information specified in the configuration file to the cloud or updates the functions of the cloud according to the specified function template configuration file.
    \b
    The execution of the scf deploy command is based on the function template configuration file. For the description and writing of the specific template file, please refer to the template file description.
        * https://cloud.tencent.com/document/product/583/33454
    \b
    Common usage:
        \b
        * Deploy the package
          $ scf deploy
        \b
        * Package the configuration file, and specify the COS bucket as "temp-code-1253970226"
          $ scf deploy --cos-bucket temp-code-1253970226
        \b
        * Deploy history package
          $ scf deploy --history
        \b
        * Upgrade the function and upgrade events
          $ scf deploy -f -ue
    '''
    # Remove the build cache directory (temporary packaging files); a missing
    # directory is the normal case, so failures are swallowed (logged only).
    try:
        shutil.rmtree(_BUILD_DIR)
    except Exception as e:
        Operation(e, err_msg=traceback.format_exc()).no_output()
    # NOTE(review): no_color is accepted but not referenced here; presumably
    # consumed globally -- confirm before removing the option.
    Deploy(template_file, cos_bucket, name, namespace, region, forced, skip_event, without_cos,
           update_event, history).start()
class Deploy(object):
    def __init__(self, template_file, cos_bucket, function, deploy_namespace, region, forced, skip_event, without_cos,
                 update_event, history=None):
        """Collect deployment settings and parse/validate the SAM template."""
        self.user_config = UserConfig()
        self.template_file = template_file
        self.template_file_dir = ""
        self.cos_bucket = cos_bucket
        # Validates template_file and normalizes paths/bucket name BEFORE
        # the template is read below.
        self.check_params()
        template_data = tcsam.tcsam_validate(Template.get_template_data(self.template_file))
        self.resource = template_data.get(tsmacro.Resources, {})
        self.function = function
        self.deploy_namespace = deploy_namespace
        self.region = region if region else self.user_config.region
        # "ap-guangzhou-open" functions use the regular Guangzhou COS region.
        self.cos_region = "ap-guangzhou" if self.region == "ap-guangzhou-open" else self.region
        self.without_cos = without_cos
        self.history = history
        # Default bucket name used when the "using-cos" switch is on.
        self.bucket_name = "scf-deploy-" + self.region
        self.forced = forced
        self.skip_event = skip_event
        self.update_event = update_event
def check_params(self):
if not self.template_file:
raise TemplateNotFoundException("FAM Template Not Found. Missing option --template-file")
if not os.path.isfile(self.template_file):
raise TemplateNotFoundException("FAM Template Not Found, template-file Not Found")
self.template_file = os.path.abspath(self.template_file)
self.template_file_dir = os.path.dirname(os.path.abspath(self.template_file))
if self.cos_bucket and self.cos_bucket.endswith("-" + self.user_config.appid):
self.cos_bucket = self.cos_bucket.replace("-" + self.user_config.appid, '')
    def start(self):
        """Run the full deployment: COS bucket checks, namespace creation,
        then package+deploy each selected function (one worker process per
        function unless --history)."""
        # Make sure a function named via --name actually exists in the template.
        if self.function and "'%s'" % str(self.function) not in str(self.resource):
            raise DeployException("Couldn't find the function in YAML, please add this function in YAML.")
        if self.without_cos:
            Operation(
                "Because of --without-cos, this time won't be uploaded packages to the default COS-Bucket.").process()
        elif self.cos_bucket:
            Operation("Because of --cos-bucket, this time will be uploaded packages to the COS-Bucket: %s." % (
                self.cos_bucket)).process()
            cos_client = CosClient(self.region)
            Operation("Checking %s COS-Bucket: %s." % (self.region, self.cos_bucket)).process()
            default_bucket = self.cos_bucket + "-" + self.user_config.appid
            if cos_client.get_bucket(default_bucket) == 0:
                err_msg = "The COS-Bucket %s could not be found in %s. Please check it." % (
                    self.cos_bucket, self.region)
                raise COSBucketException(err_msg)
        elif self.user_config.using_cos.upper().startswith("TRUE"):
            default_bucket = self.bucket_name + "-" + self.user_config.appid
            Operation("By default, this time will be uploaded packages to COS-Bucket.").information()
            Operation("Region: %s, COS-Bucket: %s" % (self.region, default_bucket)).information()
            Operation(
                "If you don't want to upload packages to COS-Bucket by default, you can use the close command: scf configure set --using-cos N").information()
            cos_client = CosClient(self.region)
            Operation("Checking %s COS-Bucket: %s." % (self.region, default_bucket)).process()
            if cos_client.get_bucket(default_bucket) == 0:
                # Bucket not found: create the default deployment bucket.
                Operation("Creating default COS Bucket: " + default_bucket).process()
                create_status = cos_client.create_bucket(bucket=default_bucket)
                if create_status == True:
                    Operation("Creating default COS-Bucket success.").success()
                    Operation("Region: %s, COS-Bucket: %s" % (self.region, default_bucket)).information()
                else:
                    Operation("Creating %s Cos Bucket: %s faild." % (self.region, default_bucket)).exception()
                    try:
                        # create_status may carry the raw COS XML error body.
                        if "<?xml" in str(create_status):
                            error_code = re.findall("<Code>(.*?)</Code>", str(create_status))[0]
                            error_message = re.findall("<Message>(.*?)</Message>", str(create_status))[0]
                            err_msg = "COS client error code: %s, message: %s" % (error_code, error_message)
                        else:
                            err_msg = str(create_status)
                    except Exception as e:
                        err_msg = "Failed to create COS-Bucket. Please check if you have related operation permissions."
                        Operation(e, err_msg=traceback.format_exc()).no_output()
                    raise COSBucketException(err_msg)
        else:
            Operation(
                "If you want to increase the upload speed, you can use --using-cos, the open command:scf configure set --using-cos Y").information()
        error_state = False
        for namespace in self.resource:  # iterate over function namespaces
            real_namespace = self.deploy_namespace if self.deploy_namespace else namespace
            rep = ScfClient(self.region).get_ns(real_namespace)
            if not rep:
                Operation("%s: Namespace not exists, create it now" % (real_namespace)).process()
                err = ScfClient(self.region).create_ns(real_namespace)
                if err is not None:
                    if sys.version_info[0] == 3:
                        s = err.get_message()
                    else:
                        s = err.get_message().encode("UTF-8")
                    err_msg = "%s: Create namespace failure. Error: %s." % (real_namespace, s)
                    Operation(u' %s' % text(err_msg), fg="red").exception()
                    return False
            # Result queue shared with the worker processes.
            # NOTE(review): passing the multiprocessing *module* as ctx works
            # because it exposes the context API -- confirm on upgrades.
            li = queues.Queue(1000, ctx=multiprocessing) if platform.python_version() >= '3' else queues.Queue(1000)
            workflow_process = None
            temp_function_list = []
            for function in list(self.resource[namespace]):  # iterate over functions
                # Skip the resource-type marker entry.
                if function == tsmacro.Type:
                    continue
                # If the user named a function, deploy only that one.
                if self.function is not None and function != self.function:
                    continue
                temp_function_list.append(function)
            # (A commented-out batched-deployment experiment that used to
            # live here has been removed.)
            function_count = 0
            result_list = []
            function_total = len(temp_function_list)
            max_thread = int(50 / (function_total if function_total <= 10 else 10))
            max_thread = 2 if max_thread < 2 else max_thread
            max_funtion = 10
            for eve_function in temp_function_list:
                function_count = function_count + 1
                # Keep at most ~10 workers in flight: drain finished results
                # (raising the threshold per drained item) before continuing.
                if function_count >= max_funtion or function_count == function_total:
                    if workflow_process:
                        workflow_process.join()
                    while True:
                        try:
                            temp_li = li.get(timeout=0.5)
                            result_list.append(temp_li)
                            max_funtion = max_funtion + 1
                        except:
                            # Queue drained (queue.Empty); stop collecting.
                            break
                if self.history:
                    # --history deploys synchronously in this process.
                    self.workflow(namespace, real_namespace, eve_function, li, 10)
                else:
                    workflow_process = Process(
                        target=self.workflow,
                        args=(namespace, real_namespace, eve_function, li, max_thread)
                    )
                    workflow_process.start()
            # Collect the remaining results until every function reported.
            while function_total != 0:
                temp_li = li.get()
                result_list.append(temp_li)
                if len(result_list) == function_total:
                    break
            self.function_output(result_list, real_namespace)
            if not error_state:
                error_state = True if "False" in str(result_list) else False
        if error_state:
            raise DeployException("Not all deployments were successful, please check!")
        # Remove the build cache directory (temporary packaging files).
        try:
            shutil.rmtree(_BUILD_DIR)
        except Exception as e:
            Operation(e, err_msg=traceback.format_exc()).no_output()
def workflow(self, namespace, real_namespace, function, li, max_thread):
function_message = {
"function": function,
"package": None,
"deploy_function": None,
"deploy_trigger": None,
}
function_resource = self.package(namespace, real_namespace, function, max_thread)
if (not function_resource) or (function not in function_resource[real_namespace]):
function_message["package"] = False
else:
function_message["package"] = True
deploy_result = self.deploy(function_resource[real_namespace][function], real_namespace, function)
if deploy_result == False:
function_message["deploy_function"] = False
else:
function_message["deploy_function"] = True
function_message["deploy_trigger"] = deploy_result[1] if len(deploy_result) == 2 else None
li.put(function_message)
# message.append(function)
    def package(self, namespace, real_namespace, function, max_thread):
        """Package one function's code and wrap it in a deployable resource tree.

        Builds a minimal ``{real_namespace: {Type, function: spec}}`` tree from
        ``self.resource`` and attaches the packaged code location to the
        function's Properties: either a COS bucket/object pair or a local zip.

        Returns the resource tree on success, or None when packaging produced
        no code location at all.

        Side effect: on a failed or malformed package result the function is
        deleted from ``self.resource[namespace]`` (so later stages skip it),
        but the already-built tree is still returned — callers detect this via
        the membership check on the returned dict.
        """
        function_resource = {
            real_namespace: {
                "Type": "TencentCloud::Serverless::Namespace",
                function: self.resource[namespace][function]
            }
        }
        # Choose the packaging strategy: re-use a historical artefact or build fresh.
        if self.history:
            code_url = self.package_history(namespace, function)
        else:
            code_url = self.package_core(namespace, real_namespace, function, max_thread)
        if code_url:
            try:
                if "cos_bucket_name" in code_url:  # cos_bucket was used, or using_cos
                    bucket_name = code_url["cos_bucket_name"]
                    object_name = code_url["cos_object_name"]
                    # tsmacro.Properties is presumably the "Properties" key constant — confirm
                    function_resource[real_namespace][function][tsmacro.Properties]["CosBucketName"] = bucket_name
                    function_resource[real_namespace][function][tsmacro.Properties]["CosObjectName"] = object_name
                elif "zip_file" in code_url:  # using_cos not used, or without-cos requested
                    function_resource[real_namespace][function][tsmacro.Properties]["LocalZipFile"] = code_url[
                        "zip_file"]
                else:
                    # code_url has neither expected key: report and drop the function
                    Operation("%s - %s: Package Failed" % (real_namespace, function)).exception()
                    del self.resource[namespace][function]
            except Exception as e:
                # e.g. cos_object_name missing while cos_bucket_name present
                Operation("%s - %s: %s" % (real_namespace, function, str(e)), err_msg=traceback.format_exc()).warning()
                del self.resource[namespace][function]
        else:
            return None
        return function_resource
def package_core(self, namespace, real_namespace, function, max_thread):
template_path, template_name = os.path.split(self.template_file)
code_uri = self.resource[namespace][function][tsmacro.Properties].get(tsmacro.CodeUri, "")
if isinstance(code_uri, dict):
return {
"cos_bucket_name": code_uri["Bucket"],
"cos_object_name": code_uri["Key"]
}
function_path = os.path.join(template_path, code_uri)
zip_result = self.package_zip_core(function_path, real_namespace, function)
if | |
ms15-051 local priv
#
# v24 2015-01-30
# - added --sub/-s command in order to display output of msids as linked
# this aides in demonstrating what patches need to be applied precisely.
# this change was implemented in v23, but only followed the depth to level
# 1 instead of the entire way.
# - fixed a bug that now allows for multiple supersedes msids in the db
# - allowed for getarchitecture to be recursive, and reduced redundancy when
# it is called throughout the program
# - added ms14-070
#
# v23 2015-01-26
# - typo in --local flag case (pontential vs potential). issue #5 closed.
#
# v22 2015-01-23
# - speed optimisations! it was too slow beforehand. realised i could easily
# make it a bit more efficient
#
# v21 2015-01-22
# - changed display formatting to include nested/linked MS numbers. makes it
# easier to determine the dependencies
# - made args global
# - changed some code formatting, including double-space instead of \t
# - added some additional comments
# - disable ANSI output if on windows platform
# - added recent exploits
#
# v20 2014-12-16
# - added ms14-068,ms14-064,ms14-060, and ms14-058 to the internal vuln list
#
# v19 2014-10-08
# - added support for windows server 2012, this includes ignoring the
# architecture for 2012, and forcing from 32-bit to 64-bit
#
# v18 2014-09-02
# - added ms14-029 poc
#
# v17 2014-08-05
# - fixed a bug where it would not detect OS version when a unicode char comes
# before search string
#
# v16 2014-07-28
# - improved reading of various file encodings for systeminfo. now attempts to
# detect the file first, otherwise loops through common encodings
# - improved OS, service pack, architecture, and release detection. this is now
# not English-dependent as it was previously
# - better architecture detection of systeminfo input (look for -based string)
# - added /usr/bin/env python
# - added ms14-035 poc
#
# v15 2014-07-15
# - changed file open to io, and attempt to decode as utf-8; otherwise attempt
# utf-16
#
# v14 2014-07-13
# - allowed for --ostext flag to properly supersede OS detection of systeminfo
# input
#
# v13a 2014-07-01
# - added new msf flags for ms13-097, and ms14-009
#
# v12a 2014-06-06
# - quick cleanup for release
#
# v11a 2014-05-02
# - fixed the bulletin scrape regex for the update command. ms changed it
#
# v10a 2014-03-24
# - added a hotfixes argument, that can be used to supplement the list
# of hotfixes detected in the systeminfo input
# - added severity at the end of the output when reporting bulletins
# - added a 'patches' argument, that can be used to determine any
# of the hotfixes for a specific bulletin. this is good for debugging.
#
# v09a 2014-03-18
# - again, another massive bug on the linked kb searching function
# getlinkedms(). should be fixed now
# - also checks columns 11 and 12 for superseded, i think it has to
# do with dos and *nix output
#
# v08a 2014-02-14
# - bug where the superseded column wasn't being checked
# this may be because it's only xlsx and it parsed differently in csv
# - added some new exploits from edb
#
# v07a 2014-02-12
# - added indicator for os version, and in green
# - better parsing of architecture for itanium based support
#
# v06a 2014-01-19
# - added 'ostext' or 'o' option, when don't have any patch information
# but just know the OS
#
# v05a
# - added a check for "Kernel version" column, as well as "OS version"
#
# v04a
# - added support for XLSX files directly with the updated XLRD library, this
# requires the python-xlrd library to be installed and upgraded with:
# $ pip install xlrd --upgrade
# - changed MS13-101 to E, as there isn't a metasploit module (yet!)
#
# v03a
# - fixed an issue where component KB wasn't being checked
#
# FUNCTIONS
#
# def main():
# def run(database):
# def detect_encoding(filename):
# def trace(database):
# def patches(database):
# def getversion(name, release, servicepack, architecture):
# def getname(ostext):
# def getrelease(ostext):
# def getservicepack(ostext):
# def getarchitecture(ostext):
# def getitanium(ostext):
# def getpatch(ostext):
# def getbulletinids(haystack):
# def isaffected(name, release, servicepack, architecture, haystack):
# def getlinkedms(msids, database):
# def getexploit(msid = 0):
# def update():
# def merge_list(li):
#
import re
import platform
import argparse
import subprocess
import csv
import StringIO
import os
import datetime
import urllib2
import io
from random import randint
from time import sleep
from tempfile import NamedTemporaryFile
from sys import exit
# constants/globals
# download landing page and direct link for the official Microsoft Security
# Bulletin spreadsheet used by the --update command
MSSB_URL = 'http://www.microsoft.com/en-gb/download/confirmation.aspx?id=36982'
BULLETIN_URL = 'http://download.microsoft.com/download/6/7/3/673E4349-1CA5-40B9-8879-095C72D5B49D/BulletinSearch.xlsx'
VERSION = "3.3"
# global parser
parser = argparse.ArgumentParser(description="search microsoft security bulletins for exploits based upon the patch level of the machine by feeding in systeminfo command")
parser.add_argument("-v", "--verbose", help="verbose output", action="store_true")
parser.add_argument("-i", "--systeminfo", help="feed in an input file that contains the 'systeminfo' command")
parser.add_argument("-d", "--database", help="the file that contains the microsoft security bulletin database")
# NOTE(review): this help text looks copied from another flag — --update
# actually downloads the bulletin database; verify the intended wording.
parser.add_argument("-u", "--update", help="required flag to even run the script", action="store_true")
parser.add_argument("-a", "--audit", help="show all entries, not only exploits", action="store_true")
parser.add_argument("-t", "--trace", help="used to determine linked ms bulletins")
parser.add_argument("-p", "--patches", help="used to determine specific patches for a ms bulletin")
parser.add_argument("-o", "--ostext", help="a loose text representation of the windows OS (ex: \"windows xp home edition sp2\")")
parser.add_argument("-s", "--sub", help="generate output using linked/sub bulletins. WARNING: SLOW!", action="store_true")
parser.add_argument("-2", "--duplicates", help="allow duplicate ms bulletin output within the results. this will produce a lot of output, but is useful when determining linked ms bulletins", action="store_true")
parser.add_argument("-q", "--quiet", help="don't show exploit information. shorter output", action="store_true")
# hotfixes
# used to parse "wmic qfe list full" input, and to solve the 'File 1' errors
parser.add_argument("-H", "--hotfixes", help="a loose list of hotfixes to be added, for use with the following command: 'wmic qfe list full'")
# search by exploit type only
exptypegroup = parser.add_mutually_exclusive_group()
exptypegroup.add_argument("-r", "--remote", help="search remote exploits only", action="store_true")
exptypegroup.add_argument("-l", "--local", help="search local exploits only", action="store_true")
# global args parsed
# NOTE: arguments are parsed at import time, so importing this module consumes sys.argv
ARGS = parser.parse_args()
def main():
    """Load the MSSB database named by --database (csv or xls/xlsx),
    normalise it to csv text in memory, then dispatch to the requested
    sub-command (--trace / --systeminfo / --ostext / --update / --patches).

    Exits with status 1 on unreadable input or missing arguments.
    """
    ALERT("initiating winsploit version %s..." % VERSION)
    database = ''
    # if there is a database switch
    if ARGS.database:
        # split name and extension
        name, extension = os.path.splitext(ARGS.database)
        # csv input is consumed verbatim
        if 'csv' in extension:
            ALERT("database file detected as csv based on extension", ALERT.NORMAL)
            # attempt to open the file
            try:
                dbfile = open(ARGS.database, 'r')
            except IOError as e:
                # BUG FIX: previously referenced the undefined name 'filename',
                # raising a NameError instead of reporting the bad path
                ALERT("could not open the file %s" % ARGS.database, ALERT.BAD)
                exit(1)
            database = dbfile.read()
            dbfile.close()
        # xls or xlsx is converted to csv text in memory via xlrd
        elif 'xls' in extension:
            ALERT("database file detected as xls or xlsx based on extension", ALERT.NORMAL)
            try:
                import xlrd
            except ImportError as e:
                ALERT("please install and upgrade the python-xlrd library", ALERT.BAD)
                exit(1)
            # open the xls file
            try:
                wb = xlrd.open_workbook(ARGS.database)
            except IOError as e:
                ALERT("no such file or directory '%s'. ensure you have the correct database file passed in --database/-d" % ARGS.database, ALERT.BAD)
                exit(1)
            sh = wb.sheet_by_index(0)
            data = ''
            # serialise each row as one csv line: utf-8 encode every cell and
            # strip characters that would corrupt the in-memory csv (embedded
            # newlines and commas, plus the '.0' suffix xlrd gives int cells)
            for rownum in xrange(sh.nrows):
                values = [unicode(v).encode('utf8').replace('\n', ' ').replace(',', '').replace('.0', '')
                          for v in sh.row_values(rownum)]
                data += ",".join(values)
                data += '\n'
            # set the database to the csv data
            database = data
        # unknown filetype, error
        else:
            ALERT("unknown filetype. change file extension to indicate csv or xls/xlsx", ALERT.BAD)
            exit(1)
    # dispatch to the requested action
    if ARGS.trace: trace(database)
    elif ARGS.systeminfo or ARGS.ostext: run(database)
    elif ARGS.update: update()
    elif ARGS.patches: patches(database)
    # error
    else:
        ALERT("an error occured while running, not enough arguments", ALERT.BAD)
        exit(1)
    ALERT("done")
# end main()
def run(database):
# variables used
ostext=None
name=None
release=None
servicepack=None
# will default to 32-bit, but can be 64 bit or itanium
architecture=None
hotfixes=set([])
bulletinids=set([])
potential=[]
vulns={}
ids=set([])
cmdoutput = []
# test for database
if not ARGS.database:
ALERT("please supply a MSSB database file with the --database or -d flag, this can be downloaded using the --update command", ALERT.BAD)
exit(1)
# read from ostext first
if ARGS.ostext:
ALERT("getting OS information from command line text")
name=getname(ARGS.ostext)
release=getrelease(ARGS.ostext)
servicepack=getservicepack(ARGS.ostext)
architecture=getarchitecture(ARGS.ostext)
# the os name at least has to be identified
if not name:
ALERT("unable to determine the windows version command line text from '%s'" % ARGS.ostext, ALERT.BAD)
exit(1)
# get the systeminfo information from the input file
if ARGS.systeminfo:
ALERT("attempting to read from the systeminfo input file")
# when reading the systeminfo file, we want to attempt to detect | |
layer**
# In[ ]:
# print("========randomize last layer")
# layer = model.layers[-4] # the last convolutional layer
# weights = layer.get_weights()
#
# new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W)
# new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W)
#
# layer.set_weights([new_kernel, new_bias])
# # Perform training
# **Loss function**
# $$\begin{multline}
# \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# x_i - \hat{x}_i
# \right)^2 +
# \left(
# y_i - \hat{y}_i
# \right)^2
# \right]
# \\
# + \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# \sqrt{w_i} - \sqrt{\hat{w}_i}
# \right)^2 +
# \left(
# \sqrt{h_i} - \sqrt{\hat{h}_i}
# \right)^2
# \right]
# \\
# + \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \lambda_\textrm{noobj}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{noobj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \sum_{i = 0}^{S^2}
# L_i^{\text{obj}}
# \sum_{c \in \textrm{classes}}
# \left(
# p_i(c) - \hat{p}_i(c)
# \right)^2
# \end{multline}$$
# In[ ]:
import backend
def predict(model, image, i, img_name, path=""):
    """Run the YOLO model on one BGR image, draw the decoded boxes, save the
    visualisation as <path>/<img_name> and return the decoded boxes.

    Parameters:
        model    : trained two-input Keras YOLO model (image + true-box tensor).
        image    : raw BGR image array (any size; resized to 416x416 internally).
        i        : image index (kept for interface compatibility; no longer
                   gates directory creation — see bug fix below).
        img_name : file name for the saved visualisation.
        path     : output directory; created if missing.

    Returns the list of decoded bounding boxes.
    """
    # dummy ground-truth tensor required by the two-input training graph
    dummy_array = np.zeros((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))
    plt.figure(figsize=(10, 10))
    # resize to network input size, normalise to [0, 1], BGR -> RGB, add batch axis
    input_image = cv2.resize(image, (416, 416))
    input_image = input_image / 255.
    input_image = input_image[:, :, ::-1]
    input_image = np.expand_dims(input_image, 0)
    netout = model.predict([input_image, dummy_array])
    boxes = decode_netout(netout[0],
                          obj_threshold=OBJ_THRESHOLD,
                          nms_threshold=NMS_THRESHOLD,
                          anchors=ANCHORS,
                          nb_class=CLASS)
    image = draw_boxes(image, boxes, labels=LABELS)
    plt.imshow(image[:, :, ::-1])
    path = str(path)
    # BUG FIX: the output directory was only ensured for the first 100 images
    # (i <= 100) while savefig ran unconditionally, so later images could fail
    # if the directory was missing. Ensure it exists on every call.
    if not os.path.exists(path):
        os.makedirs(path)
        print("Directory ", path, " Created ")
    plt.savefig(path + "/" + img_name)
    # BUG FIX: close the figure — previously one figure leaked per call, and
    # matplotlib warns/slows after ~20 open figures during evaluation loops.
    plt.close()
    return boxes
from utils import decode_netout, compute_overlap, compute_ap
from os.path import normpath, basename
def evaluate(model, generator,
             iou_threshold=0.3,
             score_threshold=0.3,
             max_detections=100,
             save_path=None):
    """ Evaluate a given dataset using a given model.
    code originally from https://github.com/fizyr/keras-retinanet
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
                          NOTE(review): currently unused by this implementation.
        max_detections  : The maximum number of detections to use per image.
                          NOTE(review): currently unused by this implementation.
        save_path       : The path to save images with visualized detections to;
                          when not None, also receives a pickled mAP dict.
    # Returns
        A dict mapping class labels to average-precision scores.
    """
    # gather all detections and annotations, indexed [image][class]
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        path = generator.images[i]['filename']
        img_name = basename(normpath(path))
        raw_height, raw_width, raw_channels = raw_image.shape
        # make the boxes and the labels
        pred_boxes = predict(model, raw_image, i, img_name, path=save_path)
        score = np.array([box.score for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])
        if len(pred_boxes) > 0:
            # scale normalised box coordinates back to raw-image pixels
            pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
                                    box.ymax * raw_height, box.score] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])
        # sort the boxes and the labels according to scores (descending)
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]
        annotations = generator.load_annotation(i)
        # copy ground truth to all_annotations (column 4 holds the class label)
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
    # compute mAP by comparing all detections and all annotations
    average_precisions = {}
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            for d in detections:
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # true positive: overlaps an as-yet-unmatched annotation by >= iou_threshold
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision
    # BUG FIX: with the default save_path=None, `save_path + "/mAP.pkl"` raised
    # TypeError after all the work was done; only persist when a path was given.
    # Also use a context manager so the file handle is always closed.
    if save_path is not None:
        import pickle
        with open(save_path + "/mAP.pkl", "wb") as f:
            pickle.dump(average_precisions, f)
    return average_precisions
def custom_loss(y_true, y_pred):
mask_shape = tf.shape(y_true)[:4]
cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1)))
cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))
cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1])
coord_mask = tf.zeros(mask_shape)
conf_mask = tf.zeros(mask_shape)
class_mask = tf.zeros(mask_shape)
seen = tf.Variable(0.)
total_recall = tf.Variable(0.)
"""
Adjust prediction
"""
### adjust x and y
pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid
### adjust w and h
pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1, 1, 1, BOX, 2])
### adjust confidence
pred_box_conf = tf.sigmoid(y_pred[..., 4])
### adjust class probabilities
pred_box_class = y_pred[..., 5:]
"""
Adjust ground truth
"""
### adjust x and y
true_box_xy = y_true[..., 0:2] # relative position to the containing cell
### adjust w and h
true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically
### adjust confidence
true_wh_half = true_box_wh / 2.
true_mins = true_box_xy - true_wh_half
true_maxes = true_box_xy + true_wh_half
pred_wh_half = pred_box_wh / 2.
pred_mins = pred_box_xy - pred_wh_half
pred_maxes = pred_box_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
true_box_conf = iou_scores * y_true[..., 4]
### adjust class probabilities
true_box_class = tf.argmax(y_true[..., 5:], -1)
"""
Determine the masks
"""
### coordinate mask: simply the position of the ground truth boxes (the predictors)
coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE
### confidence mask: penelize predictors + penalize boxes with low IOU
# penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6
true_xy = true_boxes[..., 0:2]
true_wh = true_boxes[..., 2:4]
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = tf.expand_dims(pred_box_xy, 4)
pred_wh = tf.expand_dims(pred_box_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
best_ious = tf.reduce_max(iou_scores, axis=4)
conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE
# penalize the confidence of the boxes, which are reponsible for corresponding ground truth box
conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE
### class mask: simply the position of the ground truth boxes (the predictors)
class_mask = y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE
"""
Warm-up training
"""
no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE / 2.)
seen = tf.assign_add(seen, 1.)
true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES),
lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
true_box_wh + tf.ones_like(true_box_wh) * np.reshape(
ANCHORS, [1, 1, 1, BOX, 2]) * no_boxes_mask,
tf.ones_like(coord_mask)],
lambda: [true_box_xy,
true_box_wh,
coord_mask])
"""
Finalize the loss
"""
nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))
loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_conf = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2.
loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
loss_class = tf.reduce_sum(loss_class * | |
<filename>flexget/utils/imdb.py
from __future__ import unicode_literals, division, absolute_import
import difflib
import logging
import re
from bs4.element import Tag
from flexget.utils.soup import get_soup
from flexget.utils.requests import Session
from flexget.utils.tools import str_to_int
from flexget.plugin import get_plugin_by_name
# module-level logger shared by all helpers in this file
log = logging.getLogger('utils.imdb')
# IMDb delivers a version of the page which is unparsable to unknown (and some known) user agents, such as requests'
# Spoof the old urllib user agent to keep results consistent
requests = Session()
requests.headers.update({'User-Agent': 'Python-urllib/2.6'})
#requests.headers.update({'User-Agent': random.choice(USERAGENTS)})
# this makes most of the titles to be returned in english translation, but not all of them
requests.headers.update({'Accept-Language': 'en-US,en;q=0.8'})
# give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
requests.set_domain_delay('imdb.com', '3 seconds')
def is_imdb_url(url):
    """Return a truthy regex match when *url* points at imdb.com, else None."""
    if isinstance(url, basestring):
        # NOTE: a urlparse-based check would be more robust than this regex.
        return re.match(r'https?://[^/]*imdb\.com/', url)
    return None
def extract_id(url):
    """Return the IMDb ID (e.g. 'tt0114814' or 'nm0000206') found in *url*.

    Returns None when *url* is not a string or contains no recognisable ID.
    """
    if not isinstance(url, basestring):
        return
    # BUG FIX: IMDb ids have 7 OR 8 digits; the previous {7} quantifier
    # truncated 8-digit ids to their first 7 digits, yielding a wrong id.
    # {7,8} still matches all legacy 7-digit ids unchanged.
    m = re.search(r'((?:nm|tt)\d{7,8})', url)
    if m:
        return m.group(1)
def make_url(imdb_id):
    """Build the canonical IMDb title URL for *imdb_id* (e.g. 'tt0114814')."""
    template = u'http://www.imdb.com/title/%s/'
    return template % imdb_id
class ImdbSearch(object):
    """Searches imdb.com for a movie title and scores the candidates.

    Each candidate's score combines a difflib title-similarity ratio, a small
    penalty for matches made via an aka title (aka_weight) and a small bonus
    for appearing earlier in IMDb's result list (first_weight).
    """

    def __init__(self):
        # de-prioritize aka matches a bit
        self.aka_weight = 0.95
        # prioritize first results slightly
        self.first_weight = 1.1
        # candidates scoring below this are discarded outright
        self.min_match = 0.7
        # minimum score gap required between the top two candidates
        self.min_diff = 0.01
        self.debug = False
        # maximum number of search-result rows to examine
        self.max_results = 10

    def ireplace(self, text, old, new, count=0):
        """Case insensitive string replace"""
        pattern = re.compile(re.escape(old), re.I)
        return re.sub(pattern, new, text, count)

    def smart_match(self, raw_name):
        """Accepts messy name, cleans it and uses information available to make smartest and best match"""
        parser = get_plugin_by_name('parsing').instance.parse_movie(raw_name)
        name = parser.name
        year = parser.year
        if name == '':
            log.critical('Failed to parse name from %s' % raw_name)
            return None
        log.debug('smart_match name=%s year=%s' % (name, str(year)))
        return self.best_match(name, year)

    def best_match(self, name, year=None):
        """Return single movie that best matches name criteria or None"""
        movies = self.search(name)
        if not movies:
            log.debug('search did not return any movies')
            return None
        # remove all movies below min_match, and different year
        for movie in movies[:]:
            if year and movie.get('year'):
                if movie['year'] != str(year):
                    log.debug('best_match removing %s - %s (wrong year: %s)' % (movie['name'], movie['url'], str(movie['year'])))
                    movies.remove(movie)
                    continue
            if movie['match'] < self.min_match:
                log.debug('best_match removing %s (min_match)' % movie['name'])
                movies.remove(movie)
                continue
        if not movies:
            log.debug('FAILURE: no movies remain')
            return None
        # if only one remains ..
        if len(movies) == 1:
            log.debug('SUCCESS: only one movie remains')
            return movies[0]
        # check min difference between best two hits; too close means ambiguous
        diff = movies[0]['match'] - movies[1]['match']
        if diff < self.min_diff:
            log.debug('unable to determine correct movie, min_diff too small (`%s` <-?-> `%s`)' %
                      (movies[0], movies[1]))
            for m in movies:
                log.debug('remain: %s (match: %s) %s' % (m['name'], m['match'], m['url']))
            return None
        else:
            return movies[0]

    def search(self, name):
        """Return array of movie details (dict) scraped from IMDb's find page,
        sorted best-match first. Each dict has name/url/imdb_id/match and
        usually year."""
        log.debug('Searching: %s' % name)
        url = u'http://www.imdb.com/find'
        # This will only include movies searched by title in the results
        params = {'q': name, 's': 'tt', 'ttype': 'ft'}
        # BUG FIX: typo 'Serch query' in the debug message
        log.debug('Search query: %s' % repr(url))
        page = requests.get(url, params=params)
        actual_url = page.url
        movies = []
        # in case we got redirected to movie page (perfect match)
        re_m = re.match(r'.*\.imdb\.com/title/tt\d+/', actual_url)
        if re_m:
            actual_url = re_m.group(0)
            log.debug('Perfect hit. Search got redirected to %s' % actual_url)
            movie = {}
            movie['match'] = 1.0
            movie['name'] = name
            movie['url'] = actual_url
            movie['imdb_id'] = extract_id(actual_url)
            movie['year'] = None  # skips year check
            movies.append(movie)
            return movies
        # the god damn page has declared a wrong encoding
        soup = get_soup(page.text)
        section_table = soup.find('table', 'findList')
        if not section_table:
            log.debug('results table not found')
            return
        rows = section_table.find_all('td', 'result_text')
        if not rows:
            log.debug('Titles section does not have links')
        for count, row in enumerate(rows):
            # Title search gives a lot of results, only check the first ones
            # BUG FIX: was `count > self.max_results`, which examined
            # max_results + 1 rows (off-by-one)
            if count >= self.max_results:
                break
            movie = {}
            # the trailing parenthesised token is usually the release year
            additional = re.findall(r'\((.*?)\)', row.text)
            if len(additional) > 0:
                movie['year'] = additional[-1]
            link = row.find_next('a')
            movie['name'] = link.text
            movie['url'] = 'http://www.imdb.com' + link.get('href')
            movie['imdb_id'] = extract_id(movie['url'])
            log.debug('processing name: %s url: %s' % (movie['name'], movie['url']))
            # calc & set best matching ratio
            seq = difflib.SequenceMatcher(lambda x: x == ' ', movie['name'].title(), name.title())
            ratio = seq.ratio()
            # check if some of the akas have better ratio
            for aka in link.parent.find_all('i'):
                aka = aka.next.string
                match = re.search(r'".*"', aka)
                if not match:
                    log.debug('aka `%s` is invalid' % aka)
                    continue
                aka = match.group(0).replace('"', '')
                log.trace('processing aka %s' % aka)
                seq = difflib.SequenceMatcher(lambda x: x == ' ', aka.title(), name.title())
                aka_ratio = seq.ratio()
                if aka_ratio > ratio:
                    # aka matched better, but weighted down slightly
                    ratio = aka_ratio * self.aka_weight
                    log.debug('- aka `%s` matches better to `%s` ratio %s (weighted to %s)' %
                              (aka, name, aka_ratio, ratio))
            # prioritize items by position
            position_ratio = (self.first_weight - 1) / (count + 1) + 1
            log.debug('- prioritizing based on position %s `%s`: %s' % (count, movie['url'], position_ratio))
            ratio *= position_ratio
            # store ratio
            movie['match'] = ratio
            movies.append(movie)
        movies.sort(key=lambda x: x['match'], reverse=True)
        return movies
class ImdbParser(object):
"""Quick-hack to parse relevant imdb details"""
    def __init__(self):
        """Initialise every parsed field to an empty default; parse() fills them."""
        self.genres = []            # list of genre name strings
        self.languages = []         # list of language name strings
        self.actors = {}            # presumably imdb person id -> name; confirm in parse()
        self.directors = {}         # presumably imdb person id -> name; confirm in parse()
        self.score = 0.0            # IMDb rating (0.0 when unparsed)
        self.votes = 0              # number of rating votes
        self.year = 0               # release year (0 when unparsed)
        self.plot_outline = None    # short plot description text
        self.name = None            # (translated) title
        self.original_name = None   # original-language title, if different
        self.url = None             # canonical title URL
        self.imdb_id = None         # 'tt...' identifier
        self.photo = None           # poster image URL
        self.mpaa_rating = ''       # e.g. 'PG-13', 'NR'; empty when unknown
def __str__(self):
return '<ImdbParser(name=%s,imdb_id=%s)>' % (self.name, self.imdb_id)
def parse(self, imdb_id):
self.imdb_id = extract_id(imdb_id)
url = make_url(self.imdb_id)
self.url = url
page = requests.get(url)
soup = get_soup(page.text)
# get photo
tag_photo = soup.find('td', attrs={'id': 'img_primary'})
if tag_photo:
tag_img = tag_photo.find('img')
if tag_img:
self.photo = tag_img.get('src')
log.debug('Detected photo: %s' % self.photo)
# get rating. contentRating <span> in infobar.
tag_infobar_div = soup.find('div', attrs={'class': 'infobar'})
if tag_infobar_div:
tag_mpaa_rating = tag_infobar_div.find('span', attrs={'itemprop': 'contentRating'})
if tag_mpaa_rating:
if not tag_mpaa_rating.get('class') or not tag_mpaa_rating['class'][0].startswith('us_'):
log.warning('Could not determine mpaa rating for %s' % url)
else:
rating_class = tag_mpaa_rating['class'][0]
if rating_class == 'us_not_rated':
self.mpaa_rating = 'NR'
else:
self.mpaa_rating = rating_class.lstrip('us_').replace('_', '-').upper()
log.debug('Detected mpaa rating: %s' % self.mpaa_rating)
else:
log.debug('Unable to match signature of mpaa rating for %s - '
'could be a TV episode, or plugin needs update?' % url)
else:
# We should match the infobar, it's an integral part of the IMDB page.
log.warning('Unable to get infodiv class for %s - plugin needs update?' % url)
# get name
tag_name = soup.find('h1')
if tag_name:
tag_name = tag_name.find('span', attrs={'itemprop': 'name'})
if tag_name:
self.name = tag_name.text
log.debug('Detected name: %s' % self.name)
else:
log.warning('Unable to get name for %s - plugin needs update?' % url)
tag_original_title_i = soup.find('i', text=re.compile(r'original title'))
if tag_original_title_i:
span = tag_original_title_i.parent
tag_original_title_i.decompose()
self.original_name = span.text.strip().strip('"')
log.debug('Detected original name: %s' % self.original_name)
else:
# if title is already in original language, it doesn't have the tag
log.debug('Unable to get original title for %s - it probably does not exists' % url)
star_box = soup.find('div', attrs={'class': 'star-box giga-star'})
if star_box:
# detect if movie is eligible for ratings
rating_ineligible = star_box.find('div', attrs={'class': 'rating-ineligible'})
if rating_ineligible:
log.debug('movie is not eligible for ratings')
else:
# get votes
tag_votes = star_box.find(itemprop='ratingCount')
if tag_votes:
self.votes = str_to_int(tag_votes.string) or 0
log.debug('Detected votes: %s' % self.votes)
else:
log.warning('Unable to get votes for %s - plugin needs update?' % url)
# get score - find the ratingValue item that contains a numerical value
span_score = star_box.find(itemprop='ratingValue', text=re.compile('[\d\.]+'))
if span_score:
try:
self.score = float(span_score.string)
except (ValueError, TypeError):
log.debug('tag_score %r is not valid float' % span_score.string)
log.debug('Detected score: %s' % self.score)
else:
log.warning('Unable to get score for %s - plugin needs update?' % url)
else:
log.warning('Unable to find score/vote section for %s - plugin needs update?' % url)
# get genres
genres = soup.find('div', itemprop='genre')
if genres:
for link in genres.find_all('a'):
self.genres.append(link.text.strip().lower())
else:
log.warning('Unable to find genres section for %s - plugin needs update?' % url)
# get languages
for link in soup.find_all('a', href=re.compile('/language/.*')):
# skip non-primary languages "(a few words)", etc.
m = re.search('(?x) \( [^()]* \\b few \\b', link.next_sibling)
if not m:
lang = link.text.lower()
if not lang in self.languages:
self.languages.append(lang.strip())
# get year
tag_year = soup.find('a', attrs={'href': re.compile('^/year/\d+')})
if tag_year:
self.year = int(tag_year.text)
log.debug('Detected year: %s' % self.year)
elif soup.head.title:
m = re.search(r'(\d{4})\)', soup.head.title.string)
if m:
self.year = int(m.group(1))
log.debug('Detected year: %s' % self.year)
else:
log.warning('Unable to get year for %s (regexp mismatch) - | |
self.approximate_krr_regressions_['target'].kernel_(
self.approximate_krr_regressions_['source'].anchors(),
self.approximate_krr_regressions_['target'].anchors()
)
K_XY = torch.Tensor(K_XY)
return self.approximate_krr_regressions_['source'].sample_weights_.T.matmul(K_XY).matmul(self.approximate_krr_regressions_['target'].sample_weights_)
def _compute_principal_vectors(self, all_PVs=False):
"""
all_PVs indicate whether the data source with the most PVs should be reduced to the number of PVs of the smallest data-source.
Example: source has 10 factors, target 13. all_PVs=True would yield 13 target PVs, all_PVs=False would yield 10.
"""
cosine_svd = np.linalg.svd(self.cosine_sim, full_matrices=all_PVs)
self.principal_angles = cosine_svd[1]
self.untransformed_rotations_ = {
'source': cosine_svd[0],
'target': cosine_svd[2].T
}
self.principal_vectors_coef_ = {
x: self.untransformed_rotations_[x].T.dot(self.sqrt_inv_matrices_[x]).dot(self.approximate_krr_regressions_[x].sample_weights_.T.detach().numpy())
for x in self.untransformed_rotations_
}
def compute_consensus_features(
self,
X_input: dict,
n_similar_pv: int,
fit: bool=True
):
"""
Project the data on interpolated features, i.e., a linear combination of source and target SPVs which best balances the effect of source and target
data.
Parameters
----------
X_input: dict
Dictionary of data (AnnData) to project. Two keys are needed: 'source' and 'target'.
n_similar_pv: int
Number of top SPVs to project the data on.
fit: bool, default to True
Whether the interpolated times must be computed. If False, will use previously computed times, but will return an error if not previously fitted.
Returns
----------
interpolated_proj_df: pd.DataFrame
DataFrame of concatenated source and target samples after projection on consensus features.
"""
X_data_log = {
data_source: self._frobenius_normalisation(
data_source,
torch.log10(torch.Tensor(X_input[data_source].X + 1)),
frob_norm_source=True
) for data_source in ['source', 'target']
}
# Project data on KRR directions
krr_projections = {
pv_data_source: {
proj_data_source: self.approximate_krr_regressions_[pv_data_source].transform(
X_data_log[proj_data_source]
).detach().numpy()
for proj_data_source in ['source', 'target']
} for pv_data_source in ['source', 'target']
}
# Rotate KRR directions to obtain PVs
pv_projections = {}
for pv_data_source in krr_projections:
pv_projections[pv_data_source] = {}
for proj_data_source in krr_projections[pv_data_source]:
rotated_proj = self.untransformed_rotations_[pv_data_source].T
rotated_proj = rotated_proj.dot(self.sqrt_inv_matrices_[pv_data_source])
rotated_proj = rotated_proj.dot(krr_projections[pv_data_source][proj_data_source].T).T
pv_projections[pv_data_source][proj_data_source] = rotated_proj
del rotated_proj
# Mean-center projection data on the PV
pv_projections = {
pv_data_source: {
proj_data_source: StandardScaler(with_mean=True, with_std=False).fit_transform(
pv_projections[pv_data_source][proj_data_source]
) for proj_data_source in ['source', 'target']
} for pv_data_source in ['source', 'target']
}
# Compute optimal interpolation point
if fit:
self.n_similar_pv = n_similar_pv
self.optimal_interpolation_step_ = {
PV_number: compute_optimal_tau(
PV_number, pv_projections, np.arccos(self.principal_angles), n_interpolation=100
) for PV_number in range(self.n_similar_pv)
}
# Project on optimal interpolation time
interpolated_proj_df = {
PV_number: np.concatenate(list(project_on_interpolate_PV(
np.arccos(self.principal_angles)[PV_number],
PV_number,
optimal_step,
pv_projections
))) for PV_number, optimal_step in self.optimal_interpolation_step_.items()
}
return pd.DataFrame(interpolated_proj_df)
def save(
self,
folder: str = '.',
with_krr: bool=True,
with_model: bool=True
):
if not os.path.exists(folder) and not os.path.isdir(folder):
os.mkdir(folder)
# Dump scVI models
if with_model:
for x in self.scvi_models:
dump(
self.scvi_models[x],
open('%s/scvi_model_%s.pkl'%(folder, x), 'wb')
)
self.scvi_models[x].save(
'%s/scvi_model_%s'%(folder, x),
save_anndata=True
)
# Dump the KRR:
if not with_krr:
return True
for x in self.approximate_krr_regressions_:
self.approximate_krr_regressions_[x].save('%s/krr_approx_%s'%(folder, x))
# Save params
pd.DataFrame(self.krr_params).to_csv('%s/krr_params.csv'%(folder))
dump(self.krr_params, open('%s/krr_params.pkl'%(folder), 'wb'))
for param_t in ['model', 'plan', 'train']:
df = pd.DataFrame([self.scvi_params[x][param_t] for x in ['source', 'target']])
df.to_csv('%s/scvi_params_%s.csv'%(folder, param_t))
dump(self.scvi_params, open('%s/scvi_params.pkl'%(folder), 'wb'))
pd.DataFrame(self._fit_params, index=['params']).to_csv('%s/fit_params.csv'%(folder))
dump(self._fit_params, open('%s/fit_params.pkl'%(folder), 'wb'))
# Save results
results_elements = {
'alignment_M_X': self.M_X,
'alignment_M_Y': self.M_Y,
'alignment_M_XY': self.M_XY,
'alignment_cosine_sim': self.cosine_sim,
'alignment_principal_angles': self.principal_angles
}
for idx, element in results_elements.items():
if type(element) is np.ndarray:
np.savetxt('%s/%s.csv'%(folder, idx), element)
np.save(open('%s/%s.npy'%(folder, idx), 'wb'), element)
elif type(element) is torch.Tensor:
np.savetxt('%s/%s.csv'%(folder, idx), element.detach().numpy())
torch.save(element, open('%s/%s.pt'%(folder, idx), 'wb'))
if self._frob_norm_param is not None:
np.savetxt(
'%s/frob_norm_param.csv'%(folder),
np.array([self._frob_norm_param])
)
def load(folder: str = '.', with_krr: bool=True, with_model: bool=True):
"""
Load a Sobolev Alignment instance.
Parameters
----------
folder: str, default to '.'
Folder path where the instance is located
with_krr: bool, default to True
Whether KRR approximations must be loaded.
with_model: bool, default to True
Whether scvi models (VAEs) must be loaded.
Returns
-------
SobolevAlignment: instance saved at the folder location.
"""
clf = SobolevAlignment()
if with_model:
clf.scvi_models = {}
for x in ['source', 'target']:
clf.scvi_models[x] = scvi.model.SCVI.load(
'%s/scvi_model_%s'%(folder, x)
)
if with_krr:
clf.approximate_krr_regressions_ = {}
for x in ['source', 'target']:
clf.approximate_krr_regressions_[x] = KRRApprox.load('%s/krr_approx_%s/'%(folder, x))
# Load params
clf.krr_params = load(open('%s/krr_params.pkl'%(folder), 'rb'))
clf.scvi_params = load(open('%s/scvi_params.pkl'%(folder), 'rb'))
if 'fit_params.pkl' in os.listdir(folder):
clf._fit_params = load(open('%s/fit_params.pkl'%(folder), 'rb'))
# Load results
if 'alignment_M_X.npy' in os.listdir(folder):
clf.M_X = np.load('%s/alignment_M_X.npy'%(folder))
elif 'alignment_M_X.pt' in os.listdir(folder):
clf.M_X = torch.load(open('%s/alignment_M_X.pt'%(folder), 'rb'))
if 'alignment_M_Y.npy' in os.listdir(folder):
clf.M_Y = np.load('%s/alignment_M_Y.npy'%(folder))
elif 'alignment_M_Y.pt' in os.listdir(folder):
clf.M_Y = torch.load(open('%s/alignment_M_Y.pt'%(folder), 'rb'))
if 'alignment_M_XY.npy' in os.listdir(folder):
clf.M_XY = np.load('%s/alignment_M_XY.npy'%(folder))
elif 'alignment_M_XY.pt' in os.listdir(folder):
clf.M_XY = torch.load(open('%s/alignment_M_XY.pt'%(folder), 'rb'))
if 'alignment_cosine_sim.npy' in os.listdir(folder):
clf.cosine_sim = np.load('%s/alignment_cosine_sim.npy'%(folder))
elif 'alignment_cosine_sim.pt' in os.listdir(folder):
clf.cosine_sim = torch.load(open('%s/alignment_cosine_sim.pt'%(folder), 'rb'))
if 'alignment_principal_angles.npy' in os.listdir(folder):
clf.principal_angles = np.load('%s/alignment_principal_angles.npy'%(folder))
elif 'alignment_principal_angles.pt' in os.listdir(folder):
clf.principal_angles = torch.load(open('%s/alignment_principal_angles.pt'%(folder), 'rb'))
clf.sqrt_inv_M_X_ = mat_inv_sqrt(clf.M_X)
clf.sqrt_inv_M_Y_ = mat_inv_sqrt(clf.M_Y)
clf.sqrt_inv_matrices_ = {
'source': clf.sqrt_inv_M_X_,
'target': clf.sqrt_inv_M_Y_
}
clf._compute_principal_vectors()
if 'frob_norm_param.csv' in os.listdir(folder):
clf._frob_norm_param = np.loadtxt(open('%s/frob_norm_param.csv'%(folder), 'r'))
return clf
def plot_training_metrics(self, folder: str='.'):
"""
Plot the different training metric for the source and target scVI modules.
"""
if not os.path.exists(folder) and not os.path.isdir(folder):
os.mkdir(folder)
for x in self.scvi_models:
for metric in self.scvi_models[x].history:
plt.figure(figsize=(6, 4))
plt.plot(self.scvi_models[x].history[metric])
plt.xlabel('Epoch', fontsize=20, color='black')
plt.ylabel(metric, fontsize=20, color='black')
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.tight_layout()
plt.savefig('%s/%s_model_train_%s.png'%(folder, x, metric), dpi=300)
plt.show()
def plot_cosine_similarity(
self,
folder: str='.',
absolute_cos: bool=False
):
if absolute_cos:
sns.heatmap(np.abs(self.cosine_sim), cmap='seismic_r', center=0)
else:
sns.heatmap(self.cosine_sim, cmap='seismic_r', center=0)
plt.xticks(fontsize=12, color='black')
plt.yticks(fontsize=12, color='black')
plt.xlabel('Tumor', fontsize=25, color='black')
plt.ylabel('Cell lines', fontsize=25)
plt.tight_layout()
plt.savefig(
'%s/%scosine_similarity.png' % (folder, 'abs_' if absolute_cos else ''),
dpi=300
)
plt.show()
def compute_error(self, size=-1):
"""
Compute error of the KRR approximation on the input (data used for VAE training) and used for KRR.
:return:
"""
return {
'source': self._compute_error_one_type('source', size=size),
'target': self._compute_error_one_type('target', size=size)
}
def _compute_error_one_type(self, data_type, size=-1):
# KRR error of input data
latent = self.scvi_models[data_type].get_latent_representation()
if self._fit_params['lib_size_norm']:
input_krr_pred = self.scvi_models[data_type].get_normalized_expression(
return_numpy=True,
library_size=DEFAULT_LIB_SIZE
)
else:
input_krr_pred = self.training_data[data_type].X
if self.krr_log_input_:
input_krr_pred = np.log10(input_krr_pred+1)
if data_type == ' target':
input_krr_pred = _frobenius_normalisation(
data_type,
input_krr_pred,
self._frob_norm_param is not None
)
input_krr_pred = StandardScaler(with_mean=self.mean_center, with_std=self.unit_std).fit_transform(input_krr_pred)
input_krr_pred = self.approximate_krr_regressions_[data_type].transform(torch.Tensor(input_krr_pred))
input_spearman_corr = np.array([scipy.stats.spearmanr(x,y)[0] for x,y in zip(input_krr_pred.T, latent.T)])
input_krr_diff = input_krr_pred - latent
input_mean_square = torch.square(input_krr_diff)
input_factor_mean_square = torch.mean(input_mean_square, axis=0)
input_latent_mean_square = torch.mean(input_mean_square)
input_factor_reconstruction_error = np.linalg.norm(input_krr_diff, axis=0) / np.linalg.norm(latent, axis=0)
input_latent_reconstruction_error = np.linalg.norm(input_krr_diff) / np.linalg.norm(latent)
del input_krr_pred, input_mean_square, input_krr_diff
gc.collect()
# KRR error of artificial data
if size > 1:
subsamples = np.random.choice(np.arange(self.artificial_samples_[data_type].shape[0]), size, replace=False)
elif size <= 0:
return {
'factor':{
'MSE': {
'input': input_factor_mean_square.detach().numpy()
},
'reconstruction_error': {
'input': input_factor_reconstruction_error
},
'spearmanr': {
'input': np.array(input_spearman_corr)
},
},
'latent':{
'MSE': {
'input': input_latent_mean_square.detach().numpy()
},
'reconstruction_error': {
'input': input_latent_reconstruction_error
},
'spearmanr': {
'input': np.mean(input_spearman_corr)
},
}
}
else:
subsamples = np.arange(self.artificial_samples_[data_type].shape[0])
training_krr_diff = self.approximate_krr_regressions_[data_type].transform(torch.Tensor(self.artificial_samples_[data_type][subsamples]))
training_spearman_corr = np.array([scipy.stats.spearmanr(x,y)[0] for x,y in zip(training_krr_diff.T, self.artificial_embeddings_[data_type][subsamples].T)])
training_krr_diff = training_krr_diff - self.artificial_embeddings_[data_type][subsamples]
training_krr_factor_reconstruction_error = np.linalg.norm(training_krr_diff, axis=0) / np.linalg.norm(self.artificial_embeddings_[data_type][subsamples], axis=0)
training_krr_latent_reconstruction_error = np.linalg.norm(training_krr_diff) / np.linalg.norm(self.artificial_embeddings_[data_type][subsamples])
return {
'factor':{
'MSE': {
'input': input_factor_mean_square.detach().numpy(),
'artificial': torch.mean(torch.square(training_krr_diff), axis=0).detach().numpy()
},
'reconstruction_error': {
'input': input_factor_reconstruction_error,
'artificial': training_krr_factor_reconstruction_error
},
'spearmanr': {
'input': np.array(input_spearman_corr),
'artificial': np.array(training_spearman_corr)
},
},
'latent':{
'MSE': {
'input': input_latent_mean_square.detach().numpy(),
'artificial': torch.mean(torch.square(training_krr_diff)).detach().numpy()
},
'reconstruction_error': {
'input': input_latent_reconstruction_error,
'artificial': training_krr_latent_reconstruction_error
},
'spearmanr': {
'input': np.mean(input_spearman_corr),
'artificial': np.mean(training_spearman_corr)
},
}
}
def feature_analysis(
self,
max_order: int=1,
gene_names:list=None
):
"""
Computes the gene contributions (feature weights) associated with the KRRs which approximate the latent factors and the SPVs.
Technically, given the kernel machine which approximates a latent factor (KRR), this method computes the weights associated
with the orthonormal basis in the Gaussian-kernel associated Sobolev space.
Parameters
----------
max_order: int, default to 1
Order of the features to compute. 1 corresponds to linear features (genes), two to interaction terms.
gene_names: list of str, default to None
Names of the genes passed as input to Sobolev Alignment. <b>WARNING</b> Must be in the same order as the input to
SobolevAlignment.fit
"""
# Make kernel parameter
if 'gamma' in self.krr_params['source']['kernel_params'] and 'gamma' in self.krr_params['target']['kernel_params']:
gamma_s = self.krr_params['source']['kernel_params']['gamma']
gamma_t = self.krr_params['target']['kernel_params']['gamma']
elif 'sigma' in self.krr_params['source']['kernel_params'] and 'sigma' in self.krr_params['target']['kernel_params']:
gamma_s = 1 / (2 * self.krr_params['source']['kernel_params']['sigma'] ** 2)
gamma_t = 1 / (2 * self.krr_params['target']['kernel_params']['sigma'] ** 2)
assert gamma_s == gamma_t
self.gamma = gamma_s
# Compute the sample offset (matrix O_X and O_Y)
self.sample_offset = {
x:_compute_offset(self.approximate_krr_regressions_[x].anchors(), self.gamma)
for x in self.training_data
}
if gene_names is None:
self.gene_names = self.training_data['source'].columns
else:
self.gene_names = gene_names
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.factor_level_feature_weights_df = {}
for x in self.training_data:
# Computes all the features of order d.
basis_feature_weights_df = higher_order_contribution(
d=max_order,
data=self.approximate_krr_regressions_[x].anchors().cpu().detach().numpy(),
sample_offset=self.sample_offset[x],
| |
#!/usr/bin/env python
"""Parses MARC-format data. The MARC class has a constructor
which takes binary MARC data.
"""
# This file should be available from
# http://www.pobox.com/~asl2/software/PyZ3950/
# and is licensed under the X Consortium license:
# Copyright (c) 2001, <NAME>, <EMAIL>
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
# cheshire3: Copied in from PyZ3950 for MARC handling without
# needing Z39.50 support. As I wrote a big chunk and we
# paid Aaron for a big chunk, I don't feel too bad! :)
# --azaroth (2008-09-12)
import sys
import string
from xml.sax.saxutils import escape
import re
from cheshire3 import marc_to_unicode
class MarcError (Exception):
    """Raised when binary MARC data cannot be parsed (e.g. malformed field)."""
    pass
def is_fixed (num):
    """Return True when *num* is a MARC fixed (control) field tag, i.e. below 010."""
    fixed_field_threshold = 10
    return num < fixed_field_threshold
# Record punctuation control characters from the MARC/ISO 2709 transmission
# format (field terminator, subfield delimiter, record terminator).
fieldsep = '\x1e'
sep = '\x1f'#'\x1f' # XXX or 1D for pseudo-marc output from z3950.c
recsep = '\x1d'
# Matches any non-digit character; used to sanitise malformed field tags
# before converting them to integers.
intfixre = re.compile('[^0-9]')
# Module-wide debug flag; non-zero enables extra consistency checks/warnings.
debug = 0
# Attributes for SGML DTD (!!!) If not present, then I1 I2
# NOTE: duplicate literal keys for tags 656 and 853 were removed; they mapped
# to identical values, so the resulting dictionary is unchanged.
attrHash = { 22 : ['ISDSLvl', 'I2'],
             24 : ['StdNum', 'DiffInd'], 28 : ['PubNmTyp', 'NteAdEnty'],
             33 : ['DateType', 'EventTyp'], 34 : ['ScapeTyp', 'I2'],
             41 : ['TransInd', 'I2'], 45 : ['TimePrd', 'I2'],
             50 : ['InLofC', 'CNSrc'], 55 : ['InNLC', 'CNCLSSrc'],
             60 : ['InNLM', 'CNSrc'], 70 : ['InNAL', 'I2'],
             72 : ['I1', 'CodeSrc'], 82 : ['Edition', 'CNSrc'],
             86 : ['NumbrSrc', 'I2'], 100 : ['NameType', 'I2'],
             110: ['NameType', 'I2'], 111 : ['NameType', 'I2'],
             130: ['NFChars', 'I2'], 150 : ['I1', 'NFChars'],
             151: ['I1', 'NFChars'], 210 : ['AddEnty', 'I2'],
             211: ['AddEnty', 'NFChars'], 212 : ['AddEnty', 'I2'],
             214: ['AddEnty', 'NFChars'], 222 : ['I1', 'NFChars'],
             240: ['PrntDisp', 'NFChars'], 242 : ['AddEnty', 'NFChars'],
             243: ['PrntDisp', 'NFChars'], 245 : ['AddEnty', 'NFChars'],
             246: ['NCAddEty', 'TitleTyp'],247 : ['AddEnty', 'NoteCntl'],
             270: ['Level', 'AddrType'], 355 : ['CntlElmt', 'I2'],
             362: ['DTFormat', 'I2'], 400 : ['NameType', 'Pronoun'],
             410: ['NameType', 'Pronoun'], 411 : ['NameType', 'Pronoun'],
             430: ['I1', 'NFChars'], 440 : ['I1', 'NFChars'],
             450: ['I1', 'NFChars'], 451 : ['I1', 'NFChars'],
             490: ['Traced', 'I2'], 505 : ['DCC', 'CDLevel'],
             510: ['CoverLoc', 'I2'], 511 : ['DCC', 'I2'],
             516: ['DCC', 'I2'], 521 : ['DCC', 'I2'],
             520: ['DCC', 'I2'], 522 : ['DCC', 'I2'],
             524: ['DCC', 'I2'], 535 : ['Holds', 'I2'],
             537: ['DCC', 'I2'], 551 : ['I1', 'NFChars'],
             555: ['DCC', 'I2'], 556 : ['DCC', 'I2'],
             565: ['DCC', 'I2'], 567 : ['DCC', 'I2'],
             581: ['DCC', 'I2'], 582 : ['DCC', 'I2'],
             586: ['DCC', 'I2'], 600 : ['NameType', 'SubjSys'],
             610: ['NameType', 'SubjSys'], 611 : ['NameType', 'SubjSys'],
             630: ['NFChars', 'SubjSys'], 650 : ['SubjLvl', 'SubjSys'],
             651: ['I1', 'SubjSys'], 653 : ['IndexLvl', 'I2'],
             654: ['IndexLvl', 'I2'], 655 : ['Type', 'Source'],
             656: ['I1', 'Source'],
             700: ['NameType','EntryType'],710 : ['NameType','EntryType'],
             711: ['NameType','EntryType'],730 : ['NFChars','EntryType'],
             740: ['NFChars','EntryType'], 760 : ['NoteCntl', 'I2'],
             762: ['NoteCntl', 'I2'], 765 : ['NoteCntl', 'I2'],
             767: ['NoteCntl', 'I2'], 772 : ['NoteCntl', 'I2'],
             773: ['NoteCntl', 'I2'], 775 : ['NoteCntl', 'I2'],
             776: ['NoteCntl', 'I2'], 777 : ['NoteCntl', 'I2'],
             780: ['NoteCntl', 'RelType'], 785 : ['NoteCntl', 'RelType'],
             787: ['NoteCntl', 'I2'], 800 : ['NameType', 'I2'],
             810: ['NameType', 'I2'], 811 : ['NameType', 'I2'],
             830: ['I1', 'NFChars'], 852 : ['Scheme', 'Order'],
             853: ['CmprsExpnd', 'Eval'],
             856: ['AccsMeth', 'I2'], 863 : ['EncLevel', 'HoldForm'],
             864: ['EncLevel','HoldForm'], 865 : ['EncLevel', 'HoldForm'],
             866: ['EncLevel','Notation'], 867 : ['EncLevel', 'Notation'],
             868: ['EncLevel','Notation'], 886 : ['FldType', 'I2']}
# Spelled-out names for digit subfield codes -- presumably because SGML/XML
# element names cannot start with a digit (see attrHash's SGML DTD note);
# TODO confirm against the SGML output code.
subfieldHash = {'1' : "one", '2' : "two", '3' : "three", '4' : "four", '5' : "five",
                '6' : "six", '7' : "seven", '8' : "eight", '9' : "nine", '0' : "zero"}
# takes text, turns it into tuple of (ind1, ind2, list of (subfield, val))
# where subfield may repeat within the list.
# We need a structure like this in order to correctly parse both records:
# 650 0 $aWorld War, 1939-1945$xCampaigns$zTunisia
# 650 0 $aReal property$zMississippi$zTippah County$xMaps
# (taken from _USMARC Format for Bibliographic Data_, Prepared by Network
# Development and MARC Standards Office, Cataloging Distribution Service,
# Library of Congress, section 650 p. 5, page printed Dec 1991, looseleaf
# binder issued in 1988.
def parse_sub (field):
    """Parse one variable-length MARC field body.

    *field* holds two indicator characters followed by separator-delimited
    subfields.  Returns (indicator1, indicator2, [(subfield_code, value), ...])
    -- subfield codes may repeat -- or None when the field cannot be parsed.
    """
    if len (field) < 4:
        # NOTE(review): this compares a slice of up to two characters against
        # a space-only literal; verify the literal's width matches the slice,
        # otherwise blank-indicator fields may not be caught as intended.
        if field[:2] == ' ':
            # Is this legit? I've seen it, so handle correctly.
            # specifically for au=Johansen, Arnold S from z3950.bibsys.no:2100
            return (' ', ' ', [])
        elif len(field) == 3:
            # Indicators only, no subfield data.
            return (field[0], field[1], [])
        return None
    if debug and field [2] <> sep:
        print "Bad field [2]", repr (field[2])
    ind1 = field[0]
    ind2 = field[1]
    sublist = []
    # The body starts with a subfield separator, so split() yields an empty
    # first element that is skipped below.
    splitlist = string.split (field[2:], sep)
    for sub in splitlist:
        if (sub == ''): # we begin w/ sep, so there's an empty prefix
            continue
        # First character is the subfield code; the rest is its stripped value.
        sublist.append ((sub[0], string.strip(sub[1:])))
    return (ind1, ind2, sublist)
class MARC:
"""Parses data into 'fields' attribute, indexed by field number.
Each value is a list. For fixed fields, it's a list of the string data
(one string for each occurence of the field in the original data). For
other fields, each list element is a tuple of (indicator 1, indicator 2,
subdata), where subdata is a list of tuples of (subfield indicator,
subfield data). Yes, this is kinda lame and I really should have
used structures, but this was some of the first Python code I ever
wrote.
"""
hdrbits = [5,6,7,8, 9, 17,18,19]
# Status, Type, Bib. Level, Type of Ctrl., Enc. Level,
# Descr. Cat. Form, Linked Rcd Reqt are all part of pseudoentry 0
    def __init__(self, MARC = None):
        """Parse binary MARC data into self.fields (tag -> list of values).

        With MARC=None, builds an empty record to be filled in later.
        Python 2 code: operates on byte strings.
        """
        self.fields = {}
        self.ok = 0
        self.marc = MARC
        if MARC is None:
            return # we'll write to it later
        # Leader: record length and base address of the data portion.
        # NOTE(review): extract_int is defined elsewhere in this class;
        # presumably its (start, end) range is inclusive -- confirm.
        reclen = self.extract_int (0,4)
        self.reclen = reclen
        baseaddr = self.extract_int (12, 16)
        # Pseudo-field 0 collects selected leader bytes (status, type, etc.).
        zerostr = ""
        for ind in self.hdrbits: zerostr = zerostr + self.marc[ind]
        self.fields [0] = [zerostr]
        if (debug):
            assert (self.marc[9] == ' ') # 'a' would be UCS/Unicode
            assert (self.marc[10] == '2' and self.marc[11] == '2')
            assert (self.marc[20:22] == '45')
        # Walk the directory: 12-byte entries (3-byte tag, 4-byte length,
        # 5-byte start offset) beginning right after the 24-byte leader.
        pos = 24
        lastpos = baseaddr
        while pos < baseaddr:
            tag = self.marc[pos:pos+3]
            # A field/record terminator here means the directory has ended.
            if tag [0] == '\035' or tag [0] == '\036':
                break
            fieldlen = self.extract_int (pos + 3, pos + 6)
            startpos = self.extract_int (pos + 7, pos + 11)
            pos = pos + 12
            # Field data is located relative to the base address.
            start = baseaddr + startpos
            end = start + fieldlen
            line = self.marc[start:end]
            lastpos = startpos
            # Strip the trailing field terminator, if present.
            if line [-1] == '\x1E':
                line = line[:-1]
            elif (debug):
                print "Weird, no hex 1E for", tag, repr(line)
            try:
                field = string.atoi (tag)
            except ValueError:
                # replace all non int chrs
                tag = intfixre.sub('0', tag)
                field = string.atoi(tag)
            if is_fixed (field):
                self.fields[field] = [line]
                # 1-elt list for orthogonality of processing
            else:
                # Variable fields: parse indicators and subfields; repeated
                # tags accumulate in the same list.
                ps = parse_sub (line)
                if ps is None:
                    raise MarcError (repr(line))
                self.fields.setdefault (field, []).append (ps)
        self.ok = 1
        # XXX should do more error-checking
    def __str__ (self):
        """Render all fields in ascending tag order, one per line.

        Python 2 only: relies on dict.keys() returning a sortable list.
        """
        k = self.fields.keys ()
        k.sort ()
        lst = []
        for field in k:
            lst.append (self.stringify_field (field))
        return "MARC: \n" + "\n".join (lst)
def stringify_field (self, k):
f = | |
<reponame>EticaAI/HXL-Data-Science-file-formats
#!/usr/bin/env python3
# ==============================================================================
#
# FILE: hxlquickimport
#
# USAGE: hxlquickimport hxlated-data.csv my-exported-file.hxl
# cat hxlated-data.csv | hxlquickimport > my-exported-file.hxl
#
# ### To expose proxy via web (uses hug + ngrok)
# # 1.A If is a file on bin/hxlquickimport
# hug -p 9001 -f bin/hxlquickimport
# # 1.B If was installed with pip3 install hdp-toolchain
# hug -p 9001 -f "$(which hxlquickimport)"
# # 2. To expose via web, on a different terminal, do this:
# ngrok http 9001
#
# DESCRIPTION: hxlquickimport is a quick (and wrong) way to import
# non-HXL dataset (like an .csv or .xlsx, but requires headers
# already on the first row) without human intervention.
#                It will try to slugify the original header and add it as an
#                +attribute for a base hashtag like #meta.
#                The result may be an HXL file with valid syntax (that can be
#                used for automated testing) but most HXL-powered tools would
#                still need human review.
# How does it work?
# "[Max Power] Kids: there's three ways to do things; the right
# way, the wrong way and the Max Power way!
# [<NAME>] Isn't that the wrong way?
# [Max Power] Yeah, but faster!
# (via https://www.youtube.com/watch?v=7P0JM3h7IQk)"
# How to do it the right way?
# Read the documentation on https://hxlstandard.org/.
# (Tip: both HXL Postcards and the hxl-hashtag-chooser are very
# helpful!)
#
# OPTIONS: ---
#
# REQUIREMENTS: - python3
# - libhxl (@see https://pypi.org/project/libhxl/)
# - hug (https://github.com/hugapi/hug/)
# BUGS: ---
# NOTES: ---
# AUTHOR: <NAME> <<EMAIL>>
# COMPANY: EticaAI
# LICENSE: Public Domain dedication
# SPDX-License-Identifier: Unlicense
# VERSION: v1.2
# CREATED: 2021-01-29 07:48 UTC
# REVISION: 2021-01-29 17:34 UTC v1.0
# 2021-02-07 19:32 UTC v1.1 drafted HTTP interface with hug
# 2021-04-20 00:59 UTC v1.2 installable with pip hdp-toolchain
# ==============================================================================
import sys
import os
import logging
import argparse
# @see https://github.com/HXLStandard/libhxl-python
# pip3 install libhxl --upgrade
# Do not import hxl, to avoid circular imports
import hxl.converters
import hxl.filters
import hxl.io
# @see https://github.com/hugapi/hug
# pip3 install hug --upgrade
import hug
import csv
import tempfile
from slugify import slugify
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
class HXLQuickImport:
    """Quick (and deliberately naive) importer of non-HXL tabular data.

    It slugifies an existing header row into HXL attributes on a base
    hashtag (see hxlquickimport_header), producing syntactically valid
    HXL that still requires human review before serious use.
    """

    def __init__(self):
        """Construct an HXLQuickImport with no parsed arguments yet."""
        self.hxlhelper = None  # HXLUtils helper, created lazily
        self.args = None       # argparse.Namespace, set by make_args_hxlquickimport()
        # Posix exit codes
        self.EXIT_OK = 0
        self.EXIT_ERROR = 1
        self.EXIT_SYNTAX = 2

    def make_args_hxlquickimport(self):
        """Build the CLI argument parser and parse sys.argv.

        Side effects: stores an HXLUtils instance on self.hxlhelper and
        the parsed namespace on self.args.

        :return: the parsed argparse namespace.
        """
        self.hxlhelper = HXLUtils()
        parser = self.hxlhelper.make_args(
            description=("""
hxlquickimport is a quick (and wrong) way to import
non-HXL dataset (like an .csv or .xlsx, but requires headers already on the
first row) without human intervention. It will try to slugify the original
header and add as +attributefor a base hashtag like #meta.
The result may be an HXL with valid syntax (that can be used for automated
testing) but most HXL powered tools would still be human review.
How does it work?
"[Max Power] Kids: there's three ways to do things; the right way,
the wrong way and the Max Power way!
[<NAME>] Isn't that the wrong way?
[Max Power] Yeah, but faster!"
(via https://www.youtube.com/watch?v=7P0JM3h7IQk)
How to do it the right way?
Read the documentation on https://hxlstandard.org/.
(Tip: both HXL Postcards and the hxl-hashtag-chooser are very helpful!)
"""))
        self.args = parser.parse_args()
        return self.args

    def execute_cli(self, args,
                    stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
        """Main entrypoint of HXLQuickImport when executed via cli.

        :param args: parsed argparse namespace (see make_args_hxlquickimport)
        :return: POSIX exit status (EXIT_OK on success)
        """
        with self.hxlhelper.make_source(args, stdin) as source:
            # NOTE: args is deliberately forwarded as the second argument;
            # hxlquickimport recovers the namespace from there.
            self.hxlquickimport(source, args, True)
        return self.EXIT_OK

    def execute_web(self, source_url, stdin=STDIN, stdout=sys.stdout,
                    stderr=sys.stderr):
        """Entrypoint for use outside the cli (e.g. the built-in hug HTTP app).

        NOTE: still a placeholder (since hxlquickimport v1.0): it does not
        yet produce the same output as the cli path, nor the correct
        mimetype/compression. (fititnt, 2021-02-07)
        """
        self.hxlhelper = HXLUtils()
        # Initialize before the try so the finally block cannot hit an
        # unbound name when NamedTemporaryFile itself fails.
        temp_input = None
        temp_output = None
        try:
            temp_input = tempfile.NamedTemporaryFile('w')
            temp_output = tempfile.NamedTemporaryFile('w')
            # Minimal stand-in for the argparse namespace make_source expects.
            webargs = type('obj', (object,), {
                "infile": source_url,
                "sheet_index": None,
                "selector": None,
                'sheet': None,
                'http_header': None,
                'ignore_certs': False
            })
            with self.hxlhelper.make_source(webargs, stdin) as source:
                for line in source.gen_raw(True, True):
                    temp_input.write(line)
            temp_input.seek(0)
            # NOTE(review): this passes file *names* where hxlquickimport
            # expects an HXL source object — the web path is known-broken
            # scaffolding. TODO: feed a real hxl source and honor outfile.
            self.hxlquickimport(temp_input.name, temp_output.name, False)
            with open(temp_output.name, 'r') as result_file:
                return result_file.read()
        finally:
            # close() also deletes the NamedTemporaryFile backing files
            if temp_input is not None:
                temp_input.close()
            if temp_output is not None:
                temp_output.close()

    def hxlquickimport(self, hxlated_input, tab_output, is_stdout):
        """Write the quick-imported dataset to stdout or to args.outfile.

        :param hxlated_input: an hxl source exposing _get_row()
            (NOTE(review): _get_row is a private libhxl API — confirm it
            is still available in the pinned libhxl version)
        :param tab_output: historically receives the parsed args namespace
            from execute_cli (it is NOT a file path on that code path)
        :param is_stdout: True when invoked from the cli
        """
        # BUG FIX: the original body referenced a bare name `args` that is
        # not defined in this scope (NameError at runtime). On the cli path
        # execute_cli actually passes the namespace as `tab_output`, so
        # recover it from there, falling back to self.args.
        args = tab_output if hasattr(tab_output, 'outfile') else self.args
        header_original = hxlated_input._get_row()
        header_new = self.hxlquickimport_header(header_original)
        if not args.outfile:
            # txt_writer = csv.writer(sys.stdout, delimiter='\t')
            txt_writer = csv.writer(sys.stdout)
            self._write_rows(txt_writer, header_new, hxlated_input)
        else:
            # Truncate any previous content before appending fresh output.
            open(args.outfile, 'w').close()
            with open(args.outfile, 'a') as new_txt:
                # txt_writer = csv.writer(new_txt, delimiter='\t')
                txt_writer = csv.writer(new_txt)
                self._write_rows(txt_writer, header_new, hxlated_input)

    def _write_rows(self, txt_writer, header_new, hxlated_input):
        """Write the rewritten header, then drain every row from the source."""
        txt_writer.writerow(header_new)
        line = hxlated_input._get_row()
        while line:
            txt_writer.writerow(line)
            try:
                line = hxlated_input._get_row()
            except Exception:
                # _get_row raises at end-of-data; treat as exhaustion.
                line = False

    def hxlquickimport_header(self, hxlated_header, basehashtag="#item"):
        """HXLate a CSV-like header row without human intervention.

        Replaces each original header cell, IN PLACE, with the base
        hashtag plus the slugified original header as an attribute:
            ID_REGISTRO  -> #item+id_registro
            NACIONALIDAD -> #item+nacionalidad
        Known limitation: it does not avoid collisions with HXL data
        types (BOOL -> #item+bool, number -> #item+number,
        phone -> #item+phone).

        :param hxlated_header: list of header strings (mutated in place)
        :param basehashtag: base hashtag to prefix every column with
        :return: the same (mutated) list
        """
        for idx, _ in enumerate(hxlated_header):
            hxlated_header[idx] = basehashtag + '+' \
                + slugify(hxlated_header[idx], separator="_")
        return hxlated_header
class HXLUtils:
    """
    HXLUtils contains functions from the Console scripts of libhxl-python
    (HXLStandard/libhxl-python/blob/master/hxl/scripts.py) with few changes
    to be used as class (and have one single place to change).

    Last update on this class was 2021-01-25.

    Author: <NAME>
    License: Public Domain
    """

    def __init__(self):
        # NOTE(review): relies on the `logging` module being in scope at
        # file level — it is not imported in this script's visible header;
        # confirm it is imported earlier in the file.
        self.logger = logging.getLogger(__name__)
        # Posix exit codes
        self.EXIT_OK = 0
        self.EXIT_ERROR = 1
        self.EXIT_SYNTAX = 2

    def make_args(self, description, hxl_output=True):
        """Set up parser with default arguments.

        @param description: usage description to show
        @param hxl_output: if True (default), include options for HXL output.
        @returns: an argument parser, partly set up.
        """
        parser = argparse.ArgumentParser(description=description)
        # positional input; stdin is used when omitted
        parser.add_argument(
            'infile',
            help='HXL file to read (if omitted, use standard input).',
            nargs='?'
        )
        if hxl_output:
            parser.add_argument(
                'outfile',
                help='HXL file to write (if omitted, use standard output).',
                nargs='?'
            )
        # 1-based on the command line; make_input converts to 0-based
        parser.add_argument(
            '--sheet',
            help='Select sheet from a workbook (1 is first sheet)',
            metavar='number',
            type=int,
            nargs='?'
        )
        parser.add_argument(
            '--selector',
            help='JSONPath expression for starting point in JSON input',
            metavar='path',
            nargs='?'
        )
        # repeatable; merged with $HXL_HTTP_HEADER by make_headers()
        parser.add_argument(
            '--http-header',
            help='Custom HTTP header to send with request',
            metavar='header',
            action='append'
        )
        # CSV-output-only flags
        if hxl_output:
            parser.add_argument(
                '--remove-headers',
                help='Strip text headers from the CSV output',
                action='store_const',
                const=True,
                default=False
            )
            parser.add_argument(
                '--strip-tags',
                help='Strip HXL tags from the CSV output',
                action='store_const',
                const=True,
                default=False
            )
        parser.add_argument(
            "--ignore-certs",
            help="Don't verify SSL connections (useful for self-signed)",
            action='store_const',
            const=True,
            default=False
        )
        parser.add_argument(
            '--log',
            help='Set minimum logging level',
            metavar='debug|info|warning|error|critical|none',
            choices=['debug', 'info', 'warning', 'error', 'critical'],
            default='error'
        )
        return parser

    def add_queries_arg(
        self,
        parser,
        help='Apply only to rows matching at least one query.'
    ):
        """Attach the repeatable -q/--query row-filter option to *parser*."""
        parser.add_argument(
            '-q',
            '--query',
            help=help,
            metavar='<tagspec><op><value>',
            action='append'
        )
        return parser

    def do_common_args(self, args):
        """Process standard args (currently just the root logging level)."""
        logging.basicConfig(
            format='%(levelname)s (%(name)s): %(message)s',
            level=args.log.upper())

    def make_source(self, args, stdin=STDIN):
        """Create a HXL input source."""
        # construct the input object
        input = self.make_input(args, stdin)
        return hxl.io.data(input)

    def make_input(self, args, stdin=sys.stdin, url_or_filename=None):
        """Create an input object"""
        # NOTE(review): default here is sys.stdin (text stream) while
        # make_source defaults to the binary STDIN — confirm intended.
        if url_or_filename is None:
            url_or_filename = args.infile

        # sheet index: CLI is 1-based, hxl.io is 0-based
        sheet_index = args.sheet
        if sheet_index is not None:
            sheet_index -= 1

        # JSONPath selector
        selector = args.selector

        http_headers = self.make_headers(args)

        return hxl.io.make_input(
            url_or_filename or stdin,
            sheet_index=sheet_index,
            selector=selector,
            allow_local=True,
            http_headers=http_headers,
            verify_ssl=(not args.ignore_certs)
        )

    def make_output(self, args, stdout=sys.stdout):
        """Create an output stream."""
        if args.outfile:
            return FileOutput(args.outfile)
        else:
            return StreamOutput(stdout)

    def make_headers(self, args):
        """Merge HTTP headers from $HXL_HTTP_HEADER and --http-header flags."""
        # get custom headers
        header_strings = []
        header = os.environ.get("HXL_HTTP_HEADER")
        if header is not None:
            header_strings.append(header)
        if args.http_header is not None:
            header_strings += args.http_header
        http_headers = {}
        for header in header_strings:
            # split "Name: value" on the first colon only
            parts = header.partition(':')
            http_headers[parts[0].strip()] = parts[2].strip()
        return http_headers
class FileOutput(object):
    """Context manager owning a writable text file for HXL output.

    Behaviour mirrors the console-script helper from libhxl-python
    (last synced 2021-01-25; callers use the ``output`` attribute).

    Author: <NAME>
    License: Public Domain
    """

    def __init__(self, filename):
        # Open eagerly so a bad path fails at construction time.
        self.output = open(filename, 'w')

    def __enter__(self):
        return self

    def __exit__(self, exc_value, exc_type, traceback):
        # Always release the handle, whether or not an error occurred.
        self.output.close()
class StreamOutput(object):
"""
StreamOutput contains is based on libhxl-python with no changes..
Last update on this class was 2021-01-25.
Author: <NAME>
License: Public Domain
"""
def | |
from collections import Counter
from tqdm import tqdm
import itertools
import pandas as pd
import numpy as np
from db import get_annotation_db
from journal import get_journal_text, get_journal_info, get_journal_text_representation
# Canonical list of caregiver-responsibility annotation labels, paired
# one-to-one (by position) with the two-letter codes below.
responsibility_labels = ["communicating", "info_filtering", "clinical_decisions", "preparation", "symptom_management",
                         "coordinating_support", "sharing_medical_info", "compliance",
                         "managing_transitions", "financial_management", "continued_monitoring", "giving_back",
                         "behavior_changes"]
responsibility_codes = ["CO", "IF", "CD", "PR", "ST", "CS", "SM", "CP", "MT", "FM", "CM", "GB", "BC"]
# e.g. "communicating" -> "CO"; positions in the two lists line up.
responsibility_label_to_code_map = dict(zip(responsibility_labels, responsibility_codes))
# Variants of the label set accepted by different consumers.
responsibility_labels_with_support_management = responsibility_labels + ["support_management"]
responsibility_labels_with_none = responsibility_labels + ["none"]
responsibility_labels_with_all = responsibility_labels + ["none", "support_management"]

# The subset of responsibility labels with inter-rater reliability
# (Cohen's kappa) > 0.4.
high_irr_responsibility_labels = ["coordinating_support", "sharing_medical_info", "compliance", "financial_management", "giving_back", "behavior_changes"]
high_irr_responsibility_codes = [responsibility_label_to_code_map[resp_label] for resp_label in high_irr_responsibility_labels]


def sort_responsibility_list(responsibility_list):
    """Return *responsibility_list* sorted into the canonical label order.

    :raises ValueError: if the list contains an unknown responsibility.
    """
    return sorted(responsibility_list, key=responsibility_labels_with_all.index)


def get_labels_from_responsibility_string(responsibility_string, include_none=False,
                                          warn_on_legacy_responsibilities=True):
    """Parse a pipe-delimited annotation string into a sorted label list.

    :param responsibility_string: e.g. "communicating|compliance"
    :param include_none: if True, return ['none'] for an empty string and
        keep an explicit "none" label; otherwise "none" is stripped.
    :param warn_on_legacy_responsibilities: print a warning when the legacy
        "support_management" label is rewritten to its newer codes.
    :return: labels sorted into canonical order
    :raises ValueError: if an invalid responsibility occurs in the string
    """
    if responsibility_string == "":
        return ['none'] if include_none else []
    labels = responsibility_string.split('|')
    # BUG FIX: the original checked `len(labels) == 0` here, but str.split
    # never returns an empty list, so that warning branch was unreachable
    # dead code and has been removed.
    if not include_none and "none" in labels:
        labels.remove("none")
    # For legacy reasons we accept support_management, but replace it with
    # the newer version of the codebook (two more specific labels).
    if "support_management" in labels:
        labels.remove("support_management")
        labels.append("sharing_medical_info")
        labels.append("coordinating_support")
        if warn_on_legacy_responsibilities:
            print("WARNING: Replaced support management with its newer codes.")
    # Sorting guarantees a stable, canonical order (and raises ValueError
    # on any label that is not part of the codebook).
    return sort_responsibility_list(labels)
def get_responsibility_annotations(conflict_resolution_strategy="or"):
    """Load per-journal responsibility annotations, resolving conflicts.

    :param conflict_resolution_strategy: forwarded to
        resolve_responsibility_annotation_conflicts() when multiple
        annotators disagree and no manual correction exists.
    :return: list of dicts with keys site_id, journal_oid,
        conflict_status, responsibilities.

    NOTE(review): get_responsibility_annotations_dataframe() is described
    elsewhere in this module as the fixed/preferred variant.
    """
    try:
        db = get_annotation_db()
        # the most recent version of the annotation guidance was set at this date,
        # so we restrict to since this time period
        created_at = '2018-08-23'
        cursor = db.execute("""
            SELECT a.site_id, a.journal_oid, a.data, a.username, c.correct_username
            FROM journalAnnotation a LEFT JOIN journalAnnotationConflictResolution c
            ON a.site_id = c.site_id AND a.journal_oid = c.journal_oid AND a.annotation_type = c.annotation_type
            WHERE a.annotation_type = "journal_patient_responsibilities" AND a.data <> ""
            AND a.created >= ?
            GROUP BY a.site_id, a.journal_oid, a.username
            ORDER BY a.id DESC
        """, (created_at,))
        responsibility_annotations = []

        # Sort the returned annotations so that we can group by the individual journals
        def group_by_journal_function(row):
            return row['site_id'], row['journal_oid']

        # group by the journals, writing a single line for each journal in the dataset
        all_rows = cursor.fetchall()
        all_rows.sort(key=group_by_journal_function)
        for key, group in itertools.groupby(all_rows, group_by_journal_function):
            rows = list(group)
            site_id, journal_oid = key
            # We are considering all of the annotations for a single journal here
            data = None
            responsibilities = None
            if len(rows) == 1:
                # single annotator: their annotation stands; no correction expected
                assert rows[0]['correct_username'] is None or rows[0]['correct_username'] == ""
                annotator_usernames = rows[0]['username']
                conflict_status = "SINGLE USER"
                data = rows[0]['data']
            else:  # 2 or more annotators
                # get the list of annotator names
                annotator_usernames = "|".join(sorted([row['username'] for row in rows]))
                if rows[0]['correct_username'] is not None and rows[0]['correct_username'] != "":
                    # this annotation was corrected!
                    correct_username = rows[0]['correct_username']
                    conflict_status = "RESOLVED"
                    data = None
                    for row in rows:
                        if row['username'] == correct_username:
                            data = row['data']
                            # a matching correction row with NULL data is unexpected
                            if data is None:
                                raise ValueError("Annotation unexpectedly lacks data.")
                    if data is None:
                        # this condition implies an invalid correction in the database
                        print(
                            f"WARNING: {correct_username} not found in {annotator_usernames}. Replacing with 'unknown'.")
                        data = 'unknown'
                        # NOTE(review): 'unknown' is not a valid label, so
                        # get_labels_from_responsibility_string() below will
                        # raise ValueError on this path — confirm intended.
                else:  # no correction for this journal
                    responsibilities, conflict_status = \
                        resolve_responsibility_annotation_conflicts(rows,
                                                                    resolution_strategy=conflict_resolution_strategy)
            if data is None and responsibilities is None:
                raise ValueError("Unexpected and unhandled conflicts between a journal's responsibility annotations.")
            if responsibilities is None:
                responsibilities = get_labels_from_responsibility_string(data)
            responsibility_annotation_data = {'site_id': site_id,
                                              'journal_oid': journal_oid,
                                              'conflict_status': conflict_status,
                                              'responsibilities': responsibilities}
            responsibility_annotations.append(responsibility_annotation_data)
        return responsibility_annotations
    finally:
        # NOTE(review): if get_annotation_db() itself raises, `db` is
        # unbound here and this close() raises NameError — confirm.
        db.close()
def get_responsibility_annotations_dataframe():
    """Return one row per journal with conflict-resolved responsibilities.

    Built on get_responsibility_annotations_by_username_dataframe() (the
    preferred, fixed data source). Conflicts without a manual correction
    are resolved with the "or" strategy: the union of every annotator's
    labels.

    :return: pd.DataFrame with columns site_id, journal_oid,
        conflict_status, responsibilities.
    """
    df = get_responsibility_annotations_by_username_dataframe()
    responsibility_annotations = []
    # group by the journals, emitting a single record for each journal
    for key, group in df.groupby(by=['site_id', 'journal_oid']):
        rows = group
        site_id, journal_oid = key
        # We are considering all of the annotations for a single journal here
        responsibilities = None
        if len(rows) == 1:
            # single annotator: their annotation stands; no correction expected
            row = rows.iloc[0]
            assert row['correct_username'] is None or row['correct_username'] == ""
            conflict_status = "SINGLE USER"
            responsibilities = row.responsibilities
        else:  # 2 or more annotators
            # BUG FIX: annotator_usernames used to be defined only in the
            # single-annotator branch, so the warning below raised a
            # NameError whenever a correction referenced a missing user.
            annotator_usernames = "|".join(sorted(rows.username))
            first_row = rows.iloc[0]
            if first_row['correct_username'] is not None and first_row['correct_username'] != "":
                # this annotation was corrected!
                correct_username = first_row['correct_username']
                conflict_status = "RESOLVED"
                selected_row = rows[rows.username == correct_username]
                if len(selected_row) == 0:
                    # this condition implies an invalid correction in the database
                    print(
                        f"WARNING: {correct_username} not found in {annotator_usernames}. Replacing with 'unknown'.")
                    responsibilities = ['unknown']
                elif len(selected_row) > 1:
                    raise ValueError("Multiple annotations from same username.")
                else:
                    responsibilities = selected_row.iloc[0].responsibilities
            else:  # no correction for this journal
                # use the or strategy: union of every annotator's labels
                conflict_status = 'CONFLICT'
                responsibility_sets = rows.apply(lambda row: set(row.responsibilities), axis=1)
                included_responsibilities = set()
                for responsibility_set in responsibility_sets:
                    included_responsibilities = included_responsibilities | responsibility_set
                if len(included_responsibilities) == 0:
                    responsibilities = ['unknown']
                else:
                    responsibilities = sort_responsibility_list(list(included_responsibilities))
        if responsibilities is None:
            raise ValueError("Unexpected and unhandled conflicts between a journal's responsibility annotations.")
        responsibility_annotation_data = {'site_id': site_id,
                                          'journal_oid': journal_oid,
                                          'conflict_status': conflict_status,
                                          'responsibilities': responsibilities}
        responsibility_annotations.append(responsibility_annotation_data)
    return pd.DataFrame(responsibility_annotations)
def get_responsibility_annotations_by_username():
    """Return one record per (journal, annotator) responsibility annotation.

    Only annotations created on/after 2018-08-23 (the date of the most
    recent annotation guidance) are included.

    :return: list of dicts with keys site_id, journal_oid, username,
        responsibilities, is_corrected.
    """
    try:
        db = get_annotation_db()
        created_at = '2018-08-23'
        cursor = db.execute("""
            SELECT a.site_id, a.journal_oid, a.data, a.username, c.correct_username
            FROM journalAnnotation a LEFT JOIN journalAnnotationConflictResolution c
            ON a.site_id = c.site_id AND a.journal_oid = c.journal_oid AND a.annotation_type = c.annotation_type
            WHERE a.annotation_type = "journal_patient_responsibilities" AND a.data <> ""
            AND a.created >= ?
            GROUP BY a.site_id, a.journal_oid, a.username
            ORDER BY a.id DESC
        """, (created_at,))
        annotations = []
        for record in cursor.fetchall():
            # an annotation counts as corrected when a non-empty
            # correct_username was joined in from the resolution table
            corrected = record['correct_username'] is not None and record['correct_username'] != ""
            annotations.append({
                'site_id': record['site_id'],
                'journal_oid': record['journal_oid'],
                'username': record['username'],
                'responsibilities': get_labels_from_responsibility_string(record['data']),
                'is_corrected': corrected,
            })
        return annotations
    finally:
        db.close()
def get_responsibility_annotations_by_username_dataframe():
    """Fixed version of get_responsibility_annotations_by_username() which should be preferred."""
    try:
        db = get_annotation_db()
        created_at = '2018-08-23'
        cursor = db.execute("""
            SELECT a.site_id, a.journal_oid, a.data, a.username, a.id, c.correct_username
            FROM journalAnnotation a LEFT JOIN journalAnnotationConflictResolution c
            ON a.site_id = c.site_id AND a.journal_oid = c.journal_oid AND a.annotation_type = c.annotation_type
            WHERE a.annotation_type = "journal_patient_responsibilities" AND a.data <> ""
            AND a.created >= ?
            GROUP BY a.site_id, a.journal_oid, a.username, a.id
            ORDER BY a.id DESC
        """, (created_at,))
        records = []
        for record in cursor.fetchall():
            records.append({
                'site_id': record['site_id'],
                'journal_oid': record['journal_oid'],
                'username': record['username'],
                'correct_username': record['correct_username'],
                'responsibilities': get_labels_from_responsibility_string(record['data']),
                'id': record['id'],
            })
        raw_df = pd.DataFrame(records)
        # Keep only the newest annotation (max id) per (site, journal, annotator).
        latest_indices = [grp.id.idxmax()
                          for _, grp
                          in raw_df.groupby(by=['site_id', 'journal_oid', 'username'])]
        latest_df = raw_df.loc[latest_indices].copy()
        latest_df.reset_index(inplace=True)
        return latest_df
    finally:
        db.close()
def resolve_responsibility_annotation_conflicts(rows, allow_majority_agreement=True, resolution_strategy="or"):
"""
:param rows: Rows of annotations containing 'data'
:param allow_majority_agreement: If annotation sets with at least one set of complete agreement ought to be
allowed as a no-conflict situation
:param resolution_strategy: View the source for the exact implementation of each approach.
Valid values are: 'none', 'empty', 'min2', 'and', 'or' (default)
:return: The responsibilities, as resolved from any conflict,
and a string describing the status of conflict resolution
"""
combinations = [(combo[0]['data'] == combo[1]['data'], combo[0]['data'])
for combo in itertools.combinations(rows, 2)]
agreements = [data for is_match, data in combinations if is_match is True]
responsibilities = None # this function resolves conflicts, assigning this list of responsibilities
if len(agreements) == len(combinations): # all annotators agree
conflict_status = "NO CONFLICT"
data = agreements[0]
elif allow_majority_agreement and len(agreements) >= 1: # at least one pair of annotators agree
# note that this isn't the same as majority agreement if > 3 annotators have annotated a single journal
# but at the time of implementation that will never happen
conflict_status = "MINIMAL CONFLICT"
data = agreements[0]
else: # no agreements between any of the annotators!
# this annotation was not corrected and there is no absolute agreement
# In this situation, we'll resolve based on the resolution strategy
conflict_status = "CONFLICT"
if resolution_strategy == "none":
data = ""
responsibilities = ['none']
elif resolution_strategy == "empty":
data = ""
responsibilities = []
elif resolution_strategy == "min2":
# this strategy includes all annotations used at least twice between annotators
data = ""
responsibility_lists = [get_labels_from_responsibility_string(row['data']) for row in rows]
| |
<filename>depricated/km.py
from __future__ import division
import numpy as np
from collections import defaultdict
import json
import itertools
from sklearn import cluster, preprocessing, manifold
from datetime import datetime
import sys
class KeplerMapper(object):
    def __init__(
        self,
        cluster_algorithm=cluster.DBSCAN(eps=0.5, min_samples=3),
        nr_cubes=10,
        overlap_perc=0.1,
        scaler=preprocessing.MinMaxScaler(),
        reducer=None,
        color_function="distance_origin",
        link_local=False,
        verbose=1,
    ):
        """Configure the Mapper pipeline.

        :param cluster_algorithm: scikit-learn style clusterer used inside
            each hypercube (must expose fit() and labels_)
        :param nr_cubes: number of intervals per lens dimension
        :param overlap_perc: fractional overlap between adjacent intervals
        :param scaler: scikit-learn style scaler applied to the data
        :param reducer: optional dimensionality reducer (fit_transform)
        :param color_function: name of the node-coloring scheme used by
            visualize()
        :param link_local: experimental: also link clusters within the
            same hypercube (see map())
        :param verbose: 0 = silent, 1 = progress, >1 = per-cube detail

        NOTE(review): the default clusterer/scaler instances are created
        once at import time, so they are SHARED by every KeplerMapper that
        relies on the defaults — confirm that is acceptable.
        """
        self.clf = cluster_algorithm
        self.nr_cubes = nr_cubes
        self.overlap_perc = overlap_perc
        self.scaler = scaler
        self.color_function = color_function
        self.verbose = verbose
        self.link_local = link_local
        self.reducer = reducer
        # Cover geometry, computed later by fit_transform():
        self.chunk_dist = []    # per-dimension interval width
        self.overlap_dist = []  # per-dimension overlap width
        self.d = []             # per-dimension data minimum (cover origin)

        if self.verbose > 0:
            print(
                "\nnr_cubes = %s \n\noverlap_perc = %s\n\nlink_local = %s\n\nClusterer = %s\n\nScaler = %s\n\n"
                % (
                    self.nr_cubes,
                    overlap_perc,
                    self.link_local,
                    str(self.clf),
                    str(self.scaler),
                )
            )
def fit_transform(self, X):
# Dimensionality Reduction
if self.reducer != None:
if self.verbose > 0:
try:
self.reducer.set_params(**{"verbose": self.verbose})
except:
pass
print("\n..Reducing Dimensionality using: \n\t%s\n" % str(self.reducer))
reducer = self.reducer
X = reducer.fit_transform(X)
# Scaling
if self.scaler != None:
if self.verbose > 0:
print("\n..Scaling\n")
scaler = self.scaler
X = scaler.fit_transform(X)
# We chop up the min-max column ranges into 'nr_cubes' parts
self.chunk_dist = (np.max(X, axis=0) - np.min(X, axis=0)) / self.nr_cubes
# We calculate the overlapping windows distance
self.overlap_dist = self.overlap_perc * self.chunk_dist
# We find our starting point
self.d = np.min(X, axis=0)
return X
    def map(self, X, dimension_index=[0], dimension_name=""):
        """Map the (already fit_transform'ed) data X to a simplicial complex.

        :param X: 2-D data array; must match the cover geometry computed
            by fit_transform()
        :param dimension_index: indices of the lens dimensions to cover
            (NOTE(review): mutable default argument — only read below,
            never mutated, so it is harmless here)
        :param dimension_name: stored under "meta" in the result
        :return: dict with "nodes" (cluster id -> member row ids),
            "links" (cluster id -> overlapping cluster ids) and "meta".
        """
        # This maps the data to a simplicial complex. Returns a dictionary with nodes and links.
        start = datetime.now()

        def cube_coordinates_all(nr_cubes, nr_dimensions):
            # if there are 4 cubes per dimension and 3 dimensions
            # return the bottom left (origin) coordinates of 64 hypercubes, in a sorted list of Numpy arrays
            l = []
            for x in range(nr_cubes):
                l += [x] * nr_dimensions
            return [
                np.array(list(f))
                for f in sorted(set(itertools.permutations(l, nr_dimensions)))
            ]

        nodes = defaultdict(list)
        links = defaultdict(list)
        complex = {}  # NOTE: shadows the `complex` builtin (kept as-is)
        if self.verbose > 0:
            print(
                "Mapping on data shaped %s using dimensions %s\n"
                % (str(X.shape), str(dimension_index))
            )

        # Scaling (again — fit_transform already scaled; presumably a no-op
        # for an idempotent scaler like MinMaxScaler on already-scaled data)
        if self.scaler != None:
            scaler = self.scaler
            X = scaler.fit_transform(X)

        # Initialize Cluster Algorithm
        clf = self.clf

        # Prefix'ing the data with ID's (column 0 becomes the row id)
        ids = np.array([x for x in range(X.shape[0])])
        X = np.c_[ids, X]

        # Subdivide the data X in intervals/hypercubes with overlap
        if self.verbose > 0:
            total_cubes = len(cube_coordinates_all(self.nr_cubes, len(dimension_index)))
            print("Creating %s hypercubes." % total_cubes)
        # NOTE(review): total_cubes is only bound when verbose > 0, but the
        # verbose > 1 print below implies verbose > 0, so it is in scope.

        di = np.array(dimension_index)
        for i, coor in enumerate(cube_coordinates_all(self.nr_cubes, di.shape[0])):
            # Slice the hypercube: keep rows whose lens columns (di + 1,
            # skipping the id column) fall inside this interval + overlap
            # on EVERY covered dimension.
            hypercube = X[
                np.invert(
                    np.any(
                        (X[:, di + 1] >= self.d[di] + (coor * self.chunk_dist[di]))
                        & (
                            X[:, di + 1]
                            < self.d[di]
                            + (coor * self.chunk_dist[di])
                            + self.chunk_dist[di]
                            + self.overlap_dist[di]
                        )
                        == False,
                        axis=1,
                    )
                )
            ]

            if self.verbose > 1:
                print(
                    "There are %s points in cube_%s / %s with starting range %s"
                    % (
                        hypercube.shape[0],
                        i,
                        total_cubes,
                        self.d[di] + (coor * self.chunk_dist[di]),
                    )
                )

            # If at least one sample inside the hypercube
            if hypercube.shape[0] > 0:
                # Cluster the data point(s) inside the cube, skipping the id-column
                clf.fit(hypercube[:, 1:])

                if self.verbose > 1:
                    print(
                        "Found %s clusters in cube_%s\n"
                        % (np.unique(clf.labels_[clf.labels_ > -1]).shape[0], i)
                    )

                # Now for every (sample id in cube, predicted cluster label)
                for a in np.c_[hypercube[:, 0], clf.labels_]:
                    if a[1] != -1:  # if not predicted as noise
                        cluster_id = (
                            str(coor[0])
                            + "_"
                            + str(i)
                            + "_"
                            + str(a[1])
                            + "_"
                            + str(coor)
                            + "_"
                            + str(self.d[di] + (coor * self.chunk_dist[di]))
                        )  # Rudimentary cluster id
                        nodes[cluster_id].append(
                            int(a[0])
                        )  # Append the member id's as integers
            else:
                if self.verbose > 1:
                    print("Cube_%s is empty.\n" % (i))

        # Create links when clusters from different hypercubes have members with the same sample id.
        # NOTE(review): this pass is O(n_clusters^2) in the number of nodes.
        for k in nodes:
            for kn in nodes:
                if k != kn:
                    if len(nodes[k] + nodes[kn]) != len(
                        set(nodes[kn] + nodes[k])
                    ):  # there are non-unique id's in the union
                        links[k].append(kn)

                    # Create links between local hypercube clusters if setting link_local = True
                    # This is an experimental feature deviating too much from the original mapper algo.
                    # Creates a lot of spurious edges, and should only be used when mapping one or at most two dimensions.
                    if self.link_local:
                        if k.split("_")[0] == kn.split("_")[0]:
                            links[k].append(kn)

        # Reporting
        if self.verbose > 0:
            nr_links = 0
            for k in links:
                nr_links += len(links[k])
            print(
                "\ncreated %s edges and %s nodes in %s."
                % (nr_links, len(nodes), str(datetime.now() - start))
            )

        complex["nodes"] = nodes
        complex["links"] = links
        complex["meta"] = dimension_name
        return complex
def visualize(
self,
complex,
path_html="mapper_visualization_output.html",
title="My Data",
graph_link_distance=30,
graph_gravity=0.1,
graph_charge=-120,
custom_tooltips=None,
width_html=0,
height_html=0,
show_tooltips=True,
show_title=True,
show_meta=True,
):
# Turns the dictionary 'complex' in a html file with d3.js
# Format JSON
json_s = {}
json_s["nodes"] = []
json_s["links"] = []
k2e = {} # a key to incremental int dict, used for id's when linking
for e, k in enumerate(complex["nodes"]):
# Tooltip formatting
if custom_tooltips != None:
tooltip_s = "<h2>Cluster %s</h2>" % k + " ".join(
[str(f) for f in custom_tooltips[complex["nodes"][k]]]
)
if self.color_function == "average_signal_cluster":
tooltip_i = int(
(
(
sum([f for f in custom_tooltips[complex["nodes"][k]]])
/ len(custom_tooltips[complex["nodes"][k]])
)
* 30
)
)
json_s["nodes"].append(
{
"name": str(k),
"tooltip": tooltip_s,
"group": 2 * int(np.log(len(complex["nodes"][k]))),
"color": str(tooltip_i),
}
)
else:
json_s["nodes"].append(
{
"name": str(k),
"tooltip": tooltip_s,
"group": 2 * int(np.log(len(complex["nodes"][k]))),
"color": str(k.split("_")[0]),
}
)
else:
tooltip_s = "<h2>Cluster %s</h2>Contains %s members." % (
k,
len(complex["nodes"][k]),
)
json_s["nodes"].append(
{
"name": str(k),
"tooltip": tooltip_s,
"group": 2 * int(np.log(len(complex["nodes"][k]))),
"color": str(k.split("_")[0]),
}
)
k2e[k] = e
for k in complex["links"]:
for link in complex["links"][k]:
json_s["links"].append(
{"source": k2e[k], "target": k2e[link], "value": 1}
)
# Width and height of graph in HTML output
if width_html == 0:
width_css = "100%"
width_js = 'document.getElementById("holder").offsetWidth-20'
else:
width_css = "%spx" % width_html
width_js = "%s" % width_html
if height_html == 0:
height_css = "100%"
height_js = 'document.getElementById("holder").offsetHeight-20'
else:
height_css = "%spx" % height_html
height_js = "%s" % height_html
# Whether to show certain UI elements or not
if show_tooltips == False:
tooltips_display = "display: none;"
else:
tooltips_display = ""
if show_meta == False:
meta_display = "display: none;"
else:
meta_display = ""
if show_title == False:
title_display = "display: none;"
else:
title_display = ""
with open(path_html, "wb") as outfile:
html = """<!DOCTYPE html>
<meta charset="utf-8">
<meta name="generator" content="KeplerMapper">
<title>%s | KeplerMapper</title>
<link href='https://fonts.googleapis.com/css?family=Roboto:700,300' rel='stylesheet' type='text/css'>
<style>
* {margin: 0; padding: 0;}
html { height: 100%%;}
body {background: #111; height: 100%%; font: 100 16px Roboto, Sans-serif;}
.link { stroke: #999; stroke-opacity: .333; }
.divs div { border-radius: 50%%; background: red; position: absolute; }
.divs { position: absolute; top: 0; left: 0; }
#holder { position: relative; width: %s; height: %s; background: #111; display: block;}
h1 { %s padding: 20px; color: #fafafa; text-shadow: 0px 1px #000,0px -1px #000; position: absolute; font: 300 30px Roboto, Sans-serif;}
h2 { text-shadow: 0px 1px #000,0px -1px #000; font: 700 16px Roboto, Sans-serif;}
.meta { position: absolute; opacity: 0.9; width: 220px; top: 80px; left: 20px; display: block; %s background: #000; line-height: 25px; color: #fafafa; border: 20px solid #000; font: 100 16px Roboto, Sans-serif;}
div.tooltip { position: absolute; width: 380px; display: block; %s padding: 20px; background: #000; border: 0px; border-radius: 3px; pointer-events: none; z-index: 999; color: #FAFAFA;}
}
</style>
<body>
<div id="holder">
<h1>%s</h1>
<p class="meta">
<b>Lens</b><br>%s<br><br>
<b>Cubes per dimension</b><br>%s<br><br>
<b>Overlap percentage</b><br>%s%%<br><br>
<!-- <b>Linking locally</b><br>%s<br><br> -->
<b>Color Function</b><br>%s( %s )<br><br>
<b>Clusterer</b><br>%s<br><br>
<b>Scaler</b><br>%s
</p>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script>
var width = %s,
height = %s;
var color = d3.scale.ordinal()
.domain(["0","1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"])
.range(["#FF0000","#FF1400","#FF2800","#FF3c00","#FF5000","#FF6400","#FF7800","#FF8c00","#FFa000","#FFb400","#FFc800","#FFdc00","#FFf000","#fdff00","#b0ff00","#65ff00","#17ff00","#00ff36","#00ff83","#00ffd0","#00e4ff","#00c4ff","#00a4ff","#00a4ff","#0084ff","#0064ff","#0044ff","#0022ff","#0002ff","#0100ff","#0300ff","#0500ff"]);
var force = d3.layout.force()
.charge(%s)
.linkDistance(%s)
.gravity(%s)
.size([width, height]);
var svg = d3.select("#holder").append("svg")
.attr("width", width)
.attr("height", height);
var div = d3.select("#holder").append("div")
.attr("class", "tooltip")
.style("opacity", 0.0);
var divs = d3.select('#holder').append('div')
.attr('class', 'divs')
.attr('style', function(d) { return 'overflow: hidden; width: ' + width + 'px; height: ' + height + 'px;'; });
graph = %s;
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value); });
var node = divs.selectAll('div')
.data(graph.nodes)
.enter().append('div')
.on("mouseover", function(d) {
div.transition()
.duration(200)
.style("opacity", .9);
div .html(d.tooltip + "<br/>")
.style("left", (d3.event.pageX + 100) | |
from django.db.models.options import Options
# Feature probe: Options only has an `auto_created` attribute on Django
# versions that auto-create ManyToMany "through" models.  The fixture
# branches below select the matching expected SQL for each behaviour.
autocreate_through_tables = hasattr(Options({}), 'auto_created')
# Expected PostgreSQL statements for the AddField mutation tests, keyed by
# test-case name.  Multi-statement expectations are newline-joined so they
# compare against the generated SQL as a single string.
add_field = {
    'AddNonNullNonCallableColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer ;',
            'UPDATE "tests_testmodel" SET "added_field" = 1 WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddNonNullCallableColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer ;',
            'UPDATE "tests_testmodel" SET "added_field" = "int_field" WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddNullColumnWithInitialColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer ;',
            'UPDATE "tests_testmodel" SET "added_field" = 1 WHERE "added_field" IS NULL;',
        ]),
    'AddStringColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" varchar(10) ;',
            'UPDATE "tests_testmodel" SET "added_field" = \'abc\\\'s xyz\' WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddDateColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" timestamp with time zone ;',
            'UPDATE "tests_testmodel" SET "added_field" = 2007-12-13 16:42:00 WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddDefaultColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer ;',
            'UPDATE "tests_testmodel" SET "added_field" = 42 WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddEmptyStringDefaultColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" varchar(20) ;',
            'UPDATE "tests_testmodel" SET "added_field" = \'\' WHERE "added_field" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "added_field" SET NOT NULL;',
        ]),
    'AddNullColumnModel':
        'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer NULL ;',
    'NonDefaultColumnModel':
        'ALTER TABLE "tests_testmodel" ADD COLUMN "non-default_column" integer NULL ;',
    'AddColumnCustomTableModel':
        'ALTER TABLE "custom_table_name" ADD COLUMN "added_field" integer NULL ;',
    'AddIndexedColumnModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "add_field" integer NULL ;',
            'CREATE INDEX "tests_testmodel_add_field" ON "tests_testmodel" ("add_field");'
        ]),
    'AddUniqueColumnModel':
        'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer NULL UNIQUE;',
    'AddUniqueIndexedModel':
        'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field" integer NULL UNIQUE;',
    'AddForeignKeyModel':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field_id" integer NULL REFERENCES "tests_addanchor1" ("id") DEFERRABLE INITIALLY DEFERRED;',
            'CREATE INDEX "tests_testmodel_added_field_id" ON "tests_testmodel" ("added_field_id");'
        ]),
}
if autocreate_through_tables:
    # Newer Django: the M2M join table is created bare and the foreign-key
    # constraints are attached afterwards via ALTER TABLE.
    add_field.update({
        'AddManyToManyDatabaseTableModel':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "testmodel_id" integer NOT NULL,',
                '    "addanchor1_id" integer NOT NULL,',
                '    UNIQUE ("testmodel_id", "addanchor1_id")',
                ')',
                ';',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "testmodel_id_refs_id_ed159e33" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "addanchor1_id_refs_id_7efbb240" FOREIGN KEY ("addanchor1_id") REFERENCES "tests_addanchor1" ("id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
        'AddManyToManyNonDefaultDatabaseTableModel':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "testmodel_id" integer NOT NULL,',
                '    "addanchor2_id" integer NOT NULL,',
                '    UNIQUE ("testmodel_id", "addanchor2_id")',
                ')',
                ';',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "testmodel_id_refs_id_ed159e33" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "addanchor2_id_refs_id_ec3e2588" FOREIGN KEY ("addanchor2_id") REFERENCES "custom_add_anchor_table" ("id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
        'AddManyToManySelf':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "from_testmodel_id" integer NOT NULL,',
                '    "to_testmodel_id" integer NOT NULL,',
                '    UNIQUE ("from_testmodel_id", "to_testmodel_id")',
                ')',
                ';',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "from_testmodel_id_refs_id_ed159e33" FOREIGN KEY ("from_testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
                'ALTER TABLE "tests_testmodel_added_field" ADD CONSTRAINT "to_testmodel_id_refs_id_ed159e33" FOREIGN KEY ("to_testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
    })
else:
    # Older Django: the foreign-key references are declared inline in the
    # column definitions of the join table.
    add_field.update({
        'AddManyToManyDatabaseTableModel':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    "addanchor1_id" integer NOT NULL REFERENCES "tests_addanchor1" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    UNIQUE ("testmodel_id", "addanchor1_id")',
                ')',
                ';'
            ]),
        'AddManyToManyNonDefaultDatabaseTableModel':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    "addanchor2_id" integer NOT NULL REFERENCES "custom_add_anchor_table" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    UNIQUE ("testmodel_id", "addanchor2_id")',
                ')',
                ';'
            ]),
        'AddManyToManySelf':
            '\n'.join([
                'CREATE TABLE "tests_testmodel_added_field" (',
                '    "id" serial NOT NULL PRIMARY KEY,',
                '    "from_testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    "to_testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED,',
                '    UNIQUE ("from_testmodel_id", "to_testmodel_id")',
                ')',
                ';'
            ]),
    })
# Expected SQL for the DeleteField mutation tests: plain column drops for
# regular fields, DROP TABLE for ManyToMany join tables.
delete_field = {
    'DefaultNamedColumnModel':
        'ALTER TABLE "tests_testmodel" DROP COLUMN "int_field" CASCADE;',
    'NonDefaultNamedColumnModel':
        'ALTER TABLE "tests_testmodel" DROP COLUMN "non-default_db_column" CASCADE;',
    'ConstrainedColumnModel':
        'ALTER TABLE "tests_testmodel" DROP COLUMN "int_field3" CASCADE;',
    'DefaultManyToManyModel':
        'DROP TABLE "tests_testmodel_m2m_field1";',
    'NonDefaultManyToManyModel':
        'DROP TABLE "non-default_m2m_table";',
    'DeleteForeignKeyModel':
        'ALTER TABLE "tests_testmodel" DROP COLUMN "fk_field1_id" CASCADE;',
    'DeleteColumnCustomTableModel':
        'ALTER TABLE "custom_table_name" DROP COLUMN "value" CASCADE;',
}
# Expected SQL for the ChangeField mutation tests (null-ness, max_length,
# db_column renames, index/unique toggles, and multi-attribute changes).
change_field = {
    "SetNotNullChangeModelWithConstant":
        '\n'.join([
            'UPDATE "tests_testmodel" SET "char_field1" = \'abc\\\'s xyz\' WHERE "char_field1" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field1" SET NOT NULL;',
        ]),
    "SetNotNullChangeModelWithCallable":
        '\n'.join([
            'UPDATE "tests_testmodel" SET "char_field1" = "char_field" WHERE "char_field1" IS NULL;',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field1" SET NOT NULL;',
        ]),
    "SetNullChangeModel": 'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field2" DROP NOT NULL;',
    "NoOpChangeModel": '',
    "IncreasingMaxLengthChangeModel": 'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field" TYPE varchar(45) USING CAST("char_field" as varchar(45));',
    "DecreasingMaxLengthChangeModel": 'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field" TYPE varchar(1) USING CAST("char_field" as varchar(1));',
    "DBColumnChangeModel": 'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_column" TO "customised_db_column";',
    "AddDBIndexChangeModel": 'CREATE INDEX "tests_testmodel_int_field2" ON "tests_testmodel" ("int_field2");',
    "RemoveDBIndexChangeModel": 'DROP INDEX "tests_testmodel_int_field1";',
    "AddUniqueChangeModel": 'ALTER TABLE "tests_testmodel" ADD CONSTRAINT tests_testmodel_int_field4_key UNIQUE("int_field4");',
    "RemoveUniqueChangeModel": 'ALTER TABLE "tests_testmodel" DROP CONSTRAINT tests_testmodel_int_field3_key;',
    "MultiAttrChangeModel":
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field2" DROP NOT NULL;',
            'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_column" TO "custom_db_column2";',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field" TYPE varchar(35) USING CAST("char_field" as varchar(35));',
        ]),
    "MultiAttrSingleFieldChangeModel":
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field2" TYPE varchar(35) USING CAST("char_field2" as varchar(35));',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field2" DROP NOT NULL;',
        ]),
    "RedundantAttrsChangeModel":
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field2" DROP NOT NULL;',
            'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_column" TO "custom_db_column3";',
            'ALTER TABLE "tests_testmodel" ALTER COLUMN "char_field" TYPE varchar(35) USING CAST("char_field" as varchar(35));',
        ]),
}
if autocreate_through_tables:
    # Auto-created through tables: renaming the join table also requires the
    # FK constraint to be dropped and re-added under a new hashed name.
    change_field.update({
        "M2MDBTableChangeModel":
            '\n'.join([
                'ALTER TABLE "change_field_non-default_m2m_table" DROP CONSTRAINT "testmodel_id_refs_my_id_5d3392f8";',
                'ALTER TABLE "change_field_non-default_m2m_table" RENAME TO "custom_m2m_db_table_name";',
                'ALTER TABLE "custom_m2m_db_table_name" ADD CONSTRAINT "testmodel_id_refs_my_id_a31f0c6f" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("my_id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
    })
else:
    # Inline FK declarations: a simple table rename suffices.
    change_field.update({
        "M2MDBTableChangeModel": 'ALTER TABLE "change_field_non-default_m2m_table" RENAME TO "custom_m2m_db_table_name";',
    })
# Expected SQL for the DeleteModel mutation tests.  M2M join tables are
# dropped before their owning model's table.
delete_model = {
    'BasicModel':
        'DROP TABLE "tests_basicmodel";',
    'BasicWithM2MModel':
        '\n'.join([
            'DROP TABLE "tests_basicwithm2mmodel_m2m";',
            'DROP TABLE "tests_basicwithm2mmodel";'
        ]),
    'CustomTableModel':
        'DROP TABLE "custom_table_name";',
    'CustomTableWithM2MModel':
        '\n'.join([
            'DROP TABLE "another_custom_table_name_m2m";',
            'DROP TABLE "another_custom_table_name";'
        ]),
}
# Expected SQL when deleting an entire application: every table owned by
# the app is dropped, join tables included.
delete_application = {
    'DeleteApplication':
        '\n'.join([
            'DROP TABLE "tests_appdeleteanchor1";',
            'DROP TABLE "app_delete_custom_add_anchor_table";',
            'DROP TABLE "tests_testmodel_anchor_m2m";',
            'DROP TABLE "tests_testmodel";',
            'DROP TABLE "app_delete_custom_table_name";',
        ]),
}
# Expected SQL for the RenameField mutation tests: column renames, plus
# table renames for ManyToMany join tables.
rename_field = {
    'RenameColumnModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "int_field" TO "renamed_field";',
    'RenameColumnWithTableNameModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "int_field" TO "renamed_field";',
    'RenameForeignKeyColumnModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "fk_field_id" TO "renamed_field_id";',
    'RenameNonDefaultColumnNameModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_col_name" TO "renamed_field";',
    'RenameNonDefaultColumnNameToNonDefaultNameModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_col_name" TO "non-default_column_name";',
    'RenameNonDefaultColumnNameToNonDefaultNameAndTableModel':
        'ALTER TABLE "tests_testmodel" RENAME COLUMN "custom_db_col_name" TO "non-default_column_name2";',
    'RenameColumnCustomTableModel':
        'ALTER TABLE "custom_rename_table_name" RENAME COLUMN "value" TO "renamed_field";',
    'RenameNonDefaultManyToManyTableModel':
        'ALTER TABLE "non-default_db_table" RENAME TO "tests_testmodel_renamed_field";',
}
# Expected input (doctest-style SQLMutation sequence) and combined output
# for the SQLMutation tests.
sql_mutation = {
    'SQLMutationSequence': """[
... SQLMutation('first-two-fields', [
...     'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field1" integer NULL;',
...     'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field2" integer NULL;'
... ], update_first_two),
... SQLMutation('third-field', [
...     'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field3" integer NULL;',
... ], update_third)]
""",
    'SQLMutationOutput':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field1" integer NULL;',
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field2" integer NULL;',
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field3" integer NULL;',
        ]),
}
if autocreate_through_tables:
    # Auto-created through tables: renaming a PK column or a join table
    # means dropping and re-adding the hashed FK constraints as well.
    rename_field.update({
        'RenamePrimaryKeyColumnModel':
            '\n'.join([
                'ALTER TABLE "non-default_db_table" DROP CONSTRAINT "testmodel_id_refs_id_eeae318e";',
                'ALTER TABLE "tests_testmodel_m2m_field" DROP CONSTRAINT "testmodel_id_refs_id_ba77d38d";',
                'ALTER TABLE "tests_testmodel" RENAME COLUMN "id" TO "my_pk_id";',
                'ALTER TABLE "non-default_db_table" ADD CONSTRAINT "testmodel_id_refs_my_pk_id_eeae318e" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("my_pk_id") DEFERRABLE INITIALLY DEFERRED;',
                'ALTER TABLE "tests_testmodel_m2m_field" ADD CONSTRAINT "testmodel_id_refs_my_pk_id_ba77d38d" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("my_pk_id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
        'RenameManyToManyTableModel':
            '\n'.join([
                'ALTER TABLE "tests_testmodel_m2m_field" DROP CONSTRAINT "testmodel_id_refs_id_ba77d38d";',
                'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
                'ALTER TABLE "tests_testmodel_renamed_field" ADD CONSTRAINT "testmodel_id_refs_id_f50a5e5d" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
        'RenameManyToManyTableWithColumnNameModel':
            '\n'.join([
                'ALTER TABLE "tests_testmodel_m2m_field" DROP CONSTRAINT "testmodel_id_refs_id_ba77d38d";',
                'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
                'ALTER TABLE "tests_testmodel_renamed_field" ADD CONSTRAINT "testmodel_id_refs_id_f50a5e5d" FOREIGN KEY ("testmodel_id") REFERENCES "tests_testmodel" ("id") DEFERRABLE INITIALLY DEFERRED;',
            ]),
    })
else:
    # Inline FK declarations: plain renames are enough.
    rename_field.update({
        'RenamePrimaryKeyColumnModel':
            'ALTER TABLE "tests_testmodel" RENAME COLUMN "id" TO "my_pk_id";',
        'RenameManyToManyTableModel':
            'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
        'RenameManyToManyTableWithColumnNameModel':
            'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
    })
# Expected SQL for the generic-relations tests.
generics = {
    'DeleteColumnModel': 'ALTER TABLE "tests_testmodel" DROP COLUMN "char_field" CASCADE;'
}
inheritance = {
'AddToChildModel':
'\n'.join([
'ALTER TABLE "tests_childmodel" ADD COLUMN "added_field" integer ;',
'UPDATE "tests_childmodel" SET "added_field" = 42 WHERE "added_field" IS NULL;',
'ALTER TABLE "tests_childmodel" ALTER COLUMN "added_field" SET | |
to true')
dev.updateStateOnServer('mapUpdateNeeded',value=True)
self.godoMapping(str(follow['location']['latitude']),str(follow['location']['longitude']),dev)
return
except Exception as e:
self.logger.debug(unicode('Exception in refreshDataforDev: ' + unicode(e)))
self.logger.debug('Exception:')
self.logger.exception(unicode('Possibility missing some data from icloud: Is your account setup with FindFriends enabled on iOS/Mobile device?'))
dev.updateStateOnServer('deviceIsOnline', value=False, uiValue='Offline')
dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
return
def requestSaveUrl(self, url, file):
    """Download *url* and save it to the local path *file* (best effort).

    Streams the HTTP response to disk in small chunks.  Any failure is
    logged and otherwise swallowed so a broken map download never takes
    the plugin down.
    """
    try:
        self.logger.debug("Saving url" + url + " as file:" + file)
        # Some map providers refuse requests without a browser user agent.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        r = requests.get(url, headers=headers, stream=True, timeout=10)
        with open(file, 'wb') as f:
            for chunk in r.iter_content():
                f.write(chunk)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; the download itself stays best-effort.
        if self.debugmaps:
            self.logger.exception("Exception in saveURL Requests")
        self.logger.debug("Exception in save Map Requests")
def godoMapping(self, latitude, longitude, dev):
    """Regenerate the static map image(s) for *dev* if it has moved.

    When the device's ``mapUpdateNeeded`` state is set, builds the map URL
    for the device's position, downloads it into
    ~/Documents/Indigo-iFindFriendMini/, optionally refreshes the
    all-devices map (Google maps only), and records the shareable URL and
    update time in device states and an Indigo variable.
    """
    self.logger.debug(u"godoMapping() method called.")
    try:
        if not self.useMaps:
            # Mapping disabled in plugin config; nothing to do.
            if self.debugmaps:
                self.logger.debug("UseMaps not enabled. Returning.")
            return
        # Per-device image lives under the user's Documents folder.
        MAChome = os.path.expanduser("~") + "/"
        folderLocation = MAChome + "Documents/Indigo-iFindFriendMini/"
        filename = dev.name.replace(' ','_')+'_Map.jpg'
        file = folderLocation +filename
        #Generate single device URL
        if dev.states['mapUpdateNeeded']:
            self.logger.debug(u'update Map Happening as device moved..')
            drawUrl = self.urlGenerate(latitude ,longitude ,self.googleAPI, int(self.configHorizontalMap), int(self.configVerticalMap), int(self.configZoomMap), dev)
            if self.debugmaps:
                self.logger.debug(u'drawURL 0=:'+unicode(drawUrl[0]))
            if self.debugmaps:
                # In map-debug mode also open the generated URL in a browser.
                webbrowser.open_new(drawUrl[0])
            #webbrowser.open_new(drawUrl[1])
            #fileMap = "curl --output '" + file + "' --url '" + drawUrl[0] + "'"
            #os.system(fileMap)
            self.requestSaveUrl(drawUrl[0],file)
            self.logger.debug('Saving Map...' + file)
            filename = 'All_device.jpg'
            file = folderLocation + filename
            # Generate URL for All Maps - if using Google; not with openstreetmap
            if self.mapType=='google':
                drawUrlall = self.urlAllGenerate(self.googleAPI, int(self.configHorizontalMap), int(self.configVerticalMap), int(self.configZoomMap))
                #fileMap = "curl --output '" + file + "' --url '" + drawUrlall + "'"
                #os.system(fileMap)
                self.requestSaveUrl(drawUrlall, file)
                if self.debugmaps:
                    self.logger.debug('Saving Map...' + file)
            # Mark done and publish the shareable URL (index 1 of drawUrl).
            dev.updateStateOnServer('mapUpdateNeeded',value=False)
            dev.updateStateOnServer('googleMapUrl', value=str(drawUrl[1]) )
            self.logger.debug(u'Updating Variable:'+unicode(dev.name))
            # Variable name is the device name with all whitespace removed.
            variablename =''.join(dev.name.split())
            self.updateVar(variablename, str(drawUrl[1]))
            # NOTE(review): `t` is presumably a time/datetime module alias
            # imported at file level — confirm.
            update_time = t.strftime(self.datetimeFormat)
            dev.updateStateOnServer('mapLastUpdated', value=str(update_time))
            if self.debugmaps and self.mapType=='google':
                webbrowser.open_new(drawUrlall)
                self.logger.debug(u'Mapping URL:')
                self.logger.debug(unicode(drawUrlall))
            return
        else:
            self.logger.debug(u'No Mapping Needed.')
            return
    except Exception as e:
        self.logger.exception(u'Exception within godoMapping: '+unicode(e))
def refreshDataForDevAction(self, valuesDict):
    """Plugin-action hook to refresh data for a selected device.

    The actual refresh work is handled elsewhere; this handler simply
    records the call and reports success back to Indigo.
    """
    self.logger.debug(u"refreshDataForDevAction() method called.")
    return True
def toggleDebugEnabled(self):
    """Toggle debug logging on/off.

    Flips between logging.INFO and logging.DEBUG, mirroring the new state
    into ``self.debug``/``self.debugLevel``/``self.logLevel``, the plugin
    prefs, and the Indigo log handler so all of them stay in sync.
    (The two branches previously duplicated every assignment; the common
    state update is now hoisted and only the log messages differ.)
    """
    self.logger.debug(u"toggleDebugEnabled() method called.")
    # Currently at INFO means this call turns debugging ON.
    turning_on = self.debugLevel == int(logging.INFO)
    new_level = int(logging.DEBUG) if turning_on else int(logging.INFO)
    self.debug = turning_on
    self.debugLevel = new_level
    self.pluginPrefs['showDebugInfo'] = turning_on
    self.pluginPrefs['showDebugLevel'] = new_level
    if turning_on:
        self.logger.info(u"Debugging on.")
        self.logger.debug(u"Debug level: {0}".format(self.debugLevel))
    else:
        self.logger.info(u"Debugging off. Debug level: {0}".format(self.debugLevel))
    self.logLevel = new_level
    self.logger.debug(u"New logLevel = " + str(self.logLevel))
    self.indigo_log_handler.setLevel(self.logLevel)
def toggleDebugMax(self):
    """Force debug logging fully on, regardless of the current level.

    Unlike toggleDebugEnabled() this never turns debugging off; it pushes
    DEBUG into the instance attributes, the plugin prefs, and the Indigo
    log handler.
    """
    self.logger.debug(u"toggleDebugMax() method called.")
    verbose = int(logging.DEBUG)
    self.debug = True
    self.debugLevel = verbose
    self.pluginPrefs['showDebugInfo'] = True
    self.pluginPrefs['showDebugLevel'] = verbose
    self.logger.info(u"Debugging on.")
    self.logger.debug(u"Debug level: {0}".format(self.debugLevel))
    self.logLevel = verbose
    self.logger.debug(u"New logLevel = " + str(self.logLevel))
    self.indigo_log_handler.setLevel(self.logLevel)
def myFriendDevices(self, filter=0, valuesDict=None, typeId="", targetId=0):
    ################################################
    # Internal - Lists the Friends linked to an account
    # Returns (value, label) tuples for an Indigo dynamic menu: each entry
    # is a friend id plus the accepting email address.  A single
    # placeholder entry is returned when the account is not configured or
    # the iCloud login fails; an empty list on any unexpected error.
    try:
        self.logger.debug(unicode(u'myFriendDevices Called...'))
        iFriendArray = []
        username = self.pluginPrefs.get('appleId', '')
        password = self.pluginPrefs.get('applePwd', '')
        appleAPIId = self.pluginPrefs.get('appleAPIid', '')
        if appleAPIId == '':
            # No account saved yet; surface a hint entry instead of failing.
            iWait = 0, "Set up Apple Account in Plugin Config"
            iFriendArray.append(iWait)
            return iFriendArray
        # go no further unless account details have been entered
        iLogin = self.iAuthorise(username, password)
        if iLogin[0] == 1:
            self.logger.debug(u"Login to icloud Failed.")
            iWait = 0, 'Login to icloud Failed'
            iFriendArray.append(iWait)
            return iFriendArray
        following = iLogin[1].friends.data['following']
        for fol in following:
            iOption2 = fol['id'], fol['invitationAcceptedByEmail']
            iFriendArray.append(iOption2)
        return iFriendArray
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # propagate, and log with traceback for parity with myDevices().
        self.logger.exception(u'Error within myFriendsDevices')
        return []
def myDevices(self, filter=0, valuesDict=None, typeId="", targetId=0):
    ################################################
    # Internal - Lists the devices linked to an account
    # Returns (value, label) tuples for an Indigo dynamic menu: each entry
    # is a device id plus its display name.  A single placeholder entry is
    # returned when the account is not configured or the iCloud login
    # fails; an empty list on any unexpected error.
    try:
        self.logger.debug(unicode(u'myDevices Called...'))
        iArray = []
        username = self.pluginPrefs.get('appleId', '')
        password = self.pluginPrefs.get('applePwd', '')
        appleAPIId = self.pluginPrefs.get('appleAPIid', '')
        if appleAPIId == '':
            # No account saved yet; surface a hint entry instead of failing.
            iWait = 0, "Set up Apple Account in Plugin Config"
            iArray.append(iWait)
            return iArray
        # go no further unless account details have been entered
        iLogin = self.iAuthorise(username, password)
        if iLogin[0] == 1:
            self.logger.debug(u"Login to icloud Failed.")
            iWait = 0, 'Login to icloud Failed'
            iArray.append(iWait)
            return iArray
        following = iLogin[1].devices
        for fol in following:
            self.logger.debug(unicode(fol['id'])+" and "+unicode(fol['name']))
            iOption2 = fol['id'], fol['name']
            iArray.append(iOption2)
        return iArray
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.
        self.logger.exception(u'Error within myDevices')
        return []
def allDevicesOffline(self):
    """Mark every enabled plugin device as offline in Indigo."""
    self.logger.debug("all Devices Offline")
    for plugin_dev in indigo.devices.itervalues("self"):
        if not plugin_dev.enabled:
            continue
        plugin_dev.updateStateOnServer('deviceIsOnline', value=False, uiValue='Offline')
        plugin_dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
    return
def iAuthorise(self, iUsername, iPassword):
    """Log into iCloud (Find My Phone / Find Friends) and cache the service.

    Returns a 2-tuple ``(status, service)``:
      0 - logged in; ``service`` is the PyiCloudService instance.
      2 - account requires two-factor authentication; ``service`` is the
          (not yet fully verified) PyiCloudService instance.
      1 - login failed; ``service`` is the placeholder string 'NL'/'NI'.

    The PyiCloudService instance is cached on ``self.appleAPI`` and only
    re-created when absent; otherwise the existing session is refreshed.
    """
    self.logger.debug('iAuthorise: Attempting login...')
    # Logs into the API as required
    try:
        if self.appleAPI == None:
            # First login (or after deleteAccount): create a fresh service,
            # persisting cookies/session under the plugin prefs directory.
            self.appleAPI = PyiCloudService(iUsername, iPassword, cookie_directory=self.iprefDirectory, session_directory=self.iprefDirectory+"/session", verify=True)
            self.logger.debug(u"PyiCloudService start or redo FULL self.appleAPI full login...")
            self.logger.debug(u'Login to account successful...')
            self.logger.debug(u"Account Requires 2FA:" + unicode(self.appleAPI.requires_2fa))
        if self.appleAPI:
            # Cheap session refresh on every call (no full re-login).
            self.appleAPI.authenticate(force_refresh=False)
            self.logger.debug(u'Refresh Session appleAPI only.')
        self.requires2FA = self.appleAPI.requires_2fa
        if self.requires2FA:
            # Ask the user (via the Indigo log) to complete 2FA in config.
            self.logger.info(u"{0:=^130}".format(""))
            self.logger.info(u"{0:=^130}".format(""))
            self.logger.info( u"Account requires a two step authentication: Please see Plugin Config box to complete")
            self.logger.info(u"Enter updated verification code in box and press submit.")
            self.logger.info(u"{0:=^130}".format(""))
            self.logger.info(u"{0:=^130}".format(""))
            #self.appleAPI = None
            return 2, self.appleAPI
            # 2 = 2fa required
        #self.appleAPI = self.self.appleAPI
        if self.debugicloud:
            # Verbose dump of the friends data structures for debugging.
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'type self.appleAPI result equals:')
            self.logger.debug(unicode(type(self.appleAPI)))
            #self.logger.debug(u'self.appleAPI.devices equals:')
            #self.logger.debug(unicode(self.appleAPI.devices))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'self.appleAPI.friends.details equals:')
            self.logger.debug(unicode(self.appleAPI.friends.details))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'self.appleAPI.friends.locations equals:')
            self.logger.debug(unicode(self.appleAPI.friends.locations))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'Type of self.appleAPI.friends.locations equals:')
            self.logger.debug(unicode(type(self.appleAPI.friends.locations)))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'Type of self.appleAPI.friends.data')
            self.logger.debug(unicode(type(self.appleAPI.friends.data)))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'self.appleAPI.friends.data equals')
            self.logger.debug(unicode(self.appleAPI.friends.data))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'self.appleAPI.friends.data[followers] equals:')
            self.logger.debug(unicode(self.appleAPI.friends.data['followers']))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            follower = self.appleAPI.friends.data['followers']
            self.logger.debug(u'follower or self.appleAPI.friends.data[followers] equals:')
            for fol in follower:
                self.logger.debug(u"{0:=^130}".format(""))
                self.logger.debug(u"{0:=^130}".format(""))
                self.logger.debug(u'Follower in follower: ID equals')
                self.logger.debug(unicode(fol['id']))
                self.logger.debug(u'email address from Id equals:')
                self.logger.debug(unicode(fol['invitationFromEmail']))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u'self.appleAPI.friends.details equals:')
            self.logger.debug(unicode(self.appleAPI.friends.details))
            self.logger.debug(u"{0:=^130}".format(""))
            self.logger.debug(u"{0:=^130}".format(""))
        return 0, self.appleAPI
    except PyiCloudFailedLoginException:
        self.logger.error(u'Login failed - Check username/password - has it changed recently?. ')
        self.appleAPI = None
        self.allDevicesOffline()
        return 1, 'NL'
    except PyiCloud2SARequiredException:
        # 2SA (older two-step auth) needs user action in the config dialog.
        self.logger.error(u'Login failed. Account requires 2nd factor, verification code setup. Please see config window')
        self.requires2FA = True
        self.appleAPI = None
        self.allDevicesOffline()
        self.triggerCheck2fa()
        return 1, 'NL'
    except ValueError as e:
        self.logger.error(u"{0:=^130}".format(""))
        self.logger.error(u'Login failed - 2FA Authenication is supported. ')
        self.logger.debug(u'Error Given is:'+unicode(e.message)+unicode(e.__dict__))
        self.logger.error(u"{0:=^130}".format(""))
        self.allDevicesOffline()
        return 1, 'NL'
    except PyiCloudAPIResponseException as e:
        self.logger.debug(u'Login Failed API Response Error. ' + unicode(e.message) + unicode(e.__dict__))
        self.logger.debug(e)
        if e.code in [450,421,500]:
            # These codes indicate a stale session; force one re-auth and
            # probe a device location to confirm it worked.
            self.logger.info("Error Code 450/421/500 Given: Re-authentication seems to be required. Reauthenicating now.")
            self.appleAPI.authenticate(True)
            try:
                self.logger.debug(u"Testing ********************************************")
                self.logger.debug( self.appleAPI.devices[0].location() )
                self.logger.debug(u"********************************************")
                return 0, self.appleAPI
            except PyiCloudAPIResponseException:
                self.logger.debug("Could not re-authenticate at all... Sorry.")
                self.appleAPI = None
                self.allDevicesOffline()
                return 1, 'NI'
        return 1, 'NI'
    except Exception as e:
        self.logger.debug(u'Login Failed General Error. ' + unicode(e.message) + unicode(e.__dict__))
        self.logger.info(u"Issue connecting to icloud. ?Internet issue, or temp icloud server down...")
        self.logger.debug(e)
        return 1, 'NI'
def deleteAccount(self, valuesDict):
    """Forget the stored Apple account and wipe cached session files.

    Clears the cached PyiCloud service, blanks the account fields in the
    plugin prefs and the config dialog values, deletes the cached cookie
    and session files under the plugin's preferences folder, and returns
    the cleared values dict to the config dialog.
    """
    self.logger.debug(u'deleteAccount Button pressed Called.')
    self.appleAPI = None
    self.pluginPrefs['appleAPIid'] = ""
    for field in ('appleAPIid', 'appleId', 'applePwd', 'verficationcode'):
        valuesDict[field] = ""
    prefs_dir = indigo.server.getInstallFolderPath() + "/Preferences/Plugins/" + self.pluginId + "/"
    self.logger.info("Deleting Session data from:" + unicode(prefs_dir))
    # Session files first, then anything left at the folder's top level.
    for pattern in ("/session/*", "/*"):
        for cached_file in glob.glob(prefs_dir + pattern):
            self.logger.info("Deleting file:" + (unicode(cached_file)))
            try:
                os.remove(cached_file)
            except OSError:
                pass
    return valuesDict
def loginAccount(self, valuesDict):
self.logger.debug(u'loginAccount Button pressed Called.')
self.validatePrefsConfigUi(valuesDict)
self.logger.debug(u"Using Details: Username:"+unicode(valuesDict['appleId'])+u" and password:"+unicode(valuesDict['applePwd']))
self.appleAPI = None
self.pluginPrefs['appleAPIid']= ""
self.logger.info(u"{0:=^130}".format(""))
self.logger.info(u'Attempting Login to Apple Account:'+unicode(valuesDict['appleId']))
valuesDict['appleAPIid']=''
iLogin = self.iAuthorise(valuesDict['appleId'], valuesDict['applePwd'])
if self.appleAPI != None:
self.logger.info(u"Account username and password has been verifed by Apple")
if self.appleAPI.requires_2fa==False:
self.logger.info(u"Two Factor Authenication (2FA) is NOT enabled on this account")
self.logger.info(u"OR this Computer/Device is a Trusted Session. Hence Code not needed")
self.logger.info(u"This is the ideal setup for iFindFriends")
self.logger.info(u"Nothing further is required and the account should be functioning")
self.logger.info(u"Please select options and press Save.")
else:
self.logger.info(u"Two Factor Authenication (2FA) is enabled on this account")
self.logger.info(u"Please enable the use 2FA checkbox to continue.")
self.logger.info(u"Another device from this account is required to verify the account")
self.logger.info(u"From this other device please approve and enter the code | |
{prediction}")
y_raw_predictions.append(prediction_raw)
y_predicted.append(prediction)
return y_predicted, y_raw_predictions
def accuracy(self, data_x: np.ndarray, data_y: np.ndarray,
             debug: Dict) -> Tuple[int, np.ndarray]:
    """Count exact-match predictions for *data_x* against *data_y*.

    Returns the number of samples whose (integer-cast) prediction equals
    the target, together with all predictions stacked into one array.
    """
    if debug['metrics']:
        logger.nl()
        logger.info('Accuracy', color='cyan')
    guesses, _ = self.predict(data_x, debug=debug['metrics'])
    correct = 0
    for guess, truth in zip(guesses, data_y):
        if np.array_equal(guess.astype(int), truth.astype(int)):
            correct += 1
    if debug['metrics']:
        logger.info(f'result_accuracy: {correct}')
    return correct, np.stack(guesses, axis=0)
def total_loss(self, data_x: np.ndarray, data_y: np.ndarray, regularization_param: float,
               debug: Dict) -> List[Tuple[str, float]]:
    """Evaluate every configured loss function over the whole dataset.

    Returns a list of ``(loss_function_name, value)`` pairs, one per entry
    in ``self.loss_functions``, each including an L2 weight penalty scaled
    by *regularization_param*.
    """
    if debug['metrics']:
        logger.nl()
        logger.info('Total Loss', color='cyan')
    predictions, predictions_raw = self.predict(data_x, debug['metrics'])
    # One running cost per configured loss function.
    mean_costs = [0.0 for _ in range(len(self.loss_functions))]
    for ind, prediction_raw in enumerate(predictions_raw):
        current_y = data_y[ind]
        for loss_ind, loss_func in enumerate(self.loss_functions):
            # Per-sample loss, averaged by dividing through the sample count.
            mean_costs[loss_ind] += loss_func(prediction_raw, current_y) / len(predictions_raw)
            # L2 penalty added once per sample; the 1/len factor makes the
            # summed contribution equal 0.5 * reg * sum ||w||^2.
            mean_costs[loss_ind] += 0.5 * (regularization_param / len(predictions_raw)) * sum(
                np.linalg.norm(w) ** 2
                for w in self.weights)
        if debug['metrics']:
            logger.info(f'ind: {ind}, prediction_raw: {prediction_raw.T}, current_y: {current_y}')
    costs_with_names = []
    for loss_ind, loss_func in enumerate(self.loss_functions):
        # NOTE(review): mean_costs is already normalized by
        # len(predictions_raw) in the loop above; the extra 1/len(data_y)
        # factor here divides by the dataset size a second time — confirm
        # this double normalization is intended.
        costs_with_names.append((loss_func.__name__, 1.0 / len(data_y) * mean_costs[loss_ind]))
    if debug['metrics']:
        logger.info(f'Mean Costs: {mean_costs}')
    return costs_with_names
@staticmethod
def cross_entropy(a, y):
    """Summed binary cross-entropy between activations *a* and targets *y*.

    A small epsilon keeps the logs finite at a == 0 or a == 1, and
    nan_to_num guards any remaining non-finite terms.
    """
    eps = 1e-15
    term_pos = -y * np.log(a + eps)
    term_neg = -(1 - y) * np.log(1 - a + eps)
    return np.sum(np.nan_to_num(term_pos + term_neg))
@staticmethod
def cross_entropy_derivative(z, a, y):
    """Gradient of the cross-entropy loss w.r.t. the pre-activation.

    For sigmoid outputs the sigmoid' factor cancels, leaving a - y; the
    pre-activation *z* is accepted for interface uniformity but unused.
    """
    delta = a - y
    return delta
@staticmethod
def mse(a, y):
    """Summed squared error between activations *a* and targets *y*."""
    residual = a - y
    return np.sum(residual * residual)
mse_derivative = cross_entropy_derivative
@staticmethod
def x_y_split(dataset: np.ndarray) -> Tuple[np.array, np.array]:
return dataset[:, :-1], dataset[:, -1][:, np.newaxis].astype(int)
@staticmethod
def two_classes_split(dataset: np.ndarray) -> Tuple[np.array, np.array]:
data_x_c1_idx = dataset[:, -1] == 0
data_x_c1 = dataset[data_x_c1_idx][:, :-1]
data_x_c2_idx = dataset[:, -1] == 1
data_x_c2 = dataset[data_x_c2_idx][:, :-1]
return data_x_c1, data_x_c2
def train_bpnn(name, dataset, targets, hidden_layers, activations, loss_functions, lr, momentum,
               batch_size, early_stopping, max_epochs, regularization_param, shuffle,
               symmetric_weights, seed, debug, save_data=False):
    """Build a MultiLayerPerceptron for *dataset* and train it.

    Returns the trained model together with the per-epoch accuracy, loss
    and timing histories produced by ``MultiLayerPerceptron.train``.
    """
    logger.nl()
    logger.info(f"Training {name} dataset..")
    # Layer widths: input features, the hidden layers, then the one-hot output.
    layer_units = [int(dataset.shape[1]), *hidden_layers, int(targets.shape[1])]
    logger.info(layer_units)
    model = MultiLayerPerceptron(units=layer_units, activations=activations,
                                 symmetric_weights=symmetric_weights,
                                 loss_functions=loss_functions, seed=seed)
    accuracies, losses, times = model.train(data=dataset, one_hot_y=targets,
                                            batch_size=batch_size, lr=lr, momentum=momentum,
                                            shuffle=shuffle, max_epochs=max_epochs,
                                            early_stopping=early_stopping,
                                            regularization_param=regularization_param,
                                            debug=debug, save_data=save_data)
    return model, accuracies, losses, times
def test_and_plot_bpnn(title, test_set=None, one_hot_targets=None, model=None, accuracies=None,
                       losses=None,
                       times=None,
                       subsample=1, min_acc: float = 0.0, save_predictions: bool = False):
    """Evaluate *model* on *test_set* (if given) and plot the training curves.

    *test_set* may also be a pre-computed accuracy (float), in which case no
    evaluation runs; ``None`` skips evaluation entirely.  When
    *save_predictions* is set, raw and one-hot predictions are pickled
    under data/bpnn.
    """
    import types
    if isinstance(test_set, float):
        # Caller already computed the accuracy.
        test_accuracy = test_set
    elif test_set is None:
        test_accuracy = None
    else:
        # Re-bind predict in case the model instance lost its bound method.
        model.predict = types.MethodType(MultiLayerPerceptron.predict, model)
        test_accuracy, predictions_onehot = model.test(test_set.copy(), one_hot_targets.copy())
        if save_predictions:
            out_dir = 'data/bpnn'
            predictions = one_hot_unencode(predictions_onehot)
            MultiLayerPerceptron.save_pickle(var=predictions,
                                             path=f'{out_dir}/predicted_y.pickle')
            MultiLayerPerceptron.save_pickle(var=predictions_onehot,
                                             path=f'{out_dir}/predicted_onehot_y.pickle')
    plot_bpnn_results(title=title,
                      test_accuracy=test_accuracy,
                      accuracies=accuracies,
                      losses=losses,
                      times=times,
                      subsample=subsample, min_acc=min_acc)
# Implementation of kmeans clustering algorithm
class kmeans:
    """Lloyd's k-means clustering over a fixed training matrix.

    Fixes/changes vs. the original:
    * ``predict`` now uses the configured metric ``self.dist`` instead of a
      hard-coded ``'euclidean'``, so it is consistent with ``fit``.
    * The RNG seed used for centroid initialisation is a constructor
      parameter (default 42, preserving the previous behavior).
    """

    def __init__(self, X_train, max_iter=1000, k=2, dist='euclidean', seed=42):
        """Store the data and hyper-parameters.

        X_train: (n_samples, n_features) array.
        max_iter: maximum number of Lloyd iterations.
        k: number of clusters.
        dist: any metric name accepted by scipy.spatial.distance.cdist.
        seed: RNG seed for reproducible centroid initialisation.
        """
        self.X = X_train
        self.k = k
        self.max_iter = max_iter
        self.centroids = []
        self.switch = []  # % of samples changing cluster, per iteration
        self.epoch = []   # iteration counter matching `switch`
        self.dist = dist
        self.seed = seed

    def fit(self):
        """Run Lloyd's algorithm until labels stabilise or max_iter is hit."""
        np.random.seed(self.seed)
        # Initialise centroids as k distinct random training samples.
        idx = np.random.choice(len(self.X), self.k, replace=False)
        centroids = self.X[idx, :]
        pre_labels = np.argmin(distance.cdist(self.X, centroids, self.dist), axis=1)
        for itr in range(self.max_iter):
            tmp_centroids = []
            for i in range(self.k):
                members = self.X[pre_labels == i, :]
                if members.shape[0] == 0:
                    # Orphan centroid: keep it in place instead of producing NaNs.
                    tmp_centroids.append(centroids[i])
                else:
                    tmp_centroids.append(members.mean(axis=0))
            centroids = np.vstack(tmp_centroids)
            current_labels = np.argmin(distance.cdist(self.X, centroids, self.dist), axis=1)
            # Track convergence: percentage of samples that switched cluster.
            self.switch.append(100 * (1 - (sum(pre_labels == current_labels) / len(pre_labels))))
            self.epoch.append(itr + 2)
            if np.array_equal(pre_labels, current_labels):
                break
            pre_labels = current_labels
        self.centroids = centroids

    @staticmethod
    def classification_report(y_true, y_pred):
        """Print overall/per-class accuracy, F1 scores and a confusion matrix."""
        tn_00 = sum(y_pred[y_true == 0] == y_true[y_true == 0])  # true negatives
        tp_11 = sum(y_pred[y_true == 1] == y_true[y_true == 1])  # true positives
        fp_01 = sum(y_true == 0) - tn_00  # false positives
        fn_10 = sum(y_true == 1) - tp_11  # false negatives
        class_0_accuracy = 100.0 * sum(y_pred[y_true == 0] == y_true[y_true == 0]) / sum(y_true == 0)
        class_1_accuracy = 100.0 * sum(y_pred[y_true == 1] == y_true[y_true == 1]) / sum(y_true == 1)
        print(f"Overall Accuracy: {round(100.0 * accuracy_score(y_true, y_pred), 2)} %")
        print(f"F1-Score: {round(f1_score(y_true, y_pred), 3)}")
        print(f"F1-Score Macro: {round(f1_score(y_true, y_pred, average='macro'), 3)}")
        print(f"Class 0 accuracy: {round(class_0_accuracy, 2)} %")
        print(f"Class 1 accuracy: {round(class_1_accuracy, 2)} %")
        print("Confusion Matrix:")
        confusion_matrix = PrettyTable(['', 'Predicted 0', 'Predicted 1', 'Total'])
        confusion_matrix.add_row(['Actual 0', tn_00, fp_01, tn_00 + fp_01])
        confusion_matrix.add_row(['Actual 1', fn_10, tp_11, fn_10 + tp_11])
        confusion_matrix.add_row(
            ['Total', tn_00 + fn_10, fp_01 + tp_11, tn_00 + fn_10 + fp_01 + tp_11])
        print(confusion_matrix)

    def predict(self, data, y_true):
        """Assign each row of *data* to its nearest fitted centroid.

        Cluster ids are arbitrary, so labels are flipped when they
        anti-correlate with *y_true* (binary problems only).
        """
        # BUGFIX: use the configured metric, matching fit(), instead of a
        # hard-coded 'euclidean'.
        y_pred = np.argmin(distance.cdist(data, self.centroids, self.dist), axis=1)
        if accuracy_score(y_true, y_pred) < 0.5:
            y_pred = 1 - y_pred
        return y_pred

    def plot_membership_switches(self):
        """Plot the per-iteration percentage of samples that changed cluster."""
        plt.figure(figsize=(10, 8))
        plt.plot(self.epoch, self.switch)
        plt.title('Kmeans: Samples Membership Changes vs. Epoch')
        plt.xlabel("Epoch")
        plt.ylabel("Membership Changes (%)")
        plt.grid(True)
        plt.show()
# functions used for classification with kNN
def accuracy_score_knn(y, y_model):
    """Overall and per-class accuracy of predictions *y_model* against *y*.

    Returns ``(per_class_accuracies, overall_accuracy)`` where the list is
    ordered by the sorted unique labels present in *y*.
    """
    assert len(y) == len(y_model)
    hits = y == y_model  # boolean mask of correctly classified samples
    overall_accuracy = np.sum(hits) / len(y)
    per_class = [np.sum(hits[y == label]) / len(y[y == label])
                 for label in np.unique(y)]
    return per_class, overall_accuracy
def euclidean(x1, x2):
    """Euclidean (L2) distance between points *x1* and *x2*."""
    squared_diff = (x1 - x2) ** 2
    return np.sqrt(np.sum(squared_diff))
def kNN_distances(train, ytrain, test):
    """Pairwise Euclidean distances between test and training samples.

    Parameters
    ----------
    train : (n_train, d) array of training samples.
    ytrain : training labels; unused, kept for signature compatibility.
    test : (n_test, d) array of query samples.

    Returns
    -------
    (n_test, n_train) array where entry [i, j] is the Euclidean distance
    from ``test[i]`` to ``train[j]``.

    The original nested Python loops (O(n_test * n_train) interpreter
    iterations) are replaced by a single NumPy broadcast, which is
    numerically identical up to float rounding and far faster.
    """
    train = np.asarray(train, dtype=float)
    test = np.asarray(test, dtype=float)
    # (n_test, 1, d) - (1, n_train, d) -> (n_test, n_train, d) differences.
    diff = test[:, np.newaxis, :] - train[np.newaxis, :, :]
    return np.sqrt(np.sum(diff ** 2, axis=-1))
def bestk(train, alldist, ytrain, ytest, k_opt):
    """Evaluate kNN for every candidate neighbour count in *k_opt*.

    For each of three optimisation targets (overall accuracy, class-0
    accuracy, class-1 accuracy) the best k is reported together with its
    best accuracy and the two companion accuracies at that k.

    Returns ``(k_opt, accuracy_overall, accuracy_classwise, k_overall,
    k_class0, k_class1)`` where each ``k_*`` is a 4-element list.
    """
    overall_list = []
    classwise_list = []
    for k in k_opt:
        y_hat = kNN(train, alldist, ytrain, ytest, k)
        acc_i, acc_overall = accuracy_score_knn(ytest, y_hat)
        overall_list.append(acc_overall)
        classwise_list.append(acc_i)
    accuracy_overall = np.array(overall_list)    # overall accuracy per k
    accuracy_classwise = np.array(classwise_list)  # per-class accuracy per k
    # Index of the best k for each optimisation target.
    i_overall = accuracy_overall.argmax()
    i_class0 = accuracy_classwise[:, 0].argmax()
    i_class1 = accuracy_classwise[:, 1].argmax()
    # Maximise overall accuracy: [k, overall, class-0, class-1].
    k_overall = [k_opt[i_overall], accuracy_overall[i_overall],
                 accuracy_classwise[i_overall][0], accuracy_classwise[i_overall][1]]
    # Maximise class-0 accuracy: [k, class-0, overall, class-1].
    k_class0 = [k_opt[i_class0], accuracy_classwise[i_class0][0],
                accuracy_overall[i_class0], accuracy_classwise[i_class0][1]]
    # Maximise class-1 accuracy: [k, class-1, overall, class-0].
    k_class1 = [k_opt[i_class1], accuracy_classwise[i_class1][1],
                accuracy_overall[i_class1], accuracy_classwise[i_class1][0]]
    return k_opt, accuracy_overall, accuracy_classwise, k_overall, k_class0, k_class1
def kNN(train, alldist, ytrain, ytest, k):
    """Predict a label for every query row of *alldist* by majority vote.

    *alldist* holds the precomputed query-to-training distances;
    *train* and *ytest* are unused and kept for signature compatibility.
    Returns a list of predicted labels.
    """
    predictions = []
    for dists in alldist:
        nearest = np.argsort(dists)[:k]   # indices of the k closest samples
        votes = ytrain[nearest]           # their training labels
        predictions.append(np.bincount(votes).argmax())  # majority label
    return predictions
# For evaluation with an sklearn confusion matrix:
def evaluate_cm(sklearn_cm, output):
    """Derive scalar metrics from a 2x2 sklearn confusion matrix.

    The matrix layout is rows = actual, columns = predicted.  With
    output == 'PRINT' the metrics are printed; with 'RETURN' they come
    back as (accuracy, precision, sensitivity, specificity, f1_score);
    any other value yields an implicit None.
    """
    tn, fp = sklearn_cm[0, 0], sklearn_cm[0, 1]
    fn, tp = sklearn_cm[1, 0], sklearn_cm[1, 1]
    accuracy = (tn + tp) / sklearn_cm.sum()
    precision = tp / (tp + fp)
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    f1_score = (2 * precision * sensitivity) / (precision + sensitivity)
    if output == 'PRINT':
        print('accuracy: ', accuracy, 'precision: ', precision,
              'sensitivity: ', sensitivity, 'specificity: ',
              specificity, 'f1_score: ', f1_score)
    elif output == 'RETURN':
        return (accuracy, precision, sensitivity, specificity, f1_score)
# Winner-Take-All Code
# Accuracy for WTA
def accuracy_score_wta(y, y_model):
assert len(y) == len(y_model)
classn = len(np.unique(y)) # number of different classes
correct_all = y == y_model # all | |
{vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
nx.set_node_attributes(self.RK, RK_types, 'type')
# define the arcs that go from the vertices of K to those of R\K, and viceversa
E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
E_K2RK2K = E_K2RK | E_RK2K
# disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application
self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
# # glue together the (sub-)graphs L\K and R\K along the vertices of K
# self.S = nx.compose(self.L, self.RK)
# self.S.add_edges_from(E_K2RK2K)
# since the GRR's L-term has been modified, rebuild the seeker
self.seeker = Seeker(self.L)
# this machinery can generate always-new identifiers for different rule applications
self._counter = itertools.count()
def _get_rule_count(self):
    """Return a fresh application identifier for this rule.

    The id is the rule tag 'FINQBNSTETB' followed by the next value of
    the monotonically increasing counter, rendered with the module-wide
    node-id format.
    """
    serial = __NODE_ID_FORMAT__.format(next(self._counter))
    return 'FINQBNSTETB' + serial
def core(self, HI, g, nodes_dict):
    """Build the substitute sub-graph (J minus I) for one rule application.

    Folds the matched STEin -> Conv -> BatchNorm -> STEout (+ MaxPool)
    modules into integerised replacements (ternary conv, per-channel
    affine, shift-and-clip, fresh max-pool), all placed on CPU.

    Parameters: *HI* is the match sub-graph (currently unused here), *g*
    the H -> L match morphism, *nodes_dict* the node-id -> PyTorchNode map.
    Returns the relabelled replacement graph and a map from its node ids
    to the newly created PyTorchNode wrappers.
    """
    # generate the substitute (sub-)graph J\I with globally unique node ids
    rule_count = self._get_rule_count()
    g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
    JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
    # get pointers to the old modules;
    # these pointers will enable two actions:
    # 1. extracting the arguments required to perform the folding
    # 2. extracting the parameters to instantiate the new modules
    g_L2H = {vL: vH for vH, vL in g.items()}
    mstein = nodes_dict[g_L2H['/'.join(['L-term', 'STEin'])]].nobj
    minq2d = nodes_dict[g_L2H['/'.join(['L-term', 'Conv'])]].nobj
    mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
    msteout = nodes_dict[g_L2H['/'.join(['L-term', 'STEout'])]].nobj
    mmxpold = nodes_dict[g_L2H['/'.join(['L-term', 'MaxPool'])]].nobj
    # fold the STE/conv/batch-norm parameters into (weight, gamma, beta)
    weight, gamma, beta = foldsteinqconvbnste(mstein.num_levels, mstein.abs_max_value,
                                              minq2d.weight_frozen,
                                              mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias,
                                              msteout.num_levels, msteout.abs_max_value,
                                              gamma_int_bits=self._gamma_int_bits, gamma_frac_bits=self._gamma_frac_bits,
                                              beta_int_bits=self._beta_int_bits, beta_frac_bits=self._beta_frac_bits)
    # build the new modules: conv geometry is copied from the old conv
    mtwconv = nn.Conv2d(minq2d.in_channels, minq2d.out_channels, minq2d.kernel_size,
                        stride=minq2d.stride, padding=minq2d.padding, dilation=minq2d.dilation, groups=minq2d.groups,
                        bias=minq2d.bias is not None).to(torch.device('cpu'))
    mtwconv.weight.data = weight
    # 1x1 depthwise conv implements the per-channel affine (gamma, beta)
    mxpaffine = nn.Conv2d(minq2d.out_channels, minq2d.out_channels, 1,
                          stride=1, padding=0, groups=minq2d.out_channels,
                          bias=True).to(torch.device('cpu'))
    mxpaffine.weight.data = gamma
    mxpaffine.bias.data = beta
    msandc = qg.graphs.ShiftAndClip(n_bits=math.ceil(math.log(msteout.num_levels, 2)),
                                    shift=self._gamma_frac_bits,
                                    signed=True, only_positive=True).to(torch.device('cpu'))
    mmxpnew = nn.MaxPool2d(kernel_size=mmxpold.kernel_size, stride=mmxpold.stride, padding=mmxpold.padding)
    # register the newly created nodes
    vJI_2_ptnode = {}
    vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'TWConv'])]] = PyTorchNode(mtwconv)
    vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'XPAffine'])]] = PyTorchNode(mxpaffine)
    vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'S&C'])]] = PyTorchNode(msandc)
    vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'MaxPool'])]] = PyTorchNode(mmxpnew)
    return JI, vJI_2_ptnode
def apply(self, G, nodes_dict, g):
    """Perform one double-pushout rewrite on *G* using match morphism *g*.

    Composes the substitute sub-graph into a copy of *G*, glues it to the
    interface nodes, resets the precision tunnels to identity scale (the
    folded modules are fully integerised), then deletes the matched
    nodes.  The inputs are not mutated; returns (new graph, new node map).
    """
    # create new containers
    G = G.copy()
    nodes_dict = {**nodes_dict}
    # characterise the match graph H: interface nodes VI, matched core VHI
    VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)}
    VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)}
    HI = G.subgraph(VHI)
    # generate the substitute (sub-)graph J\I
    JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
    # add the substitute (sub-)graph J\I to the main graph G
    G = nx.compose(G, JI)
    nodes_dict.update(vJI_2_ptnode)
    # glue the substitute (sub-)graph J\I to the interface (sub-)graph I;
    # the R\K template must match J\I exactly once
    JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
    assert len(JI2RK_morphisms) == 1
    g_JI2RK = JI2RK_morphisms[0]
    g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
    for vI in VI:
        vK = g[vI]
        # re-create the K<->R\K arcs, mapped into the host graph
        G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]})
        G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})
        # the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
        if nodes_dict[vI].ntype == qg.graphs.HelperOutputPrecisionTunnel.__name__:
            nodes_dict[vI] = PyTorchNode(qg.graphs.HelperOutputPrecisionTunnel(1.0))
        elif nodes_dict[vI].ntype == qg.graphs.HelperInputPrecisionTunnel.__name__:
            nodes_dict[vI] = PyTorchNode(qg.graphs.HelperInputPrecisionTunnel(1.0))
        else:
            raise TypeError  # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
    # discard the match (sub-)graph H\I
    G.remove_nodes_from(set(HI.nodes))
    for vHI in VHI:
        del nodes_dict[vHI]
    return G, nodes_dict
def seek(self, G, nodes_dict):
    """Return all matches (morphisms) of this rule's L-term inside *G*.

    *nodes_dict* is unused; it is kept for interface uniformity with the
    other rules' seek methods.
    """
    return self.seeker.get_morphisms(G)
class FoldConvBNSTERule(DPORule):
    """Double-pushout rewriting rule that folds a Conv2d -> BatchNorm2d ->
    ReLU -> STEActivation chain (the L-term) into an integerised, biased
    Conv2d followed by a FloorAndClip node (the R-term), matched between a
    HelperInput and a HelperInputPrecisionTunnel (the K-term context)."""

    def __init__(self):
        """Build the L-, K- and R-term template graphs and the match seeker."""
        # node types of the context (K-term): the interface the rule preserves
        K_types = OrderedDict()
        K_types.update({'HI': qg.graphs.HelperInput.__name__})
        K_types.update({'HPTin': qg.graphs.HelperInputPrecisionTunnel.__name__})
        K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
        # node types of the template minus context (L\K): the chain to fold
        LK_types = OrderedDict()
        LK_types.update({'Conv': nn.Conv2d.__name__})
        LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
        LK_types.update({'ReLU': nn.ReLU.__name__})
        LK_types.update({'STE': qa.ste.STEActivation.__name__})
        LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
        # node types of the replacement minus context (R\K)
        RK_types = OrderedDict()
        RK_types.update({'Conv': nn.Conv2d.__name__})
        RK_types.update({'F&C': qg.graphs.FloorAndClip.__name__})
        RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
        K_node_IDs = list(K_types.keys())
        LK_node_IDs = list(LK_types.keys())
        RK_node_IDs = list(RK_types.keys())
        # define the template graph L [L-term]: a single chain through the context
        L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
        self.L = nx.DiGraph()
        self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
        nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
        nx.set_node_attributes(self.L, {**K_types, **LK_types}, 'type')
        # define the context (sub-)graph K [K-term]
        VK = set(K_node_IDs)  # precision tunnel nodes define the context graph
        self.K = self.L.subgraph(VK)
        # define the template (sub-)graph L\K
        VLK = set(self.L.nodes).difference(set(self.K.nodes))
        self.LK = self.L.subgraph(VLK)
        # define the replacement (sub-)graph R\K ["gluing" R\K to K yields the graph R, i.e., the R-term]
        self.RK = nx.DiGraph()
        self.RK.add_edges_from({(u, v) for u, v in zip(RK_node_IDs[:-1], RK_node_IDs[1:])})
        nx.set_node_attributes(self.RK, {vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
        nx.set_node_attributes(self.RK, RK_types, 'type')
        # define the arcs that go from the vertices of K to those of R\K, and viceversa
        E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
        E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
        E_K2RK2K = E_K2RK | E_RK2K
        # disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application
        self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
        self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
        # # glue together the (sub-)graphs L\K and R\K along the vertices of K
        # self.S = nx.compose(self.L, self.RK)
        # self.S.add_edges_from(E_K2RK2K)
        # since the GRR's L-term has been modified, rebuild the seeker
        self.seeker = Seeker(self.L)
        # this machinery can generate always-new identifiers for different rule applications
        self._counter = itertools.count()

    def _get_rule_count(self):
        """Return a fresh application id: rule tag + formatted counter value."""
        rule_count = ''.join(['FCBNSTE', __NODE_ID_FORMAT__.format(next(self._counter))])
        return rule_count

    def core(self, HI, g, nodes_dict):
        """Build the substitute sub-graph for one application: fold the
        matched Conv/BatchNorm/STE parameters into a biased Conv2d plus a
        FloorAndClip node, both on CPU.  Returns the relabelled replacement
        graph and a map from its node ids to the new PyTorchNode wrappers."""
        # generate the substitute (sub-)graph J\I with globally unique node ids
        rule_count = self._get_rule_count()
        g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
        JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
        # get pointers to the old modules;
        # these pointers will enable two actions:
        # 1. extracting the arguments required to perform the folding
        # 2. extracting the parameters to instantiate the new modules
        g_L2H = {vL: vH for vH, vL in g.items()}
        mconvold = nodes_dict[g_L2H['/'.join(['L-term', 'Conv'])]].nobj
        mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
        mste = nodes_dict[g_L2H['/'.join(['L-term', 'STE'])]].nobj
        # fold the conv/batch-norm/STE parameters into (weight, bias)
        weight, bias = foldconvbnste(mconvold.weight,
                                     mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias,
                                     mste.num_levels, mste.abs_max_value)
        # build the new modules: conv geometry is copied from the old conv
        mconvnew = nn.Conv2d(mconvold.in_channels, mconvold.out_channels, mconvold.kernel_size,
                             stride=mconvold.stride, padding=mconvold.padding, dilation=mconvold.dilation, groups=mconvold.groups,
                             bias=True).to(torch.device('cpu'))
        mconvnew.weight.data = weight
        mconvnew.bias.data = bias
        mfandc = qg.graphs.FloorAndClip(n_bits=math.ceil(math.log(mste.num_levels, 2)),
                                        signed=True, only_positive=True).to(torch.device('cpu'))
        # register the newly created nodes
        vJI_2_ptnode = {}
        vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'Conv'])]] = PyTorchNode(mconvnew)
        vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'F&C'])]] = PyTorchNode(mfandc)
        return JI, vJI_2_ptnode

    def apply(self, G, nodes_dict, g):
        """Perform one double-pushout rewrite on *G* using match morphism *g*.

        Composes the substitute sub-graph into a copy of *G*, glues it to
        the interface nodes, normalises the precision tunnels, and removes
        the matched nodes.  Inputs are not mutated."""
        # create new containers
        G = G.copy()
        nodes_dict = {**nodes_dict}
        # characterise the match graph H: interface nodes VI, matched core VHI
        VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)}
        VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)}
        HI = G.subgraph(VHI)
        # generate the substitute (sub-)graph J\I
        JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
        # add the substitute (sub-)graph J\I to the main graph G
        G = nx.compose(G, JI)
        nodes_dict.update(vJI_2_ptnode)
        # glue the substitute (sub-)graph J\I to the interface (sub-)graph I
        JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
        assert len(JI2RK_morphisms) == 1
        g_JI2RK = JI2RK_morphisms[0]
        g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
        for vI in VI:
            vK = g[vI]
            # re-create the K<->R\K arcs, mapped into the host graph
            G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]})
            G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})
            # the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
            if nodes_dict[vI].ntype == qg.graphs.HelperInput.__name__:
                pass
            elif nodes_dict[vI].ntype == qg.graphs.HelperInputPrecisionTunnel.__name__:
                nodes_dict[vI] = PyTorchNode(qg.graphs.HelperInputPrecisionTunnel(1.0))
            else:
                raise TypeError  # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
        # discard the match (sub-)graph H\I
        G.remove_nodes_from(set(HI.nodes))
        for vHI in VHI:
            del nodes_dict[vHI]
        return G, nodes_dict

    def seek(self, G, nodes_dict):
        """Return all matches of the rule's L-term in *G* (*nodes_dict* unused)."""
        gs = self.seeker.get_morphisms(G)
        return gs
class FoldSTEINQConvBNRule(DPORule):
def __init__(self):
K_types = OrderedDict()
K_types.update({'HI': qg.graphs.HelperOutputPrecisionTunnel.__name__})
K_types.update({'MaxPool': nn.MaxPool2d.__name__})
K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
LK_types = OrderedDict()
LK_types.update({'STE': qa.ste.STEActivation.__name__})
LK_types.update({'INQConv': qa.inq.INQConv2d.__name__})
LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
LK_types.update({'ReLU': nn.ReLU.__name__})
LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
RK_types = OrderedDict()
RK_types.update({'Conv': nn.Conv2d.__name__})
RK_types.update({'ReLU': nn.ReLU.__name__})
RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
K_node_IDs = list(K_types.keys())
LK_node_IDs = list(LK_types.keys())
RK_node_IDs = list(RK_types.keys())
# define the template graph L [L-term]
L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
self.L = nx.DiGraph()
self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
nx.set_node_attributes(self.L, {**K_types, | |
from . import assert_match, ntorch
import torch
from collections import OrderedDict
import pytest
import torch.nn.functional as F
from hypothesis import given
from .strategies import (
named_tensor,
broadcast_named_tensor,
mask_named_tensor,
dim,
dims,
name,
names,
)
from hypothesis.strategies import (
sampled_from,
lists,
data,
floats,
integers,
permutations,
)
## HYPOTHESIS Tests
@given(data(), named_tensor())
def test_stack_basic(data, x):
    """Stacking drawn dims into a new name adds the name and removes the dims."""
    s = data.draw(dims(x))
    n = data.draw(name(x))
    x = x.stack(list(s), n)
    assert n in x.dims
    assert not (x.shape.keys() & s)
@given(named_tensor())
def test_deepcopy(x):
    """A deep copy duplicates both the values tensor and the schema object."""
    import copy
    x2 = copy.deepcopy(x)
    assert id(x2.values) != id(x.values)
    assert torch.equal(x2.values, x.values)
    assert id(x2._schema) != id(x._schema)
    assert x2._schema._names == x._schema._names
    assert x2._schema._masked == x._schema._masked
@given(data(), named_tensor())
def test_unique(data, x):
    """ntorch.unique keeps values sorted along the new dim and the inverse
    indices reconstruct the original tensor, with and without `dim`."""
    s = data.draw(dim(x))
    nu, ni = data.draw(names(x, max_size=2))
    output, inverse_indices = ntorch.unique(
        x, sorted=True, return_inverse=True, dim=s, names=(nu, ni)
    )
    assert all(
        output[{nu: i}] <= output[{nu: i + 1}]
        for i in range(output.shape[nu] - 1)
    )
    assert torch.equal(
        x.values, output.index_select(nu, inverse_indices).values
    )
    # same checks for the flattened (no `dim`) variant
    output, inverse_indices = ntorch.unique(
        x, sorted=True, return_inverse=True, names=(nu, ni)
    )
    assert all(
        (output[{nu: i}].values <= output[{nu: i + 1}].values)
        for i in range(output.shape[nu] - 1)
    )
    assert torch.equal(
        x.values, output.index_select(nu, inverse_indices).values
    )
@given(data(), named_tensor())
def test_rename(data, x):
    """Renaming a dim introduces the new name and drops the old one."""
    s = data.draw(dim(x))
    n = data.draw(name(x))
    x = x.rename(s, n)
    assert n in x.dims
    assert s not in x.dims
@given(data(), named_tensor())
def test_split(data, x):
    """Splitting a dim into several names preserves the total element count."""
    s = data.draw(dim(x))
    ns = list(data.draw(names(x)))
    # all but the last new dim get size 1; the last absorbs the remainder
    x2 = x.split(s, ns, **{n: 1 for n in ns[:-1]})
    assert len(set(ns) & set(x2.dims)) == len(ns)
    assert s not in x2.dims
    assert torch.prod(torch.tensor([x2.shape[n] for n in ns])) == x.shape[s]
@given(data(), named_tensor())
def test_reduce(data, x):
    """Reducing over a drawn dim set removes exactly those dims."""
    ns = data.draw(dims(x))
    method = data.draw(sampled_from(sorted(x._reduce)))
    if method in ["squeeze"]:
        return
    if method not in ["logsumexp"]:
        # full reduction should at least not raise
        y = getattr(x, method)()
        print(y)
        # assert y.values == getattr(x.values, method)()
    x2 = getattr(x, method)(tuple(ns))
    assert set(x2.dims) | set(ns) == set(x.dims)
@given(data(), named_tensor())
def test_binary_op(data, x):
    """Binary ops broadcast: the result carries the union of both dim sets,
    regardless of operand order."""
    y = data.draw(broadcast_named_tensor(x))
    method = data.draw(sampled_from(sorted(x._binop)))
    x2 = getattr(x, method)(y)
    assert set(x2.dims) == set(x.dims) | set(y.dims)
    x3 = getattr(y, method)(x)
    assert set(x3.dims) == set(x.dims) | set(y.dims)
@given(data(), named_tensor())
def test_noshift(data, x):
    """Non-shifting ops leave the dim set unchanged ('cuda' is skipped)."""
    method = data.draw(
        sampled_from(sorted(x._noshift)).filter(lambda a: a not in {"cuda"})
    )
    x2 = getattr(x, method)()
    assert set(x2.dims) == set(x.dims)
@given(data(), named_tensor())
def test_apply(data, x):
    """Dim-taking, non-shifting ops (e.g. softmax) keep the shape intact."""
    method = data.draw(
        sampled_from(sorted(x._noshift_dim | x._noshift_nn_dim))
    )
    s = data.draw(dim(x))
    x2 = getattr(x, method)(s)
    assert x.shape == x2.shape
@given(named_tensor())
def test_sum(x):
    """A full sum matches torch's sum over the raw values."""
    s = x.sum()
    print(x.shape)
    assert s.values == x.values.sum()
@given(data(), named_tensor())
def test_mask(data, x):
    """masked_select and boolean indexing accept a valid mask tensor."""
    mask = data.draw(mask_named_tensor(x))
    x2 = x.masked_select(mask, "c")
    x2 = x[mask]
    print(x2)
@pytest.mark.xfail
@given(data(), named_tensor())
def test_maskfail(data, x):
    """Using an arbitrary broadcastable tensor as a mask is expected to fail."""
    mask = data.draw(broadcast_named_tensor(x))
    x2 = x.masked_select(mask, "c")
    x2 = x[mask]
    print(x2)
@given(data(), named_tensor(), floats(allow_nan=False, allow_infinity=False))
def test_all_scalar_ops(data, x, y):
    """Left/right scalar arithmetic and negation run without error."""
    x = x + y
    x = x - y
    x = x * y
    x = x / y
    x = y + x
    x = y - x
    x = y * x
    x = -x
def test_mod():
    """ntorch.fmod computes the elementwise remainder with named dims."""
    base1 = ntorch.tensor(torch.Tensor([[1, 2, 3], [3, 4, 5]]), ("a", "b"))
    expected = ntorch.tensor(torch.Tensor([[1, 2, 0], [0, 1, 2]]), ("a", "b"))
    assert_match(ntorch.fmod(base1, 3), expected)
@given(data(), named_tensor())
def test_indexing(data, x):
    """Int indexing drops the indexed dims, slice indexing keeps them, and
    slice assignment is accepted."""
    d = data.draw(dim(x))
    i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
    x2 = x[{d: i}]
    assert set(x2.dims) == set(x.dims) - set([d])
    # integer indexing over several drawn dims at once
    ds = data.draw(dims(x))
    index = {}
    for d in ds:
        i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
        index[d] = i
    x2 = x[index]
    assert set(x2.dims) == set(x.dims) - set(ds)
    # slice indexing keeps every dim
    ds = data.draw(dims(x))
    index = {}
    for d in ds:
        i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
        j = data.draw(integers(min_value=i + 1, max_value=x.shape[d]))
        index[d] = slice(i, j)
    x2 = x[index]
    assert set(x2.dims) == set(x.dims)
    x[index] = 6
@given(data(), named_tensor())
def test_tensor_indexing(data, x):
    """Indexing with a named index tensor replaces the indexed dim with the
    index tensor's dim and supports assignment."""
    d = data.draw(dim(x))
    indices = data.draw(
        lists(integers(min_value=0, max_value=x.shape[d] - 1), unique=True)
    )
    n = data.draw(name(x))
    ind_vector = ntorch.tensor(indices, names=n).long()
    x2 = x[{d: ind_vector}]
    assert set(x2.dims) == (set(x.dims) | set([n])) - set([d])
    x[{d: ind_vector}] = 5
    assert (x[{d: ind_vector}] == 5).all()
@given(data(), named_tensor())
def test_tensor_mask(data, x):
    """Boolean-mask assignment and selection flatten to the 'on' dim."""
    mask = data.draw(mask_named_tensor(x))
    x[mask] = 6
    x2 = x[mask]
    assert x2.dims == ("on",)
@given(data(), named_tensor())
def test_cat(data, x):
    """ntorch.cat concatenates transposed variants along any shared dim."""
    perm = data.draw(permutations(x.dims))
    y = x.transpose(*perm)
    for s in set(x.dims) & set(y.dims):
        c = ntorch.cat([x, y], dim=s)
        c = ntorch.cat([x, c], dim=s)
        c = ntorch.cat([c, x, y], dim=s)
        print(c)
@given(data(), named_tensor())
def test_stack(data, x):
    """ntorch.stack adds the new dim name to the union of the input dims."""
    perm = data.draw(permutations(x.dims))
    print(perm)
    y = x.transpose(*perm)
    n = data.draw(name(x))
    z = ntorch.stack([x, y], n)
    assert set(z.dims) == set(x.dims) | set([n])
@given(data(), named_tensor())
def test_dot(data, x):
    """dot contracts drawn dim subsets in either operand order without error."""
    y = data.draw(broadcast_named_tensor(x))
    dsx = data.draw(dims(x))
    dsy = data.draw(dims(x))
    x.dot(dsx, y)
    x.dot(dsy, y)
    y.dot(dsx, x)
    y.dot(dsy, x)
## Old style tests
def test_apply2():
    """op() applies a torch function along a named dim (softmax sums to 1)."""
    base = torch.zeros([10, 2, 50])
    ntensor = ntorch.tensor(base, ("alpha", "beta", "gamma"))
    ntensor = ntensor.op(F.softmax, dim="alpha")
    assert (ntorch.abs(ntensor.sum("alpha") - 1.0) < 1e-5).all()
# def test_fill():
# base = torch.zeros([10, 2, 50])
# ntensor = ntorch.tensor(base, ("alpha", "beta", "gamma"))
# ntensor.fill_(20)
# assert (ntensor == 20).all()
def test_gather():
    """gather/scatter_ mirror torch.gather semantics using named dims."""
    t = torch.Tensor([[1, 2], [3, 4]])
    base = torch.gather(t, 1, torch.LongTensor([[0, 0], [1, 0]]))
    t = ntorch.tensor(torch.Tensor([[1, 2], [3, 4]]), ("a", "b"))
    index = ntorch.tensor(torch.LongTensor([[0, 0], [1, 0]]), ("a", "c"))
    ntensor = ntorch.gather(t, "b", index, "c")
    assert (ntensor.values == base).all()
    assert ntensor.shape == OrderedDict([("a", 2), ("c", 2)])
    # scatter_ writes x into y along dim "a" using the named index tensor
    x = ntorch.tensor(torch.rand(2, 5), ("c", "b"))
    y = ntorch.tensor(torch.rand(3, 5), ("a", "b"))
    y.scatter_(
        "a",
        ntorch.tensor(
            torch.LongTensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), ("c", "b")
        ),
        x,
        "c",
    )
    assert y.shape == OrderedDict([("a", 3), ("b", 5)])
def test_unbind():
    """unbind splits along a named dim and drops it from each piece."""
    base = torch.zeros([10, 2, 50])
    ntensor = ntorch.tensor(base, ("alpha", "beta", "gamma"))
    out = ntensor.unbind("beta")
    assert len(out) == 2
    assert out[0].shape == OrderedDict([("alpha", 10), ("gamma", 50)])
    # unbinding a 1-D tensor yields scalars
    base = torch.zeros([10])
    ntensor = ntorch.tensor(base, ("alpha",))
    ntensor.fill_(20)
    c = ntensor.unbind("alpha")
    assert len(c) == 10
    assert c[0].item() == 20
# @pytest.mark.xfail
# def test_fail():
# for base1, base2 in zip(
# make_tensors([10, 2, 50]), make_tensors([10, 20, 2])
# ):
# ntensor1 = NamedTensor(base1, ("alpha", "beta", "gamma"))
# ntensor2 = NamedTensor(base2, ("alpha", "beat", "gamma"))
# assert_match(ntensor1, ntensor2)
def test_multiple():
    """Broadcast multiply matches the manual view-based torch computation."""
    base1 = torch.rand([10, 2, 50])
    base2 = torch.rand([10, 20, 2])
    ntensor1 = ntorch.tensor(base1, ("alpha", "beta", "gamma"))
    ntensor2 = ntorch.tensor(base2, ("alpha", "delta", "beta"))
    assert_match(ntensor1, ntensor2)
    # Try applying a projected bin op
    base3 = torch.mul(base1.view([10, 1, 2, 50]), base2.view([10, 20, 2, 1]))
    ntensor3 = ntensor1.mul(ntensor2).transpose(
        "alpha", "delta", "beta", "gamma"
    )
    assert base3.shape == ntensor3.vshape
    assert (base3 == ntensor3.values).all()
# def test_contract():
# base1 = torch.randn(10, 2, 50)
# ntensor1 = ntorch.tensor(base1, ("alpha", "beta", "gamma"))
# base2 = torch.randn(10, 20, 2)
# ntensor2 = ntorch.tensor(base2, ("alpha", "delta", "beta"))
# assert_match(ntensor1, ntensor2)
# base3 = torch.einsum("abg,adb->a", (base1, base2))
# ntensor3 = ntorch.dot(("beta", "gamma", "delta"), ntensor1, ntensor2)
# assert ntensor3.shape == OrderedDict([("alpha", 10)])
# assert ntensor3.vshape == base3.shape
# assert (np.abs(ntensor3._tensor - base3) < 1e-5).all()
# ntensora = ntensor.reduce("alpha", "mean")
# assert ntensora.named_shape == OrderedDict([("beta", 2),
# ("gamma", 50)])
# ntensorb = ntensor.reduce("alpha gamma", "mean")
# assert ntensorb.named_shape == OrderedDict([("beta", 2)])
# def test_lift():
# def test_function(tensor):
# return np.sum(tensor, dim=1)
# base = np.random.randn(10, 70, 50)
# ntensor = NamedTensor(base, 'batch alpha beta')
# lifted = lift(test_function, ["alpha beta"], "beta")
# ntensor2 = lifted(ntensor)
# assert ntensor2.named_shape == OrderedDict([("batch", 10),
# ("beta", 2)])
def test_unbind2():
    """Unbinding a size-2 dim yields two tensors without that dim."""
    base1 = torch.randn(10, 2, 50)
    ntensor1 = ntorch.tensor(base1, ("alpha", "beta", "gamma"))
    a, b = ntensor1.unbind("beta")
    assert a.shape == OrderedDict([("alpha", 10), ("gamma", 50)])
# def test_access():
# base1 = torch.randn(10, 2, 50)
# ntensor1 = ntorch.tensor(base1, ("alpha", "beta", "gamma"))
# assert (ntensor1.access("gamma")[45] == base1[:, :, 45]).all()
# assert (ntensor1.get("gamma", 1)._tensor == base1[:, :, 1]).all()
# assert (ntensor1.access("gamma beta")[45, 1] == base1[:, 1, 45]).all()
def test_takes():
    """index_select on a named dim matches torch.index_select on its position."""
    raw = torch.randn(10, 2, 50)
    named = ntorch.tensor(raw, ("alpha", "beta", "gamma"))
    idx = torch.ones(30).long()
    named_idx = ntorch.tensor(idx, ("indices",))
    picked = named.index_select("beta", named_idx)
    # "beta" is dim 1 of the underlying tensor.
    assert (picked._tensor == raw.index_select(1, idx)).all()
    expected = OrderedDict([("alpha", 10), ("indices", 30), ("gamma", 50)])
    assert picked.shape == expected
def test_narrow():
    """Narrowing a named dim shrinks only that dim in the shape."""
    raw = torch.randn(10, 2, 50)
    named = ntorch.tensor(raw, ("alpha", "beta", "gamma"))
    sliced = named.narrow("gamma", 0, 25)
    expected = OrderedDict([("alpha", 10), ("beta", 2), ("gamma", 25)])
    assert sliced.shape == expected
# def test_ops():
# base1 = ntorch.randn(dict(alpha=10, beta=2, gamma=50))
# base2 = ntorch.log(base1)
# base2 = ntorch.exp(base1)
@pytest.mark.xfail
def test_mask2():
    """softmax over a masked-to dim — expected to fail (xfail)."""
    tensor = ntorch.randn(10, 2, 50, names=("alpha", "beta", "gamma"))
    masked = tensor.mask_to("alpha")
    print(masked._schema._masked)
    masked = masked.softmax("alpha")
def test_unmask():
    """Clearing a mask (mask_to("")) re-enables softmax over the dim."""
    tensor = ntorch.randn(10, 2, 50, names=("alpha", "beta", "gamma"))
    masked = tensor.mask_to("alpha")
    cleared = masked.mask_to("")
    cleared = cleared.softmax("alpha")
# def test_division():
# base1 = NamedTensor(torch.ones(3, 4), ("short", "long"))
# expected = NamedTensor(torch.ones(3) / 4, ("short",))
# assert_match(base1 / base1.sum("long"), expected)
# def test_scalarmult():
# base1 = NamedTensor(torch.ones(3, 4), ("short", "long"))
# rmul = 3 * base1
# lmul = base1 * 3
# assert_match(rmul, lmul)
# def test_subtraction():
# base1 = ntorch.ones(3, 4, names=("short", "long"))
# base2 = ntorch.ones(3, 4, names=("short", "long"))
# expect = ntorch.zeros(3, 4, names=("short", "long"))
# assert_match(base1 - base2, expect)
# def test_rightsubtraction():
# base1 = ntorch.ones(3, 4, names=("short", "long"))
# expect = ntorch.zeros(3, 4, names=("short", "long"))
# assert_match(1 - base1, expect)
# def test_rightaddition():
# base1 | |
to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Delco = tk.Button(master=back, text='Delco', command=CMD_Delco, width=14, height=3)
Button_Delco.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Nakina = tk.Button(master=back, text='Nakina', command=CMD_Nakina, width=14, height=3)
Button_Nakina.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Whiteville = tk.Button(master=back, text='Whiteville', command=CMD_Whiteville, width=14, height=3)
Button_Whiteville.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Craven(self):
    """Build the Craven County repeater-selection screen.

    Tears down the current root window, creates a fresh one and lays out
    a 6-column x 5-row button grid: navigation buttons on row 0, one
    button per repeater site, and empty placeholder buttons filling the
    remaining cells so the grid stays visually complete.
    """
    self.mw.destroy()
    self.mw = tk.Tk()
    # Shared widget attributes, applied once via the Tk option database.
    self.mw.option_add("*Button.Background", "Teal")
    self.mw.option_add("*Button.Foreground", "White")
    self.mw.title('OP25 Repeater Selector GUI')
    # Sized slightly smaller than the RPi 7" touch screens.
    self.mw.geometry("800x420")
    self.mw.resizable(0, 0)  # fixed-size window
    back = tk.Frame(master=self.mw, bg='Grey')
    back.pack_propagate(0)  # children must not resize the frame
    back.pack(fill=tk.BOTH, expand=1)  # fill the root window

    grid_opts = dict(sticky=tk.W + tk.E + tk.N + tk.S, padx=9, pady=5)

    def add_button(text, command, row, column):
        # One cell of the button grid; all cells share size and padding.
        button = tk.Button(master=back, text=text, command=command,
                           width=14, height=3)
        button.grid(row=row, column=column, **grid_opts)
        return button

    # Navigation row.
    add_button('Stop OP25 Instances', stopall, 0, 3)
    add_button('Go To Favorites', self.Favorites, 0, 1)
    add_button('Go To NC Counties', self.NC_Counties_Home, 0, 5)
    # Repeater sites for this county.
    sites = [('New_Bern', CMD_New_Bern)]
    occupied = set()
    for offset, (label, command) in enumerate(sites):
        row, column = 1 + offset // 5, 1 + offset % 5
        add_button(label, command, row, column)
        occupied.add((row, column))
    # Inert placeholder buttons (command='' is a no-op) fill the rest of
    # the grid, preserving the original screen layout.
    for row in range(1, 5):
        for column in range(1, 6):
            if (row, column) not in occupied:
                add_button('', '', row, column)
def Menu_Cumberland(self):
    """Build the Cumberland County repeater-selection screen.

    Tears down the current root window, creates a fresh one and lays out
    a 6-column x 5-row button grid: navigation buttons on row 0, one
    button per repeater site, and empty placeholder buttons filling the
    remaining cells so the grid stays visually complete.
    """
    self.mw.destroy()
    self.mw = tk.Tk()
    # Shared widget attributes, applied once via the Tk option database.
    self.mw.option_add("*Button.Background", "Teal")
    self.mw.option_add("*Button.Foreground", "White")
    self.mw.title('OP25 Repeater Selector GUI')
    # Sized slightly smaller than the RPi 7" touch screens.
    self.mw.geometry("800x420")
    self.mw.resizable(0, 0)  # fixed-size window
    back = tk.Frame(master=self.mw, bg='Grey')
    back.pack_propagate(0)  # children must not resize the frame
    back.pack(fill=tk.BOTH, expand=1)  # fill the root window

    grid_opts = dict(sticky=tk.W + tk.E + tk.N + tk.S, padx=9, pady=5)

    def add_button(text, command, row, column):
        # One cell of the button grid; all cells share size and padding.
        button = tk.Button(master=back, text=text, command=command,
                           width=14, height=3)
        button.grid(row=row, column=column, **grid_opts)
        return button

    # Navigation row.
    add_button('Stop OP25 Instances', stopall, 0, 3)
    add_button('Go To Favorites', self.Favorites, 0, 1)
    add_button('Go To NC Counties', self.NC_Counties_Home, 0, 5)
    # Repeater sites for this county.
    sites = [('Fayetteville_TWC', CMD_Fayetteville_TWC),
             ('Slocomb', CMD_Slocomb),
             ('Cedar_Creek', CMD_Cedar_Creek)]
    occupied = set()
    for offset, (label, command) in enumerate(sites):
        row, column = 1 + offset // 5, 1 + offset % 5
        add_button(label, command, row, column)
        occupied.add((row, column))
    # Inert placeholder buttons (command='' is a no-op) fill the rest of
    # the grid, preserving the original screen layout.
    for row in range(1, 5):
        for column in range(1, 6):
            if (row, column) not in occupied:
                add_button('', '', row, column)
def Menu_Currituck(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
#You can set the geometry attribute to change the root windows size
self.mw.geometry("800x420") #You want the size of the app to be 750 X 562.5 Pixels (Slightky Smaller than the RPI 7" Touch Screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Barco = tk.Button(master=back, text='Barco', command=CMD_Barco, width=14, height=3)
Button_Barco.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, | |
Team.
"""
pass
def test_teams_id_data_source_soaps_fk_delete(self):
"""
Test case for teams_id_data_source_soaps_fk_delete
Delete a related item by id for dataSourceSoaps.
"""
pass
def test_teams_id_data_source_soaps_fk_get(self):
"""
Test case for teams_id_data_source_soaps_fk_get
Find a related item by id for dataSourceSoaps.
"""
pass
def test_teams_id_data_source_soaps_fk_put(self):
"""
Test case for teams_id_data_source_soaps_fk_put
Update a related item by id for dataSourceSoaps.
"""
pass
def test_teams_id_data_source_soaps_get(self):
"""
Test case for teams_id_data_source_soaps_get
Queries dataSourceSoaps of Team.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_count_get(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_count_get
Counts dynamicDatas of DataSourceSoap.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_delete(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_delete
Deletes all dynamicDatas of this model.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_fk_delete(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_fk_delete
Delete a related item by id for dynamicDatas.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_fk_get(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_fk_get
Find a related item by id for dynamicDatas.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_fk_put(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_fk_put
Update a related item by id for dynamicDatas.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_get(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_get
Queries dynamicDatas of DataSourceSoap.
"""
pass
def test_teams_id_data_source_soaps_nk_dynamic_datas_post(self):
"""
Test case for teams_id_data_source_soaps_nk_dynamic_datas_post
Creates a new instance in dynamicDatas of this model.
"""
pass
def test_teams_id_data_source_soaps_nk_team_get(self):
"""
Test case for teams_id_data_source_soaps_nk_team_get
Fetches belongsTo relation team.
"""
pass
def test_teams_id_data_source_soaps_post(self):
"""
Test case for teams_id_data_source_soaps_post
Creates a new instance in dataSourceSoaps of this model.
"""
pass
def test_teams_id_delete(self):
"""
Test case for teams_id_delete
Delete a model instance by {{id}} from the data source.
"""
pass
def test_teams_id_dynamic_datas_count_get(self):
"""
Test case for teams_id_dynamic_datas_count_get
Counts dynamicDatas of Team.
"""
pass
def test_teams_id_dynamic_datas_fk_delete(self):
"""
Test case for teams_id_dynamic_datas_fk_delete
Delete a related item by id for dynamicDatas.
"""
pass
def test_teams_id_dynamic_datas_fk_get(self):
"""
Test case for teams_id_dynamic_datas_fk_get
Find a related item by id for dynamicDatas.
"""
pass
def test_teams_id_dynamic_datas_fk_put(self):
"""
Test case for teams_id_dynamic_datas_fk_put
Update a related item by id for dynamicDatas.
"""
pass
def test_teams_id_dynamic_datas_get(self):
"""
Test case for teams_id_dynamic_datas_get
Queries dynamicDatas of Team.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_mongo_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_mongo_get
Fetches belongsTo relation dataSourceMongo.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_ms_sql_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_ms_sql_get
Fetches belongsTo relation dataSourceMsSql.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_my_sql_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_my_sql_get
Fetches belongsTo relation dataSourceMySql.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_oracle_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_oracle_get
Fetches belongsTo relation dataSourceOracle.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_postgre_sql_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_postgre_sql_get
Fetches belongsTo relation dataSourcePostgreSql.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_rest_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_rest_get
Fetches belongsTo relation dataSourceRest.
"""
pass
def test_teams_id_dynamic_datas_nk_data_source_soap_get(self):
"""
Test case for teams_id_dynamic_datas_nk_data_source_soap_get
Fetches belongsTo relation dataSourceSoap.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_count_get(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_count_get
Counts designs of DynamicData.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_fk_delete(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_fk_delete
Delete a related item by id for designs.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_fk_get(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_fk_get
Find a related item by id for designs.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_fk_put(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_fk_put
Update a related item by id for designs.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_get(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_get
Queries designs of DynamicData.
"""
pass
def test_teams_id_dynamic_datas_nk_designs_post(self):
"""
Test case for teams_id_dynamic_datas_nk_designs_post
Creates a new instance in designs of this model.
"""
pass
def test_teams_id_dynamic_datas_nk_records_count_get(self):
"""
Test case for teams_id_dynamic_datas_nk_records_count_get
Count Dynamic Data records
"""
pass
def test_teams_id_dynamic_datas_nk_records_delete(self):
"""
Test case for teams_id_dynamic_datas_nk_records_delete
Delete all matching records.
"""
pass
def test_teams_id_dynamic_datas_nk_records_fk_delete(self):
"""
Test case for teams_id_dynamic_datas_nk_records_fk_delete
Delete a model instance by {{fk}} from the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_fk_get(self):
"""
Test case for teams_id_dynamic_datas_nk_records_fk_get
Find a model instance by {{fk}} from the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_fk_property_name_upload_put(self):
"""
Test case for teams_id_dynamic_datas_nk_records_fk_property_name_upload_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_fk_put(self):
"""
Test case for teams_id_dynamic_datas_nk_records_fk_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_get(self):
"""
Test case for teams_id_dynamic_datas_nk_records_get
Find all instances of the model matched by filter from the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_migrate_post(self):
"""
Test case for teams_id_dynamic_datas_nk_records_migrate_post
Request migration for Dynamic Data records
"""
pass
def test_teams_id_dynamic_datas_nk_records_post(self):
"""
Test case for teams_id_dynamic_datas_nk_records_post
Create a new instance of the model and persist it into the data source.
"""
pass
def test_teams_id_dynamic_datas_nk_records_upload_csv_post(self):
"""
Test case for teams_id_dynamic_datas_nk_records_upload_csv_post
Upload CSV for this Dynamic Data
"""
pass
def test_teams_id_dynamic_datas_nk_team_get(self):
"""
Test case for teams_id_dynamic_datas_nk_team_get
Fetches belongsTo relation team.
"""
pass
def test_teams_id_dynamic_datas_post(self):
    """
    Test case for teams_id_dynamic_datas_post

    Creates a new instance in dynamicDatas of this model.
    """
    # Stub only — endpoint not yet exercised (presumably generated from
    # the API spec; confirm before hand-editing).
    pass

def test_teams_id_exists_get(self):
    """
    Test case for teams_id_exists_get

    Check whether a model instance exists in the data source.
    """
    # Stub only — endpoint not yet exercised.
    pass

def test_teams_id_get(self):
    """
    Test case for teams_id_get

    Find a model instance by {{id}} from the data source.
    """
    # Stub only — endpoint not yet exercised.
    pass

def test_teams_id_head(self):
    """
    Test case for teams_id_head

    Check whether a model instance exists in the data source.
    """
    # Stub only — endpoint not yet exercised.
    pass
def test_teams_id_image_folders_count_get(self):
"""
Test case for teams_id_image_folders_count_get
Counts imageFolders of Team.
"""
pass
def test_teams_id_image_folders_delete(self):
"""
Test case for teams_id_image_folders_delete
Deletes all imageFolders of this model.
"""
pass
def test_teams_id_image_folders_fk_delete(self):
"""
Test case for teams_id_image_folders_fk_delete
Delete a related item by id for imageFolders.
"""
pass
def test_teams_id_image_folders_fk_get(self):
"""
Test case for teams_id_image_folders_fk_get
Find a related item by id for imageFolders.
"""
pass
def test_teams_id_image_folders_fk_put(self):
"""
Test case for teams_id_image_folders_fk_put
Update a related item by id for imageFolders.
"""
pass
def test_teams_id_image_folders_get(self):
"""
Test case for teams_id_image_folders_get
Queries imageFolders of Team.
"""
pass
def test_teams_id_image_folders_nk_children_count_get(self):
"""
Test case for teams_id_image_folders_nk_children_count_get
Counts children of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_children_fk_delete(self):
"""
Test case for teams_id_image_folders_nk_children_fk_delete
Delete a related item by id for children.
"""
pass
def test_teams_id_image_folders_nk_children_fk_get(self):
"""
Test case for teams_id_image_folders_nk_children_fk_get
Find a related item by id for children.
"""
pass
def test_teams_id_image_folders_nk_children_fk_put(self):
"""
Test case for teams_id_image_folders_nk_children_fk_put
Update a related item by id for children.
"""
pass
def test_teams_id_image_folders_nk_children_get(self):
"""
Test case for teams_id_image_folders_nk_children_get
Queries children of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_children_post(self):
"""
Test case for teams_id_image_folders_nk_children_post
Creates a new instance in children of this model.
"""
pass
def test_teams_id_image_folders_nk_folder_members_count_get(self):
"""
Test case for teams_id_image_folders_nk_folder_members_count_get
Counts folderMembers of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_folder_members_delete(self):
"""
Test case for teams_id_image_folders_nk_folder_members_delete
Deletes all folderMembers of this model.
"""
pass
def test_teams_id_image_folders_nk_folder_members_fk_delete(self):
"""
Test case for teams_id_image_folders_nk_folder_members_fk_delete
Delete a related item by id for folderMembers.
"""
pass
def test_teams_id_image_folders_nk_folder_members_fk_get(self):
"""
Test case for teams_id_image_folders_nk_folder_members_fk_get
Find a related item by id for folderMembers.
"""
pass
def test_teams_id_image_folders_nk_folder_members_fk_put(self):
"""
Test case for teams_id_image_folders_nk_folder_members_fk_put
Update a related item by id for folderMembers.
"""
pass
def test_teams_id_image_folders_nk_folder_members_get(self):
"""
Test case for teams_id_image_folders_nk_folder_members_get
Queries folderMembers of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_folder_members_post(self):
"""
Test case for teams_id_image_folders_nk_folder_members_post
Creates a new instance in folderMembers of this model.
"""
pass
def test_teams_id_image_folders_nk_images_count_get(self):
"""
Test case for teams_id_image_folders_nk_images_count_get
Counts images of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_images_fk_delete(self):
"""
Test case for teams_id_image_folders_nk_images_fk_delete
Delete a related item by id for images.
"""
pass
def test_teams_id_image_folders_nk_images_fk_get(self):
"""
Test case for teams_id_image_folders_nk_images_fk_get
Find a related item by id for images.
"""
pass
def test_teams_id_image_folders_nk_images_fk_put(self):
"""
Test case for teams_id_image_folders_nk_images_fk_put
Update a related item by id for images.
"""
pass
def test_teams_id_image_folders_nk_images_get(self):
"""
Test case for teams_id_image_folders_nk_images_get
Queries images of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_images_post(self):
"""
Test case for teams_id_image_folders_nk_images_post
Creates a new instance in images of this model.
"""
pass
def test_teams_id_image_folders_nk_members_count_get(self):
"""
Test case for teams_id_image_folders_nk_members_count_get
Counts members of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_members_delete(self):
"""
Test case for teams_id_image_folders_nk_members_delete
Deletes all members of this model.
"""
pass
def test_teams_id_image_folders_nk_members_fk_delete(self):
"""
Test case for teams_id_image_folders_nk_members_fk_delete
Delete a related item by id for members.
"""
pass
def test_teams_id_image_folders_nk_members_fk_get(self):
"""
Test case for teams_id_image_folders_nk_members_fk_get
Find a related item by id for members.
"""
pass
def test_teams_id_image_folders_nk_members_fk_put(self):
"""
Test case for teams_id_image_folders_nk_members_fk_put
Update a related item by id for members.
"""
pass
def test_teams_id_image_folders_nk_members_get(self):
"""
Test case for teams_id_image_folders_nk_members_get
Queries members of ImageFolder.
"""
pass
def test_teams_id_image_folders_nk_members_post(self):
"""
Test case for teams_id_image_folders_nk_members_post
Creates a new instance in members of this model.
"""
pass
def test_teams_id_image_folders_nk_members_rel_fk_delete(self):
"""
Test case for teams_id_image_folders_nk_members_rel_fk_delete
Remove the members relation to an item by id.
"""
| |
to
# raise this on the user if there is a problem ASAP.
key = self._relationship_key(inline)
# Add all existing items
if obj:
session = object_session(obj)
index = -1 # Needed in case there are no items yet
for index, item in enumerate(getattr(obj, key)):
delete_key = 'delete_%s_%d' % (inline.name, index)
if formdata and delete_key in formdata:
session.delete(item)
# make sure the list is reloaded
session.expire(obj)
else:
form = inline(inline_formdata.get(index), item)
form.is_extra = False
inline_forms.append(form)
max_index = index + 1
else:
max_index = 0
# Only show extra fields when no object is attached or the current
# form has them added.
if count is None:
if obj:
extra = 0
else:
extra = inline.extra
else:
extra = count - max_index
if formdata and 'add_%s' % inline.name in formdata:
extra += 1
# Add empty form items
for index in range(max_index, extra + max_index):
# Only add an extra field if deletion of it was not requested
delete_key = 'delete_%s_%d' % (inline.name, index)
if not formdata or delete_key not in formdata:
form = inline(inline_formdata.get(index))
form.is_extra = True
inline_forms.append(form)
# For all forms, rename them and reassign their IDs as well. Only
# by this measure can be guaranteed that each item can be addressed
# individually.
for index, form in enumerate(inline_forms):
for field in form:
field.name = "%s_%d_%s" % (inline.name, index, field.name)
field.id = field.name
self.inline_fieldsets[inline.name] = inline, inline_forms
def populate_obj(self, obj):
    """Populate ``obj`` from the form, including all inline fieldsets.

    Extends the base ``populate_obj`` by additionally writing out the
    nested inline forms via :meth:`populate_obj_inline`; the parent
    object is populated first so the inline pass can rely on it.
    """
    super(ModelForm, self).populate_obj(obj)
    self.populate_obj_inline(obj)
    def populate_obj_inline(self, obj):
        """
        Populate all inline objects. It takes the usual ``obj`` argument that
        is the **parent** of the inline fields. From these all other values
        are derived and finally the objects are updated.

        .. note::
            Right now this assumes the relationship operation is an ``append``,
            thus for example set collections won't work right now.
        """
        session = object_session(obj)
        for inline, forms in self.inline_fieldsets.values():
            inline_model = inline.Meta.model
            for index, inline_form in enumerate(forms):
                # Get the primary keys from the form. This ensures that we
                # update existing objects while new objects get inserted.
                pks = inline.pks_from_formdata(self.formdata, index)
                if pks is not None:
                    # Persisted row: the submitted pks must resolve, otherwise
                    # the form referenced a record that no longer exists.
                    assert not inline_form.is_extra
                    inline_obj = session.query(inline.Meta.model).get(pks)
                    if inline_obj is None:
                        raise LookupError("Target with pks %s does not exist"
                                          % str(pks))
                else:
                    # New ("extra") row: create it and attach it to the parent
                    # through the relationship collection so it is persisted
                    # together with the parent.
                    assert inline_form.is_extra
                    inline_obj = inline_model()
                    relationship_key = self._relationship_key(inline)
                    getattr(obj, relationship_key).append(inline_obj)
                # Since the form was created like a standard form and the
                # object was loaded either from the database or newly created
                # and added to its associated object, we can now just populate
                # it as we would do with a normal form and object.
                inline_form.populate_obj(inline_obj)
def validate(self):
result = super(ModelForm, self).validate()
inline_result = self.validate_inline()
return result and inline_result
def validate_inline(self):
"""
Validate all inline forms. Implicitly called by :meth:`validate`.
This will also fill the ``form.errors`` dict with additional error
messages based on invalid inline fields using the same naming pattern
used for naming inline fields for display and form submission, i.e.
``inlinename_index_fieldname``.
Thus, if errors exist on an inline field, they can be fetched from the
global errors dict the same way regular errors are present in it.
"""
valid = True
for inline, forms in self.inline_fieldsets.values():
for index, inline_form in enumerate(forms):
if not inline_form.validate():
valid = False
for field, entry in inline_form.errors.items():
field_name = '%s_%d_%s' % (inline.name, index, field)
self.errors[field_name] = entry
return valid
@six.add_metaclass(_CoreModelMeta)
class BaseInLine(_CoreModelForm):
    """
    Base-class for all inline forms. You normally don't subclass from this
    directly unless you want to create a new inline type. However, all
    inline types share the attributes inherited by this template.

    Inline forms are forms that are not intended to be displayed by themselves
    but instead are added to the :ref:`inlines <inlines>` attribute of a normal
    form. They will then be displayed inside the normal form while editing,
    allowing for multiple instances to be added, deleted or modified at the
    same time. They are heavily inspired by Django's inline forms.

    An inline form is configurable with the following attributes, additionally
    to any attribute provided by `WTForms-Alchemy`_

    .. _WTForms-Alchemy: https://wtforms-alchemy.readthedocs.org

    Meta
        This is the standard `WTForms-Alchemy` attribute to configure the
        model. Check out their documentation for specific details.

    relationship_name
        The name of the *other side* of the relationship. Determined
        automatically, unless there are multiple relationships between the
        models in which case this must be overridden by the subclass.
        For example: If this is the child form to be inlined, the other side
        might be called ``children`` and this might be called ``parent`` (or
        it might not even exist, there is no need for a bidirectional
        relationship). The correct value would then be ``children`` *not*
        ``parent``.

    extra
        How many empty fields to display in which new objects can be added.
        Pay attention that often fields require inputs and thus extra fields
        may often not be left empty. This is an intentional restriction to
        allow client-side validation without javascript. So only specify this
        if you are sure that items will always be added (note, however, that
        the extra attribute is not used to enforce a minimum number of
        members in the database). Defaults to ``0``.

    is_extra
        A boolean indicating whether this instance is an extra field or a
        persisted database field. Set during parent's processing.
    """
    # Number of empty "add new" rows to render by default.
    extra = 0
    # Auto-detected unless the relationship is ambiguous (see class docstring).
    relationship_name = None

    @classmethod
    def pks_from_formdata(cls, formdata, index):
        """
        Get a list of primary key values in the order of the primary keys on
        the model. The returned value is suitable to be passed to
        :meth:`sqlalchemy.orm.query.Query.get`.

        :param formdata: A :class:`webob.multidict.MultiDict` that contains all
            parameters that were passed to the form.
        :param index: The index of the element for which the primary key is
            desired. From this, the correct field name to get from ``formdata``
            is determined.
        :type index: int
        :return: A tuple of primary keys that uniquely identify the object in
            the database. The order is based on the order of primary keys in
            the table as reported by SQLAlchemy. Returns ``None`` when any
            primary key value is missing or blank (i.e. an "extra" row).
        :rtype: tuple
        """
        pks = []
        for pk in get_pks(cls.Meta.model):
            # Field names follow the ``inlinename_index_fieldname`` pattern.
            key = '%s_%d_%s' % (cls.name, index, pk)
            pk_val = formdata.get(key)
            if pk_val is None or pk_val == '':
                # Missing/blank pk -> this row is not persisted yet.
                return
            pk_val = int(pk_val)
            pks.append(pk_val)
        return tuple(pks)

    @classmethod
    def _find_relationships_for_query(cls):
        # Prevent parent from being displayed inline
        rels = _CoreModelForm._find_relationships_for_query()
        if not rels:
            return []
        # Drop the back-reference to the parent so it is not rendered as a
        # nested inline of itself.
        inline_key = cls._parent._relationship_key(cls)
        rels = [rel for rel in rels if rel.back_populates != inline_key]
        return rels
class TabularInLine(BaseInLine):
    """
    A base class for a tabular inline display. Each row is displayed in a
    table row with the field labels being displayed in the table head. This is
    basically a list view of the fields only that you can edit and delete them
    and even insert new ones.
    """
    #: The default template for a tabular display. It gets resolved by
    #: :meth:`CRUDView.get_template <pyramid_crud.views.CRUDView.get_template>`
    #: and should usually not need changing here.
    template = 'edit_inline/tabular'
class CSRFModelForm(ModelForm, CSRFForm):
"""
A form that adds a CSRF token to the form. Derive from this class for
security critical operations (read: you want it most of the time and it
doesn't hurt).
Do not derive from this for inline stuff and other composite forms: Only
the main form should use this as you only need one token per request.
All configuration is done exactly in the same way as with the
:class:`.ModelForm` except for one difference: An additional
``csrf_context`` argument is required. The pre-configured views and
templates already know how to utilize this field and work fine with
and without it.
"""
# Developer Note: This form works through multiple inheritance. But the
# CSRFForm is not a typical mixin, it derives from the Form class whereas
# ModelForm also derives from it. As a result, Python's C3 implementation
# resolves this a bit unintuitively. However, this actually saves as: The
# calling goes up to the wtforms_alchemy.ModelForm but then, instead of
# going to wtforms.Form, it goes to CSRFForm. Thus, as long as the parent
# is | |
<filename>ui/app/app.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
##################################################################################
# Module: app.py
# Purpose: Flask main UI logic
#
# Notes:
#
##################################################################################
from flask import Flask, render_template, request, g, flash, url_for, redirect
from sqlalchemy import create_engine,func
from sqlalchemy.orm import sessionmaker
from flask_login import LoginManager, login_user , logout_user , current_user , login_required
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
import time
import os
import sys
import subprocess
import ui.utils.args as args
import data_pipeline.utils.dbuser as dbuser
import data_pipeline.db.factory as db_factory
import data_pipeline.db.connection_details as db_connection
import data_pipeline.constants.const as const
from ui.app.models import Base, ProcessControl, ProcessControlDetail, SourceSystemProfile, User, Connections, ProcessParameters, Profile
from ui.app.forms import ProfileForm, ProfileItemForm, ConnectionForm, UserForm, LoginForm, ParameterForm, ProfileHeaderForm
# Globals
argv = args.get_program_args()
# Audit DB details -- one module-level engine and session are shared by
# every request handler in this app.
db_string = "postgres://{conn_str}".format(conn_str=argv.audituser)
db = create_engine(db_string)
Session = sessionmaker(db)
session = Session()
# Create Flask App
app = Flask(__name__)
# initialise Scheduler, persisting its jobs in the audit database
scheduler = BackgroundScheduler()
# NOTE(review): ``url`` below appears to be unused leftover example config
# (the jobstore uses ``db_string``) -- candidate for removal.
url = 'sqlite:///example.sqlite'
scheduler.add_jobstore('sqlalchemy', url=db_string)
# Initialise LoginManager
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
# user_loader callback function loads the user from the database
def load_user(id):
    # Flask-Login hands back the stored id as a string; cast to int for
    # the primary-key lookup.
    return session.query(User).get(int(id))
@app.before_request
def before_request():
    # Expose the authenticated user on Flask's per-request globals so
    # templates and handlers can read ``g.user``.
    g.user = current_user
@app.route("/login", methods=['GET','POST'])
def login():
# initialize the form
form = LoginForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
username = request.form['username']
password = <PASSWORD>.form['password']
registered_user = session.query(User).filter_by(username=username,password=password).first()
if registered_user is None:
flash('Username or Password is invalid' , 'error')
return redirect(url_for('login'))
login_user(registered_user)
next = request.args.get('next')
return redirect(next or url_for('main'))
return render_template('login.html', form = form)
@app.route('/logout')
def logout():
    """End the current user's session and send them back to the login page."""
    logout_user()
    login_page = url_for('login')
    return redirect(login_page)
@app.route("/")
def main():
initsync_processes = session.query(ProcessControl.id, ProcessControl.profile_name, ProcessControl.profile_version, ProcessControl.process_code, ProcessControl.status, ProcessControl.total_count, ProcessControl.duration, ProcessControl.process_starttime, ProcessControl.process_endtime, func.count(ProcessControlDetail.id).label('total')).filter(ProcessControlDetail.comment.like('%Finished%')).filter(ProcessControl.process_code == 'InitSync').join(ProcessControlDetail).group_by(ProcessControl.id).order_by(ProcessControl.id.desc())
cdc_processes = session.query(ProcessControl.id, ProcessControl.profile_name, ProcessControl.profile_version, ProcessControl.process_code, ProcessControl.status, ProcessControl.total_count, ProcessControl.duration, ProcessControl.process_starttime, ProcessControl.process_endtime, func.sum(ProcessControlDetail.source_row_count).label('total')).filter(ProcessControl.process_code != 'InitSync').join(ProcessControlDetail).group_by(ProcessControl.id).order_by(ProcessControl.id.desc())
# pass the list of process control table entries to html
return render_template('index.html', cdc_processes = cdc_processes,initsync_processes = initsync_processes )
@app.route("/processlist", methods=['GET','POST'])
def processlist():
q = session.query(ProcessControl)
processControl = q.order_by(ProcessControl.id.desc())
if request.method == 'POST' :
# apply filters
filter_profile = request.form['profile']
filter_sourcesystem = request.form['sourcesystem']
filter_status = request.form['status']
if filter_profile:
q = q.filter(ProcessControl.profile_name==filter_profile)
if filter_status:
if filter_status != 'ALL':
q = q.filter(ProcessControl.status==filter_status)
if filter_sourcesystem:
q = q.filter(ProcessControl.source_system_code==filter_sourcesystem)
processControl = q.order_by(ProcessControl.id.desc())
# pass the list of process control table entries to html
return render_template('process_control_list.html', processControl = processControl)
@app.route("/processdetails/<process_id>/", methods=['GET','POST'])
def processdetails(process_id):
processControlDetail = session.query(ProcessControlDetail).filter(ProcessControlDetail.run_id == process_id)
p = session.query(ProcessControlDetail).filter(ProcessControlDetail.run_id == process_id).first()
if p:
process = p.process_code
else:
process = 'No details found!'
if request.method == 'POST':
# get the order by field name from select list
order_by = request.form['order_by']
# default order is by object name
if order_by == 'status':
processControlDetail = processControlDetail.order_by(ProcessControlDetail.status)
if order_by == 'process_starttime':
processControlDetail = processControlDetail.order_by(ProcessControlDetail.process_starttime)
else:
processControlDetail = processControlDetail.order_by(ProcessControlDetail.object_name, ProcessControlDetail.process_code.desc())
# pass the list of process control detail table entries to html
return render_template('process_control_details.html', processControlDetail = processControlDetail, runid = process_id, process = process)
@app.route("/profilelist", methods=['GET','POST'])
def profilelist():
profiles = session.query(Profile).order_by(Profile.profile_name, Profile.version)
names = session.query(Profile).distinct(Profile.profile_name).order_by(Profile.profile_name)
if request.method == 'POST' :
# apply filters
filter_profile = request.form['profile']
filter_version = request.form['version']
filter_sourcesystem = request.form['sourcesystem']
if filter_profile:
if filter_profile != 'ALL':
profiles = profiles.filter(Profile.profile_name==filter_profile)
if filter_version:
profiles = profiles.filter(Profile.version==filter_version)
if filter_sourcesystem:
profiles = profiles.filter(Profile.source_system_code==filter_sourcesystem)
# pass the list of source system profile table entries to html
return render_template('profile_list.html', profiles = profiles, names = names)
@app.route("/profileadd", methods=['GET','POST'])
@login_required
def profileadd():
# create an empty object
profile = Profile()
# initialize the form
form = ProfileHeaderForm(request.form, obj=profile)
form.source_connection.query = session.query(Connections.connection_name).order_by(Connections.connection_name)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(profile)
# Update date time fields from System date time values
# now = datetime.now()
# profile.created_date = now
# TODO: this method will be available to admin users only.
# so the following is for test implementation only
# profile.created_by = 'admin'
try:
session.add(profile)
session.commit()
except Exception as e: #catch all exceptions
print( "Session Commit Errors: %s" % str(e) )
# go to select schema page
return redirect(url_for('schemalist',profile_id=profile.id))
return render_template('profile_add.html', form = form)
@app.route("/profileupdate/<profile_id>/", methods=['GET','POST'])
def profileupdate(profile_id):
profile = session.query(Profile).filter(Profile.id == profile_id).first()
form = ProfileHeaderForm(request.form, obj=profile)
form.source_connection.query = session.query(Connections.connection_name).order_by(Connections.connection_name)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(profile)
session.flush
session.commit()
return redirect(url_for('profilelist'))
# The request is GET
return render_template('profile_update.html', form = form)
@app.route("/profileobjects/<profile_name>/<version>")
def profileobjects(profile_name,version):
profile_header = session.query(Profile).filter(Profile.profile_name == profile_name, Profile.version == version ).first()
profile_details = session.query(SourceSystemProfile).filter(SourceSystemProfile.profile_name == profile_name, SourceSystemProfile.version == version ).order_by(SourceSystemProfile.object_seq)
# pass the list of process control detail table entries to html
return render_template('profile_objects_list.html', profile_details = profile_details, profile_header = profile_header)
@app.route("/profileitemupdate/<profile_id>/", methods=['GET','POST'])
def profileitemupdate(profile_id):
profile = session.query(SourceSystemProfile).filter(SourceSystemProfile.id == profile_id).first()
form = ProfileItemForm(request.form, obj=profile)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(profile)
session.flush
session.commit()
return redirect(url_for('profileobjects',profile_name=profile.profile_name,version=profile.version))
# The request is GET
return render_template('profile_item_update.html', form = form, profile = profile)
@app.route("/profileitemdelete/<profile_id>/", methods=['GET','POST'])
@login_required
def profileitemdelete(profile_id):
profile = session.query(SourceSystemProfile).filter(SourceSystemProfile.id == profile_id).first()
if request.method == 'POST':
# The request is POST and DELETE is confirmed
# Delete the curent record
session.delete(profile)
session.commit()
return redirect(url_for('profileobjects',profile_name=profile.profile_name,version=profile.version))
# The request is GET
return render_template('profile_item_delete.html', profile = profile)
@app.route("/profileitemadd/<profile_name>/<version>")
@login_required
def profileitemadd(profile_name,version):
profile = session.query(SourceSystemProfile).filter(SourceSystemProfile.profile_name == profile_name, SourceSystemProfile.version == version ).first()
# get the profile name and version for next query
# Find the next object_seq number
profile_details = session.query(SourceSystemProfile).filter(SourceSystemProfile.profile_name == profile_name, SourceSystemProfile.version == version ).order_by(SourceSystemProfile.object_seq.desc()).first()
next_object_seq = profile_details.object_seq + 1
new_profile_item = SourceSystemProfile()
# inherit header fields from parent record
new_profile_item.profile_name = profile.profile_name
new_profile_item.version = version
new_profile_item.source_system_code = profile.source_system_code
new_profile_item.source_region = profile.source_region
new_profile_item.target_region = profile.target_region
# Default values
new_profile_item.active_ind = 'Y'
# allocate the object sequence
new_profile_item.object_seq = next_object_seq
form = ProfileItemForm(request.form, obj=new_profile_item)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(new_profile_item)
# Update date time fields from System date time values
now = datetime.now()
new_profile_item.last_updated = now
new_profile_item.last_applied = now
new_profile_item.last_history_update = now
session.add(new_profile_item)
session.commit()
return redirect(url_for('profileobjects',profile_name=profile.profile_name,version=profile.version))
# The request is GET
return render_template('profile_item_add.html', form = form, profile=profile)
@app.route("/connections", methods=['GET','POST'])
def connections():
connections = session.query(Connections).order_by(Connections.connection_name)
if request.method == 'POST' :
# apply filters
filter_category = request.form['category']
filter_database = request.form['database']
if filter_category:
if filter_category != 'ALL':
connections = connections.filter(Connections.connection_category==filter_category)
if filter_database:
if filter_database != 'ALL':
connections = connections.filter(Connections.database_type==filter_database)
# pass the list of connection table entries to html
return render_template('connections_list.html', connections = connections)
@app.route("/addconnection", methods=['GET','POST'])
@login_required
def addconnection():
# create an empty object
connection = Connections()
# initialize the form
form = ConnectionForm(request.form, obj=connection)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(connection)
# Update date time fields from System date time values
now = datetime.now()
connection.created_date = now
# TODO: this method will be availbale to admin users only.
# so the following is for test implementation only
connection.created_by = 'admin'
session.add(connection)
session.commit()
# go back to Connections List view page
return redirect(url_for('connections'))
return render_template('connections_add.html', form = form)
@app.route("/updateconnection/<connection_id>/", methods=['GET','POST'])
@login_required
def updateconnection(connection_id):
connection = session.query(Connections).filter(Connections.id == connection_id).first()
form = ConnectionForm(request.form, obj=connection)
if request.method == 'POST' and form.validate_on_submit():
# The request is POST
# get the values from form and save
form.populate_obj(connection)
session.flush
session.commit()
return redirect(url_for('connections'))
| |
pulumi.set(self, "sni_handlers", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsDefaultHandlerArgs:
    # Generated input wrapper: default (non-SNI) TLS handler of an ALB listener.
    def __init__(__self__, *,
                 certificate_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 http_handler: Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerArgs']] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] certificate_ids: Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated
               with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used.
        :param pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerArgs'] http_handler: HTTP handler resource. The structure is documented below.
        """
        pulumi.set(__self__, "certificate_ids", certificate_ids)
        # Optional nested handler is only stored when explicitly provided.
        if http_handler is not None:
            pulumi.set(__self__, "http_handler", http_handler)

    @property
    @pulumi.getter(name="certificateIds")
    def certificate_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated
        with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used.
        """
        return pulumi.get(self, "certificate_ids")

    @certificate_ids.setter
    def certificate_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "certificate_ids", value)

    @property
    @pulumi.getter(name="httpHandler")
    def http_handler(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerArgs']]:
        """
        HTTP handler resource. The structure is documented below.
        """
        return pulumi.get(self, "http_handler")

    @http_handler.setter
    def http_handler(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerArgs']]):
        pulumi.set(self, "http_handler", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerArgs:
    # Generated input wrapper: HTTP handler nested in the default TLS handler.
    def __init__(__self__, *,
                 allow_http10: Optional[pulumi.Input[bool]] = None,
                 http2_options: Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerHttp2OptionsArgs']] = None,
                 http_router_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] allow_http10: If set, will enable only HTTP1 protocol with HTTP1.0 support.
        :param pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerHttp2OptionsArgs'] http2_options: If set, will enable HTTP2 protocol for the handler. The structure is documented below.
        :param pulumi.Input[str] http_router_id: HTTP router id.
        """
        # All fields are optional; only provided values are stored.
        if allow_http10 is not None:
            pulumi.set(__self__, "allow_http10", allow_http10)
        if http2_options is not None:
            pulumi.set(__self__, "http2_options", http2_options)
        if http_router_id is not None:
            pulumi.set(__self__, "http_router_id", http_router_id)

    @property
    @pulumi.getter(name="allowHttp10")
    def allow_http10(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, will enable only HTTP1 protocol with HTTP1.0 support.
        """
        return pulumi.get(self, "allow_http10")

    @allow_http10.setter
    def allow_http10(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_http10", value)

    @property
    @pulumi.getter(name="http2Options")
    def http2_options(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerHttp2OptionsArgs']]:
        """
        If set, will enable HTTP2 protocol for the handler. The structure is documented below.
        """
        return pulumi.get(self, "http2_options")

    @http2_options.setter
    def http2_options(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerHttp2OptionsArgs']]):
        pulumi.set(self, "http2_options", value)

    @property
    @pulumi.getter(name="httpRouterId")
    def http_router_id(self) -> Optional[pulumi.Input[str]]:
        """
        HTTP router id.
        """
        return pulumi.get(self, "http_router_id")

    @http_router_id.setter
    def http_router_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_router_id", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsDefaultHandlerHttpHandlerHttp2OptionsArgs:
    # Generated input wrapper: HTTP/2 options for the default TLS HTTP handler.
    def __init__(__self__, *,
                 max_concurrent_streams: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_concurrent_streams: Maximum number of concurrent streams.
        """
        if max_concurrent_streams is not None:
            pulumi.set(__self__, "max_concurrent_streams", max_concurrent_streams)

    @property
    @pulumi.getter(name="maxConcurrentStreams")
    def max_concurrent_streams(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of concurrent streams.
        """
        return pulumi.get(self, "max_concurrent_streams")

    @max_concurrent_streams.setter
    def max_concurrent_streams(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_concurrent_streams", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsSniHandlerArgs:
    # Generated input wrapper: one SNI match (name + server names + handler).
    def __init__(__self__, *,
                 handler: pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerArgs'],
                 name: pulumi.Input[str],
                 server_names: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerArgs'] handler: HTTP handler that sets plaintext HTTP router. The structure is documented below.
        :param pulumi.Input[str] name: name of SNI match.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] server_names: A set of server names.
        """
        # All three fields are required.
        pulumi.set(__self__, "handler", handler)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "server_names", server_names)

    @property
    @pulumi.getter
    def handler(self) -> pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerArgs']:
        """
        HTTP handler that sets plaintext HTTP router. The structure is documented below.
        """
        return pulumi.get(self, "handler")

    @handler.setter
    def handler(self, value: pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerArgs']):
        pulumi.set(self, "handler", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        name of SNI match.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="serverNames")
    def server_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A set of server names.
        """
        return pulumi.get(self, "server_names")

    @server_names.setter
    def server_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "server_names", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsSniHandlerHandlerArgs:
    # Generated input wrapper: TLS handler attached to one SNI match.
    def __init__(__self__, *,
                 certificate_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 http_handler: Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerArgs']] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] certificate_ids: Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated
               with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used.
        :param pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerArgs'] http_handler: HTTP handler resource. The structure is documented below.
        """
        pulumi.set(__self__, "certificate_ids", certificate_ids)
        # Optional nested handler is only stored when explicitly provided.
        if http_handler is not None:
            pulumi.set(__self__, "http_handler", http_handler)

    @property
    @pulumi.getter(name="certificateIds")
    def certificate_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated
        with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used.
        """
        return pulumi.get(self, "certificate_ids")

    @certificate_ids.setter
    def certificate_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "certificate_ids", value)

    @property
    @pulumi.getter(name="httpHandler")
    def http_handler(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerArgs']]:
        """
        HTTP handler resource. The structure is documented below.
        """
        return pulumi.get(self, "http_handler")

    @http_handler.setter
    def http_handler(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerArgs']]):
        pulumi.set(self, "http_handler", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerArgs:
    # Generated input wrapper: HTTP handler nested in an SNI handler.
    def __init__(__self__, *,
                 allow_http10: Optional[pulumi.Input[bool]] = None,
                 http2_options: Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerHttp2OptionsArgs']] = None,
                 http_router_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] allow_http10: If set, will enable only HTTP1 protocol with HTTP1.0 support.
        :param pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerHttp2OptionsArgs'] http2_options: If set, will enable HTTP2 protocol for the handler. The structure is documented below.
        :param pulumi.Input[str] http_router_id: HTTP router id.
        """
        # All fields are optional; only provided values are stored.
        if allow_http10 is not None:
            pulumi.set(__self__, "allow_http10", allow_http10)
        if http2_options is not None:
            pulumi.set(__self__, "http2_options", http2_options)
        if http_router_id is not None:
            pulumi.set(__self__, "http_router_id", http_router_id)

    @property
    @pulumi.getter(name="allowHttp10")
    def allow_http10(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, will enable only HTTP1 protocol with HTTP1.0 support.
        """
        return pulumi.get(self, "allow_http10")

    @allow_http10.setter
    def allow_http10(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_http10", value)

    @property
    @pulumi.getter(name="http2Options")
    def http2_options(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerHttp2OptionsArgs']]:
        """
        If set, will enable HTTP2 protocol for the handler. The structure is documented below.
        """
        return pulumi.get(self, "http2_options")

    @http2_options.setter
    def http2_options(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerHttp2OptionsArgs']]):
        pulumi.set(self, "http2_options", value)

    @property
    @pulumi.getter(name="httpRouterId")
    def http_router_id(self) -> Optional[pulumi.Input[str]]:
        """
        HTTP router id.
        """
        return pulumi.get(self, "http_router_id")

    @http_router_id.setter
    def http_router_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_router_id", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsSniHandlerHandlerHttpHandlerHttp2OptionsArgs:
    # Generated input wrapper: HTTP/2 options for an SNI handler's HTTP handler.
    def __init__(__self__, *,
                 max_concurrent_streams: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_concurrent_streams: Maximum number of concurrent streams.
        """
        if max_concurrent_streams is not None:
            pulumi.set(__self__, "max_concurrent_streams", max_concurrent_streams)

    @property
    @pulumi.getter(name="maxConcurrentStreams")
    def max_concurrent_streams(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of concurrent streams.
        """
        return pulumi.get(self, "max_concurrent_streams")

    @max_concurrent_streams.setter
    def max_concurrent_streams(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_concurrent_streams", value)
@pulumi.input_type
class AlbTargetGroupTargetArgs:
    """Provider-generated input type: a single target (IP + subnet) of an
    ALB target group. Both fields are required."""
    def __init__(__self__, *,
                 ip_address: pulumi.Input[str],
                 subnet_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] ip_address: IP address of the target.
        :param pulumi.Input[str] subnet_id: ID of the subnet that targets are connected to.
            All targets in the target group must be connected to the same subnet within a single availability zone.
        """
        pulumi.set(__self__, "ip_address", ip_address)
        pulumi.set(__self__, "subnet_id", subnet_id)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Input[str]:
        """
        IP address of the target.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: pulumi.Input[str]):
        """Store a new ``ip_address`` value on this input object."""
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        ID of the subnet that targets are connected to.
        All targets in the target group must be connected to the same subnet within a single availability zone.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        """Store a new ``subnet_id`` value on this input object."""
        pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class AlbVirtualHostModifyRequestHeaderArgs:
    """Provider-generated input type: a request-header modification rule for
    an ALB virtual host. Exactly one action (append/remove/replace) is
    normally set per rule; ``name`` identifies the header to modify.
    NOTE(review): the generated docs describe ``name`` as "name of the
    route", which looks like an upstream copy/paste error — confirm against
    the provider schema."""
    def __init__(__self__, *,
                 append: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 remove: Optional[pulumi.Input[bool]] = None,
                 replace: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] append: Append string to the header value.
        :param pulumi.Input[str] name: name of the route.
        :param pulumi.Input[bool] remove: If set, remove the header.
        :param pulumi.Input[str] replace: New value for a header. Header values support the following
            [formatters](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers).
        """
        if append is not None:
            pulumi.set(__self__, "append", append)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if remove is not None:
            pulumi.set(__self__, "remove", remove)
        if replace is not None:
            pulumi.set(__self__, "replace", replace)
    @property
    @pulumi.getter
    def append(self) -> Optional[pulumi.Input[str]]:
        """
        Append string to the header value.
        """
        return pulumi.get(self, "append")
    @append.setter
    def append(self, value: Optional[pulumi.Input[str]]):
        """Store a new ``append`` value on this input object."""
        pulumi.set(self, "append", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        name of the route.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        """Store a new ``name`` value on this input object."""
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def remove(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, remove the header.
        """
        return pulumi.get(self, "remove")
    @remove.setter
    def remove(self, value: Optional[pulumi.Input[bool]]):
        """Store a new ``remove`` value on this input object."""
        pulumi.set(self, "remove", value)
    @property
    @pulumi.getter
    def replace(self) -> Optional[pulumi.Input[str]]:
        """
        New value for a header. Header values support the following
        [formatters](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers).
        """
        return pulumi.get(self, "replace")
    @replace.setter
    def replace(self, value: Optional[pulumi.Input[str]]):
        """Store a new ``replace`` value on this input object."""
        pulumi.set(self, "replace", value)
@pulumi.input_type
class AlbVirtualHostModifyResponseHeaderArgs:
def __init__(__self__, *,
append: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
remove: Optional[pulumi.Input[bool]] = None,
replace: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] append: Append string to the header value.
:param pulumi.Input[str] name: name of the route.
:param pulumi.Input[bool] remove: If set, remove the header.
:param pulumi.Input[str] replace: New value for a header. Header values support the following
[formatters](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers).
"""
if append is not None:
pulumi.set(__self__, "append", append)
if name is not None:
pulumi.set(__self__, "name", name)
if remove is not None:
pulumi.set(__self__, "remove", remove)
if replace is not None:
pulumi.set(__self__, "replace", replace)
@property
@pulumi.getter
def append(self) -> Optional[pulumi.Input[str]]:
"""
Append string to the header value.
| |
# easybase/table.py
"""
EasyBase table module.
"""
import time
import logging
from numbers import Integral
from operator import attrgetter
from struct import Struct
from hbase.ttypes import TScan, TGet, TColumnValue, TPut, TColumn, TTimeRange, TDelete
from .util import thrift_type_to_dict, str_increment, OrderedDict
# Module-level logger for this table module.
logger = logging.getLogger(__name__)
# Extract the bare value, or a (value, timestamp) pair, from a cell object.
make_cell = attrgetter('value')
make_cell_timestamp = attrgetter('value', 'timestamp')
# Pack a Python int as a big-endian signed 64-bit value.
pack_i64 = Struct('>q').pack
def make_timerange(ts):
    """Build a ttypes.TTimeRange from a list of timestamps.

    :param list_or_tuple ts: one- or two-element sequence of timestamps;
        when only the minimum is given, the current time is used as the
        maximum (HBase timestamps are milliseconds since the epoch —
        TODO confirm the unit expected by the server here)
    :return: TTimeRange instance, or None if ts is None
    :raises TypeError: if ts is not a list or tuple
    """
    if ts is None:
        return None
    if not isinstance(ts, (tuple, list)):
        raise TypeError("'timerange' must be list or tuple")
    # Work on a copy: the original code assigned ts[1] in place, which both
    # mutated the caller's list and raised IndexError on a one-element list.
    ts = list(ts)
    if len(ts) == 1:
        # Only the minimum was supplied: default the maximum to "now".
        # (The original called time.now(), which does not exist.)
        ts.append(int(time.time() * 1000))
    return TTimeRange(minStamp=ts[0], maxStamp=ts[1])
def make_columns(cols):
    """Convert 'family:qualifier' strings into ttypes.TColumn objects.

    :param list_or_tuple cols: column names such as ['cf1:c2', 'cf2:c2']
    :return: list of TColumn, or None when cols is None
    :raises TypeError: if cols is neither a list nor a tuple
    """
    if cols is None:
        return None
    if not isinstance(cols, (tuple, list)):
        raise TypeError("'columns' must be list or tuple")
    split_pairs = (col.split(':') for col in cols)
    return [TColumn(family=fam, qualifier=qual) for fam, qual in split_pairs]
def make_columnvalue(data):
    """Convert a column->value mapping into ttypes.TColumnValue objects.

    :param dict data: mapping of 'family:qualifier' to value,
        e.g. {'cf2:c3': 'v3', 'cf1:c1': 'v1'}
    :return: list of TColumnValue
    """
    cols = []
    # dict.items() works on both Python 2 and 3; the original used
    # iteritems(), which raises AttributeError on Python 3.
    for column, value in data.items():
        f, q = column.split(":")
        cols.append(TColumnValue(family=f, qualifier=q, value=value))
    return cols
def make_row(cell_map, include_timestamp):
    """Build a row mapping from a list of TColumnValue-like cells.

    Each cell carries ``family``, ``qualifier``, ``value`` and ``timestamp``
    attributes, e.g.::

        TColumnValue(value='v1', qualifier='c1', family='cf1',
                     timestamp=1464971511298)

    The original implementation appended the *same* dict object once per
    cell (producing a list of aliases), ignored ``include_timestamp`` and
    injected a bogus 'timestamp' pseudo-column.  Callers (``Table.row`` /
    ``Table.rows``) document a mapping of columns to values, which is what
    this now returns.

    :param list cell_map: cells belonging to one row
    :param bool include_timestamp: when True, map each column to a
        ``(value, timestamp)`` tuple instead of the bare value
    :return: dict mapping 'family:qualifier' to the cell value
    """
    row = {}
    for cell in cell_map:
        key = cell.family + ':' + cell.qualifier
        row[key] = (cell.value, cell.timestamp) if include_timestamp \
            else cell.value
    return row
def make_ordered_row(sorted_columns, include_timestamp):
    """Build an OrderedDict row from sorted column results of a scan."""
    cellfn = make_cell_timestamp if include_timestamp else make_cell
    row = OrderedDict()
    for column in sorted_columns:
        row[column.columnName] = cellfn(column.cell)
    return row
class Table(object):
"""HBase table abstraction class.
This class cannot be instantiated directly; use :py:meth:`Connection.table`
instead.
"""
    def __init__(self, name, connection):
        """Initialize the table wrapper.

        :param str name: table name as known to the Thrift server
        :param Connection connection: connection whose client performs
            all RPCs for this table
        """
        self.name = name
        self.connection = connection
def __repr__(self):
return '<%s.%s name=%r>' % (
__name__,
self.__class__.__name__,
self.name,
)
def families(self):
"""Retrieve the column families for this table.
:return: Mapping from column family name to settings dict
:rtype: dict
"""
descriptors = self.connection.client.getColumnDescriptors(self.name)
families = dict()
for name, descriptor in descriptors.items():
name = name.rstrip(':')
families[name] = thrift_type_to_dict(descriptor)
return families
def _column_family_names(self):
"""Retrieve the column family names for this table (internal use)"""
names = self.connection.client.getColumnDescriptors(self.name).keys()
return [name.rstrip(':') for name in names]
#
# Data retrieval
#
def row(self, row, columns=None, timestamp=None, timerange=None, maxversions=1, include_timestamp=False):
"""Retrieve a single row of data.
This method retrieves the row with the row key specified in the `row`
argument and returns the columns and values for this row as
a dictionary.
The `row` argument is the row key of the row. If the `columns` argument
is specified, only the values for these columns will be returned
instead of all available columns. The `columns` argument should be
a list or tuple containing strings. Each name can be a column family,
such as `cf1` or `cf1:` (the trailing colon is not required), or
a column family with a qualifier, such as `cf1:col1`.
If specified, the `timestamp` argument specifies the maximum version
that results may have. The `include_timestamp` argument specifies
whether cells are returned as single values or as `(value, timestamp)`
tuples.
:param str row: the row key
:param list_or_tuple columns: list of columns (optional)
:param int timestamp: timestamp (optional)
:param list_or_tuple timerange: list of timestamp ,ONLY include 2 elements(option)
:param bool include_timestamp: whether timestamps are returned
:return: Mapping of columns (both qualifier and family) to values
:rtype: dict
"""
if columns is not None and not isinstance(columns, (tuple, list)):
raise TypeError("'columns' must be a tuple or list")
if timerange is not None and not isinstance(timerange,(tuple, list)):
raise TypeError("'timerange' must be a tuple or list")
cols = make_columns(columns)
tt = make_timerange(timerange)
tget=TGet(row=row,columns=cols,timestamp=timestamp,timeRange=tt,maxVersions=maxversions)
result = self.connection.client.get(self.name,tget)
if not result:
return {}
return make_row(result.columnValues, include_timestamp)
def rows(self, rows, columns=None, timestamp=None, timerange=None, maxversions=1,
include_timestamp=False):
"""Retrieve multiple rows of data.
This method retrieves the rows with the row keys specified in the
`rows` argument, which should be should be a list (or tuple) of row
keys. The return value is a list of `(row_key, row_dict)` tuples.
The `columns`, `timestamp` and `include_timestamp` arguments behave
exactly the same as for :py:meth:`row`.
:param list rows: list of row keys
:param list_or_tuple columns: list of columns (optional)
:param int timestamp: timestamp (optional)
:param bool include_timestamp: whether timestamps are returned
:return: List of mappings (columns to values)
:rtype: list of dicts
"""
if not rows:
# Avoid round-trip if the result is empty anyway
return {}
tgets=[]
for r in rows:
tgets.append(TGet(row=r,columns=columns,timestamp=timestamp,timeRange=timerange,maxVersions=maxversions))
results = self.connection.client.getMultiple(self.name, tgets)
return [(r.row, make_row(r.columnValues, include_timestamp))
for r in results]
def scan(self, row_start=None, row_stop=None, row_prefix=None,
columns=None, filter=None, timerange=None,
include_timestamp=False, batch_size=1000, scan_batching=None,
limit=None, reversed=False):
"""Create a scanner for data in the table.
This method returns an iterable that can be used for looping over the
matching rows. Scanners can be created in two ways:
* The `row_start` and `row_stop` arguments specify the row keys where
the scanner should start and stop. It does not matter whether the
table contains any rows with the specified keys: the first row after
`row_start` will be the first result, and the last row before
`row_stop` will be the last result. Note that the start of the range
is inclusive, while the end is exclusive.
Both `row_start` and `row_stop` can be `None` to specify the start
and the end of the table respectively. If both are omitted, a full
table scan is done. Note that this usually results in severe
performance problems.
* Alternatively, if `row_prefix` is specified, only rows with row keys
matching the prefix will be returned. If given, `row_start` and
`row_stop` cannot be used.
The `columns`, `timestamp` and `include_timestamp` arguments behave
exactly the same as for :py:meth:`row`.
The `filter` argument may be a filter string that will be applied at
the server by the region servers.
If `limit` is given, at most `limit` results will be returned.
The `batch_size` argument specifies how many results should be
retrieved per batch when retrieving results from the scanner. Only set
this to a low value (or even 1) if your data is large, since a low
batch size results in added round-trips to the server.
The optional `scan_batching` is for advanced usage only; it
translates to `Scan.setBatching()` at the Java side (inside the
Thrift server). By setting this value rows may be split into
partial rows, so result rows may be incomplete, and the number
of results returned by te scanner may no longer correspond to
the number of rows matched by the scan.
If `sorted_columns` is `True`, the columns in the rows returned
by this scanner will be retrieved in sorted order, and the data
will be stored in `OrderedDict` instances.
**Compatibility notes:**
* The `filter` argument is only available when using HBase 0.92
(or up). In HBase 0.90 compatibility mode, specifying
a `filter` raises an exception.
* The `sorted_columns` argument is only available when using
HBase 0.96 (or up).
.. versionadded:: 0.8
`sorted_columns` argument
.. versionadded:: 0.8
`scan_batching` argument
:param str row_start: the row key to start at (inclusive)
:param str row_stop: the row key to stop at (exclusive)
:param str row_prefix: a prefix of the row key that must match
:param list_or_tuple columns: list of columns (optional)
:param str filter: a filter string (optional)
:param list_or_tuple timerange: time range(optional)
:param bool include_timestamp: whether timestamps are returned
:param int batch_size: batch size for retrieving resuls
:param bool scan_batching: server-side scan batching (optional)
:param int limit: max number of rows to return
:param bool reversed: whether to reversed
:return: generator yielding the rows matching the scan
:rtype: iterable of `(row_key, row_data)` tuples
"""
if batch_size < 1:
raise ValueError("'batch_size' must be >= 1")
if limit is not None and limit | |
dtype='double'
"""
if self._id is None:
self._show_error_not_initizalied()
if f.ndim != 3:
msg = "Force array has to be three dimensions."
raise RuntimeError(msg)
alm.set_f_train(self._id, np.array(f, dtype='double', order='C'))
def set_training_data(self, u, f):
"""Set displacements and respective forces in supercell.
Parameters
----------
u : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double'
shape=(supercells, num_atoms, 3)
f : array_like
Forces in supercells.
dtype='double'
shape=(supercells, num_atoms, 3)
"""
self.displacements = u
self.forces = f
def set_displacement_and_force(self, u, f):
warnings.warn("set_displacement_and_force is deprecated. "
"Use set_training_data.", DeprecationWarning)
self.set_training_data(u, f)
    def define(self, maxorder, cutoff_radii=None, nbody=None,
               symmetrization_basis='Lattice'):
        """Define the Taylor expansion potential.

        Parameters
        ----------
        maxorder : int
            Maximum order of the Taylor expansion potential.
            - If ``maxorder = 1``, only harmonic (2nd-order) terms are
              considered.
            - If ``maxorder = 2``, both harmonic and cubic terms are
              considered.
        cutoff_radii : array_like, default = None
            Cutoff radii defined for each order.
            When a negative value is provided, the cutoff radius is not used.
            dtype='double', shape=(maxorder, num_elems, num_elems)
        nbody : array_like, default = None
            Option to neglect multi-body interactions.
            dtype='intc', shape=(maxorder,)
        symmetrization_basis : str, default='Lattice'
            Either 'Cartesian' or 'Lattice'. Symmetrization of force
            constants is done either in the matrix based on crystal
            coordinates ('Lattice') or Cartesian coordinates ('Cartesian').

        Raises
        ------
        RuntimeError
            If ``nbody`` or ``cutoff_radii`` have a size inconsistent with
            ``maxorder``.
        """
        if self._id is None:
            self._show_error_not_initizalied()
        self._transfer_parameters()
        if nbody is None:
            # Default: at order i (0-based) allow up to (i+2)-body terms,
            # i.e. nbody = [2, 3, ..., maxorder + 1].
            nbody = []
            for i in range(maxorder):
                nbody.append(i + 2)
        else:
            if len(nbody) != maxorder:
                msg = "The size of nbody must be equal to maxorder."
                raise RuntimeError(msg)
        if cutoff_radii is None:
            _cutoff_radii = None
        else:
            _cutoff_radii = np.array(cutoff_radii, dtype='double', order='C')
            nelem = len(_cutoff_radii.ravel())
            # The flattened array must contain maxorder * nkd**2 entries,
            # where nkd is the number of atomic kinds.
            if (nelem // maxorder) * maxorder != nelem:
                msg = "The array shape of cutoff_radii is wrong."
                raise RuntimeError(msg)
            nkd = int(round(np.sqrt(nelem // maxorder)))
            if nkd ** 2 - nelem // maxorder != 0:
                msg = "The array shape of cutoff_radii is wrong."
                raise RuntimeError(msg)
            _cutoff_radii = np.reshape(_cutoff_radii, (maxorder, nkd, nkd),
                                       order='C')
        self._maxorder = maxorder
        # Normalize the basis string; anything unrecognized falls back to
        # 'Lattice'.
        if symmetrization_basis.lower() in ['lattice', 'cartesian']:
            fc_basis = symmetrization_basis.capitalize()
        else:
            fc_basis = 'Lattice'
        alm.define(self._id,
                   maxorder,
                   np.array(nbody, dtype='intc'),
                   _cutoff_radii,
                   fc_basis)
        alm.init_fc_table(self._id)
        self._defined = True
def set_constraint(self, translation=True, rotation=False):
"""Set constraints for the translational and rotational invariances
Parameters
----------
translation : bool, optional (default = True)
When set to ``True``, the translational invariance
(aka acoustic sum rule) is imposed between force constants.
rotation : bool, optional (default = False)
When set to ``True``, the rotational invariance is imposed between
force constants. This function is not implemented.
"""
if rotation is True:
raise("Rotational invariance is not supported in python API.")
if translation is True:
iconst = 11
else:
iconst = 10
self._iconst = iconst
alm.set_constraint_type(self._id, self._iconst)
def getmap_primitive_to_supercell(self):
"""Returns the mapping information from the primitive cell to the supercell.
Returns
-------
map_p2s : array_like
The mapping information of atoms from the primitive cell to the
supercell.
dtype='intc'
shape = (num_trans, num_atoms_primitive)
"""
if self._id is None:
self._show_error_not_initizalied()
if not self._defined:
self._show_error_not_defined()
map_p2s = np.zeros(len(self._xcoord), dtype='intc')
ntrans = alm.get_atom_mapping_by_pure_translations(self._id, map_p2s)
return map_p2s.reshape((ntrans, -1))
def get_displacement_patterns(self, fc_order):
"""Returns the displacement patterns to obtain force constants.
Parameters
----------
fc_order : int
The order of force constants to get the displacement patterns.
- If ``fc_order = 1``, returns patterns for harmonic force
constants.
- If ``fc_order = 2``, returns patterns for cubic force constants.
- If ``fc_order = 3``, returns patterns for quartic force
constants.
- ...
Returns
-------
all_disps : array_like, shape = (n_patterns,)
The array of tuples (``atom_index``, ``direction``, ``basis``),
where ``direction`` is the numpy.ndarray of size = (3,)
representing the direction of the displacement,
and ``basis`` is a string either "Cartesian" or "Fractional".
"""
if self._id is None:
self._show_error_not_initizalied()
if fc_order > self._maxorder:
msg = ("The fc_order must not be larger than the maximum order "
"(maxorder).")
raise ValueError(msg)
numbers = self._get_number_of_displaced_atoms(fc_order)
tot_num = np.sum(numbers)
atom_indices = np.zeros(tot_num, dtype='intc')
disp_patterns = np.zeros((tot_num, 3), dtype='double', order='C')
nbasis = alm.get_displacement_patterns(self._id,
atom_indices,
disp_patterns,
fc_order)
basis = ["Cartesian", "Fractional"][nbasis]
all_disps = []
pos = 0
for num in numbers:
disp = []
for i in range(num):
disp.append((atom_indices[pos], disp_patterns[pos], basis))
pos += 1
all_disps.append(disp)
return all_disps
    def get_fc(self, fc_order, mode="origin", permutation=True):
        """Returns the force constant values.

        Parameters
        ----------
        fc_order : int
            The order of force constants to get.
            - If ``fc_order = 1``, returns harmonic force constants.
            - If ``fc_order = 2``, returns cubic force constants.
            - If ``fc_order = 3``, returns quartic force constants.
            - ...
        mode : str, optional (default="origin")
            The choice of the force constant list to be returned.
            - If "origin", returns the reducible set of force constants,
              whose first element corresponds to an atom in the
              primitive cell at the origin.
            - If "all", returns all non-zero elements of force constants
              in the supercell.
            - If "irreducible" or "irred", returns the irreducible set of
              force constants.
        permutation : bool (default=True)
            The flag for printing out elements with permutation symmetry.
            Effective only when ``mode = origin`` or ``mode = all``.
            - If True, returns force constants after replicating elements
              by the permutation of indices.
            - If False, returns force constants without replicating elements
              by the permutation of indices. For "origin" and "all", all
              indices except the first index participate to the permutation
              of indices to reduce the number of the output values.

        Returns
        -------
        fc_values : array_like, dtype='double', shape=(num_fc,)
            Force constant values.
        elem_indices : array_like, dtype='int', shape=(num_fc, fc_order + 1)
            Array of flattened indices 3 * index_atom + index_xyz.

        Raises
        ------
        ValueError
            If ``fc_order`` exceeds the maximum order, or ``mode`` is
            unrecognized.

        Note
        ----
        This method returns force constants in Cartesian basis
        when ``mode = origin`` and ``mode = all``.
        When ``mode = irred``, it returns the irreducible set of
        force constants in the basis defined via "symmetrization_basis"
        of the alm.define method.
        """
        if self._id is None:
            self._show_error_not_initizalied()
        if fc_order > self._maxorder:
            msg = ("The fc_order must not be larger than the maximum order "
                   "(maxorder).")
            raise ValueError(msg)
        # The C API expects 0/1 rather than a Python bool.
        perm_int = permutation * 1
        if mode == "origin":
            fc_length = self._get_number_of_fc_origin(fc_order, perm_int)
            fc_values = np.zeros(fc_length, dtype='double')
            elem_indices = np.zeros((fc_length, fc_order + 1),
                                    dtype='intc', order='C')
            # Output arrays are filled in place by the extension module.
            alm.get_fc_origin(self._id, fc_values, elem_indices, perm_int)
            return fc_values, elem_indices
        elif mode == "irreducible" or mode == "irred":
            fc_length = self._get_number_of_irred_fc_elements(fc_order)
            fc_values = np.zeros(fc_length, dtype='double')
            elem_indices = np.zeros((fc_length, fc_order + 1),
                                    dtype='intc', order='C')
            alm.get_fc_irreducible(self._id, fc_values, elem_indices)
            return fc_values, elem_indices
        elif mode == "all":
            # "all" replicates the origin-cell set over every pure
            # translation of the supercell.
            map_p2s = np.zeros(len(self._xcoord), dtype='intc')
            ntrans = alm.get_atom_mapping_by_pure_translations(self._id,
                                                               map_p2s)
            fc_length = self._get_number_of_fc_origin(
                fc_order, perm_int) * ntrans
            fc_values = np.zeros(fc_length, dtype='double')
            elem_indices = np.zeros((fc_length, fc_order + 1),
                                    dtype='intc', order='C')
            alm.get_fc_all(self._id, fc_values, elem_indices, perm_int)
            return fc_values, elem_indices
        else:
            raise ValueError("Invalid mode in get_fc.")
def set_fc(self, fc_in):
"""Copy force constant obtained by an external optimizer to the ALM instance.
Parameters
----------
fc_in : array_like
The irreducible set of force constants.
dtype='double'
shape=(num_fc,)
Note
----
When an external optimizer, such as numpy.linalg.lstsq, is used to fit
force constants, the force constants need to be passed to
the ALM instance by ``set_fc`` to use the ``get_fc`` method.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
fc_length_irred = 0
for i in range(maxorder):
fc_length_irred += self._get_number_of_irred_fc_elements(i + 1)
if fc_length_irred != len(fc_in):
msg = "The size of the given force constant array is incorrect."
raise RuntimeError(msg)
alm.set_fc(self._id, np.array(fc_in, dtype='double', order='C'))
def get_matrix_elements(self):
"""Returns the sensing matrix A and force vector b
Returns
-------
amat : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training, num_fc_irred), order='F'.
The sensing matrix A calculated from the displacements.
bvec : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training,)
The vector b calculated from the atomic forces.
Note
----
From the amat (``A``) and bvec (``b``), the force constant vector ``x``
can be obtained by solving the least-square problem:
x = argmin_{x} | Ax-b|^{2}.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
nrows = self._get_nrows_amat()
fc_length = 0
for i in range(maxorder):
fc_length += self._get_number_of_irred_fc_elements(i + 1)
amat = np.zeros(nrows * fc_length, dtype='double', order='C')
bvec = np.zeros(nrows, dtype='double')
alm.get_matrix_elements(self._id, amat, bvec)
return (np.reshape(amat, (nrows, fc_length), order='F'), bvec)
@property
def cv_l1_alpha(self):
"""Returns L1 alpha at minimum CV"""
if self._id is None:
self._show_error_not_initizalied()
| |
{}
for request in requests:
message_id = self._queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self._queue_request('server.banner', [])
self._queue_request('server.donation_address', [])
self._queue_request('server.peers.subscribe', [])
self._queue_request('blockchain.relayfee', [])
for h in self.subscribed_addresses:
self._queue_request('blockchain.scripthash.subscribe', [h])
def _get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def _notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self._get_status_value(key))
# Called by daemon.py:run_daemon()
# Called by gui.qt.main_window.py:donate_to_server()
# Called by gui.qt.network_dialog.py:update()
# Called by gui.qt.network_dialog.py:fill_in_proxy_settings()
# Called by gui.qt.network_dialog.py:follow_server()
# Called by gui.qt.network_dialog.py:set_server()
# Called by gui.qt.network_dialog.py:set_proxy()
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
# Called by gui.qt.main_window.py:donate_to_server()
def get_donation_address(self):
if self.is_connected():
return self.donation_address
# Called by daemon.py:run_daemon()
# Called by gui.qt.network_dialog.py:update()
# Called by scripts/util.py
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
# Called by commands.py:getservers()
# Called by gui.qt.network_dialog.py:update()
def get_servers(self):
out = Net.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def _start_interface(self, server_key):
"""Start the given server if it is not already active or being connected to.
Arguments:
server_key --- server specifier in the form of '<host>:<port>:<protocol>'
"""
if (not server_key in self.interfaces and not server_key in self.connecting):
if server_key == self.default_server:
logger.debug("connecting to %s as new interface", server_key)
self._set_status('connecting')
self.connecting.add(server_key)
c = Connection(server_key, self.socket_queue, self.config.path)
def _get_unavailable_servers(self):
exclude_set = set(self.interfaces)
exclude_set = exclude_set.union(self.connecting)
exclude_set = exclude_set.union(self.disconnected_servers)
exclude_set = exclude_set.union(self.blacklisted_servers)
return exclude_set
def _start_random_interface(self):
exclude_set = self._get_unavailable_servers()
server_key = _pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server_key:
self._start_interface(server_key)
def _start_interfaces(self):
self._start_interface(self.default_server)
for i in range(self.num_server - 1):
self._start_random_interface()
    def _set_proxy(self, proxy):
        """Install or remove a SOCKS proxy by monkey-patching the socket
        module (this affects the whole process, not just this object).

        :param proxy: dict with 'mode', 'host', 'port' and optional
            'user'/'password' keys, or a falsy value to disable proxying
        """
        self.proxy = proxy
        # Store these somewhere so we can un-monkey-patch
        if not hasattr(socket, "_socketobject"):
            socket._socketobject = socket.socket
            socket._getaddrinfo = socket.getaddrinfo
        if proxy:
            logger.debug("setting proxy '%s'", proxy)
            # proxy_modes is 0-based; the socks module's constants start at 1.
            proxy_mode = proxy_modes.index(proxy["mode"]) + 1
            socks.setdefaultproxy(proxy_mode,
                                  proxy["host"],
                                  int(proxy["port"]),
                                  # socks.py seems to want either None or a non-empty string
                                  username=(proxy.get("user", "") or None),
                                  password=(proxy.get("password", "") or None))
            socket.socket = socks.socksocket
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            # (proto 6 == IPPROTO_TCP in the faked getaddrinfo result)
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM,
                                                 6, '', (args[0], args[1]))]
        else:
            # Restore the originals saved above.
            socket.socket = socket._socketobject
            socket.getaddrinfo = socket._getaddrinfo
def _start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
logger.debug('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self._set_proxy(proxy)
self._start_interfaces()
    def _stop_network(self):
        """Close every interface and reset connection bookkeeping, leaving
        the object ready for a subsequent _start_network() call."""
        logger.debug("stopping network")
        for interface in list(self.interfaces.values()):
            self._close_interface(interface)
        # _close_interface only clears self.interface for the default
        # server; close the current interface explicitly as well.
        if self.interface:
            self._close_interface(self.interface)
        assert self.interface is None
        assert not self.interfaces
        self.connecting = set()
        # Get a new queue - no old pending connections thanks!
        self.socket_queue = queue.Queue()
# Called by network_dialog.py:follow_server()
# Called by network_dialog.py:set_server()
# Called by network_dialog.py:set_proxy()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = _serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self._stop_network()
self.default_server = server
self._start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server, self.SWITCH_SET_PARAMETERS)
else:
self._switch_lagging_interface()
self._notify('updated')
def _switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
    def _switch_lagging_interface(self):
        '''If auto_connect and lagging, switch interface'''
        if self._server_is_lagging() and self.auto_connect:
            # switch to one that has the longest chain
            interfaces = self.interfaces_by_blockchain().get(Blockchain.longest())
            # .get() may return None when no connected interface follows
            # the longest chain; guard before choosing.
            if interfaces:
                choice = random.choice(interfaces)
                self.switch_to_interface(choice.server, self.SWITCH_LAGGING)
SWITCH_DEFAULT = 'SWITCH_DEFAULT'
SWITCH_RANDOM = 'SWITCH_RANDOM'
SWITCH_LAGGING = 'SWITCH_LAGGING'
SWITCH_SOCKET_LOOP = 'SWITCH_SOCKET_LOOP'
SWITCH_FOLLOW_CHAIN = 'SWITCH_FOLLOW_CHAIN'
SWITCH_SET_PARAMETERS = 'SWITCH_SET_PARAMETERS'
# Called by network_dialog.py:follow_server()
    def switch_to_interface(self, server, switch_reason=None):
        '''Switch to server as our interface. If no connection exists nor
        being opened, start a thread to connect. The actual switch will
        happen on receipt of the connection notification. Do nothing
        if server already is our interface.

        :param server: server specifier '<host>:<port>:<protocol>'
        :param switch_reason: one of the SWITCH_* constants, for logging
        '''
        self.default_server = server
        if server not in self.interfaces:
            # Not connected yet: clear the active interface and kick off a
            # connection; the switch completes when the connection arrives.
            self.interface = None
            self._start_interface(server)
            return
        i = self.interfaces[server]
        if self.interface != i:
            logger.debug("switching to '%s' reason '%s'", server, switch_reason)
            # stop any current interface in order to terminate subscriptions
            # fixme: we don't want to close headers sub
            #self._close_interface(self.interface)
            self.interface = i
            self._send_subscriptions()
            self._set_status('connected')
            self._notify('updated')
def _close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def _add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self._save_recent_servers()
def _process_response(self, interface, request, response, callbacks):
if self.debug:
logger.debug("<-- %s", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
self._on_server_version(interface, result)
elif method == 'blockchain.headers.subscribe':
if error is None:
self._on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self._notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self._notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
logger.debug("relayfee %s", self.relay_fee)
elif method == 'blockchain.block.headers':
self._on_block_headers(interface, request, response)
elif method == 'blockchain.block.header':
self._on_header(interface, request, response)
for callback in callbacks:
callback(response)
def _get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
    def _process_responses(self, interface):
        '''Drain *interface*'s queued (request, response) pairs, normalise
        each response into canonical method/params/result form, pick the
        callbacks that should receive it, and hand everything to
        _process_response().  A falsy response with no request means the
        connection failed and the interface is marked down.'''
        responses = interface.get_responses()
        for request, response in responses:
            if request:
                # Response to a request we sent ourselves.
                method, params, message_id = request
                k = self._get_index(method, params)
                # client requests go through self.send() with a
                # callback, are only sent to the current interface,
                # and are placed in the unanswered_requests dictionary
                client_req = self.unanswered_requests.pop(message_id, None)
                if client_req:
                    assert interface == self.interface
                    callbacks = [client_req[2]]
                else:
                    # fixme: will only work for subscriptions
                    k = self._get_index(method, params)
                    callbacks = self.subscriptions.get(k, [])
                # Copy the request method and params to the response
                response['method'] = method
                response['params'] = params
                # Only once we've received a response to an addr subscription
                # add it to the list; avoids double-sends on reconnection
                if method == 'blockchain.scripthash.subscribe':
                    self.subscribed_addresses.add(params[0])
            else:
                # No matching request: this is a server-initiated
                # notification (or a dead connection).
                if not response: # Closed remotely / misbehaving
                    self._connection_down(interface.server)
                    break
                # Rewrite response shape to match subscription request response
                method = response.get('method')
                params = response.get('params')
                k = self._get_index(method, params)
                if method == 'blockchain.headers.subscribe':
                    response['result'] = params[0]
                    response['params'] = []
                elif method == 'blockchain.scripthash.subscribe':
                    response['params'] = [params[0]] # addr
                    response['result'] = params[1]
                callbacks = self.subscriptions.get(k, [])
            # update cache if it's a subscription
            if method.endswith('.subscribe'):
                with self.interface_lock:
                    self.sub_cache[k] = response
            # Response is now in canonical form
            self._process_response(interface, request, response, callbacks)
# Called by synchronizer.py:subscribe_to_addresses()
def subscribe_to_scripthashes(self, scripthashes, callback):
msgs = [('blockchain.scripthash.subscribe', [sh])
for sh in scripthashes]
self.send(msgs, callback)
# Called by synchronizer.py:on_address_status()
def request_scripthash_history(self, sh, callback):
self.send([('blockchain.scripthash.get_history', [sh])], callback)
# Called by commands.py:notify()
# Called by websockets.py:reading_thread()
# Called by websockets.py:run()
# Called locally.
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
if messages:
with self.pending_sends_lock:
self.pending_sends.append((messages, callback))
    def _process_pending_sends(self):
        '''Flush the queued (messages, callback) pairs: register each
        subscription callback, answer from sub_cache when a cached response
        exists, and queue everything else as a request on the current
        interface.'''
        # Requests needs connectivity. If we don't have an interface,
        # we cannot process them.
        if not self.interface:
            return
        # Swap the pending list out under the lock so new send() calls can
        # proceed while we work through the snapshot.
        with self.pending_sends_lock:
            sends = self.pending_sends
            self.pending_sends = []
        for messages, callback in sends:
            for method, params in messages:
                r = None
                if method.endswith('.subscribe'):
                    k = self._get_index(method, params)
                    # add callback to list
                    l = self.subscriptions.get(k, [])
                    if callback not in l:
                        l.append(callback)
                    self.subscriptions[k] = l
                    # check cached response for subscriptions
                    r = self.sub_cache.get(k)
                if r is not None:
                    logger.debug("cache hit '%s'", k)
                    callback(r)
                else:
                    # Not answerable from cache: send it and remember the
                    # callback until the response arrives.
                    message_id = self._queue_request(method, params)
                    self.unanswered_requests[message_id] = method, params, callback
# Called by synchronizer.py:release()
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications _process_response() will | |
: 5, 8056048104805248435 : 2, 18446744073709551615 : 0, 12130603730978457510 : 6, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10789443546307262781 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0},
target_classes_count = 2,
counter_denominator = 0,
ctr_mean_history = [catboost_ctr_mean_history(sum = 2.8026e-44, count = 73), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0)],
ctr_total = [20, 73, 0, 2, 0, 2, 1, 0, 0, 1, 0, 1, 1, 0]
),
14216163332699387099 :
catboost_ctr_value_table(
index_hash_viewer = {18446744073709551615 : 0, 15379737126276794113 : 5, 18446744073709551615 : 0, 14256903225472974739 : 2, 18048946643763804916 : 4, 2051959227349154549 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 7024059537692152076 : 6, 18446744073709551615 : 0, 15472181234288693070 : 1, 8864790892067322495 : 0},
target_classes_count = 2,
counter_denominator = 0,
ctr_mean_history = [catboost_ctr_mean_history(sum = 1.4013e-44, count = 58), catboost_ctr_mean_history(sum = 1.4013e-45, count = 6), catboost_ctr_mean_history(sum = 1.4013e-45, count = 5), catboost_ctr_mean_history(sum = 4.2039e-45, count = 6), catboost_ctr_mean_history(sum = 0, count = 4), catboost_ctr_mean_history(sum = 2.8026e-45, count = 0), catboost_ctr_mean_history(sum = 7.00649e-45, count = 0)],
ctr_total = [10, 58, 1, 6, 1, 5, 3, 6, 0, 4, 2, 0, 5, 0]
),
14216163332699387101 :
catboost_ctr_value_table(
index_hash_viewer = {18446744073709551615 : 0, 18446744073709551615 : 0, 13987540656699198946 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18089724839685297862 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10353740403438739754 : 2, 3922001124998993866 : 0, 13686716744772876732 : 1, 18293943161539901837 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0},
target_classes_count = 2,
counter_denominator = 0,
ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 37), catboost_ctr_mean_history(sum = 0, count = 4), catboost_ctr_mean_history(sum = 3.08286e-44, count = 20), catboost_ctr_mean_history(sum = 0, count = 13), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 3)],
ctr_total = [0, 37, 0, 4, 22, 20, 0, 13, 0, 2, 0, 3]
),
14216163332699387103 :
catboost_ctr_value_table(
index_hash_viewer = {3607388709394294015 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18356215166324018775 : 0, 18365206492781874408 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 14559146096844143499 : 1, 18446744073709551615 : 0, 18446744073709551615 : 0, 11416626865500250542 : 3, 5549384008678792175 : 2},
target_classes_count = 2,
counter_denominator = 0,
ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 14), catboost_ctr_mean_history(sum = 0, count = 22), catboost_ctr_mean_history(sum = 0, count = 22), catboost_ctr_mean_history(sum = 2.66247e-44, count = 17), catboost_ctr_mean_history(sum = 2.8026e-45, count = 3), catboost_ctr_mean_history(sum = 1.4013e-45, count = 1)],
ctr_total = [0, 14, 0, 22, 0, 22, 19, 17, 2, 3, 1, 1]
),
16890222057671696978 :
catboost_ctr_value_table(
index_hash_viewer = {18446744073709551615 : 0, 18446744073709551615 : 0, 13987540656699198946 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18089724839685297862 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10353740403438739754 : 2, 3922001124998993866 : 0, 13686716744772876732 : 1, 18293943161539901837 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0},
target_classes_count = 0,
counter_denominator = 42,
ctr_mean_history = [catboost_ctr_mean_history(sum = 5.1848e-44, count = 4), catboost_ctr_mean_history(sum = 5.88545e-44, count = 13), catboost_ctr_mean_history(sum = 2.8026e-45, count = 3)],
ctr_total = [37, 4, 42, 13, 2, 3]
),
16890222057671696979 :
catboost_ctr_value_table(
index_hash_viewer = {7537614347373541888 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 5903587924673389870 : 1, 18278593470046426063 : 6, 10490918088663114479 : 8, 18446744073709551615 : 0, 407784798908322194 : 10, 5726141494028968211 : 3, 1663272627194921140 : 0, 8118089682304925684 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 15431483020081801594 : 9, 18446744073709551615 : 0, 18446744073709551615 : 0, 1403990565605003389 : 2, 3699047549849816830 : 11, 14914630290137473119 : 7},
target_classes_count = 0,
counter_denominator = 28,
ctr_mean_history = [catboost_ctr_mean_history(sum = 4.2039e-45, count = 28), catboost_ctr_mean_history(sum = 2.66247e-44, count = 4), catboost_ctr_mean_history(sum = 2.8026e-44, count = 2), catboost_ctr_mean_history(sum = 5.60519e-45, count = 10), catboost_ctr_mean_history(sum = 4.2039e-45, count = 1), catboost_ctr_mean_history(sum = 7.00649e-45, count = 2)],
ctr_total = [3, 28, 19, 4, 20, 2, 4, 10, 3, 1, 5, 2]
)
}
)
)
### Routines to compute CTRs
def calc_hash(a, b):
    """Combine two values with CatBoost's 64-bit multiplicative hash."""
    MASK64 = 0xffFFffFFffFFffFF
    MULT = 0x4906ba494954cb65
    mixed = (a + MULT * b) & MASK64
    return (MULT * mixed) & MASK64
def calc_hashes(binarized_features, hashed_cat_features, transposed_cat_feature_indexes, binarized_feature_indexes):
    """Fold the selected categorical hashes and binarized-feature tests of a
    projection into a single hash value."""
    acc = 0
    for cat_index in transposed_cat_feature_indexes:
        acc = calc_hash(acc, hashed_cat_features[cat_index])
    for feature_ref in binarized_feature_indexes:
        feature_value = binarized_features[feature_ref.bin_index]
        if feature_ref.check_value_equal:
            bit = 1 if feature_value == feature_ref.value else 0
        else:
            bit = 1 if feature_value >= feature_ref.value else 0
        acc = calc_hash(acc, bit)
    return acc
def calc_ctrs(model_ctrs, binarized_features, hashed_cat_features, result):
    """Fill *result* with one CTR value per model CTR.

    For each compressed projection, a hash is computed over the sample's
    features; the hash selects a bucket in the learned CTR table, and the
    bucket's statistics are turned into a value by ``ctr.calc``.  An unknown
    hash (no bucket) yields ``ctr.calc(0, 0)``.
    """
    ctr_hash = 0
    result_index = 0
    for i in range(len(model_ctrs.compressed_model_ctrs)):
        proj = model_ctrs.compressed_model_ctrs[i].projection
        ctr_hash = calc_hashes(binarized_features, hashed_cat_features, proj.transposed_cat_feature_indexes, proj.binarized_indexes)
        for j in range(len(model_ctrs.compressed_model_ctrs[i].model_ctrs)):
            ctr = model_ctrs.compressed_model_ctrs[i].model_ctrs[j]
            learn_ctr = model_ctrs.ctr_data.learn_ctrs[ctr.base_hash]
            ctr_type = ctr.base_ctr_type
            bucket = learn_ctr.resolve_hash_index(ctr_hash)
            if bucket is None:
                # Hash not seen during training: fall back to empty stats.
                result[result_index] = ctr.calc(0, 0)
            else:
                if ctr_type == "BinarizedTargetMeanValue" or ctr_type == "FloatTargetMeanValue":
                    # Mean-value CTRs store (sum, count) per bucket.
                    ctr_mean_history = learn_ctr.ctr_mean_history[bucket]
                    result[result_index] = ctr.calc(ctr_mean_history.sum, ctr_mean_history.count)
                elif ctr_type == "Counter" or ctr_type == "FeatureFreq":
                    # Frequency CTRs use bucket count over a global denominator.
                    ctr_total = learn_ctr.ctr_total
                    denominator = learn_ctr.counter_denominator
                    result[result_index] = ctr.calc(ctr_total[bucket], denominator)
                elif ctr_type == "Buckets":
                    # One count per target class; "good" is the class at
                    # target_border_idx, total is the sum over all classes.
                    ctr_history = learn_ctr.ctr_total
                    target_classes_count = learn_ctr.target_classes_count
                    total_count = 0
                    good_count = ctr_history[bucket * target_classes_count + ctr.target_border_idx];
                    for class_id in range(target_classes_count):
                        total_count += ctr_history[bucket * target_classes_count + class_id]
                    result[result_index] = ctr.calc(good_count, total_count)
                else:
                    # Borders-type CTR: classes are split at target_border_idx.
                    ctr_history = learn_ctr.ctr_total;
                    target_classes_count = learn_ctr.target_classes_count;
                    if target_classes_count > 2:
                        good_count = 0
                        total_count = 0
                        for class_id in range(ctr.target_border_idx + 1):
                            total_count += ctr_history[bucket * target_classes_count + class_id]
                        for class_id in range(ctr.target_border_idx + 1, target_classes_count):
                            good_count += ctr_history[bucket * target_classes_count + class_id]
                        total_count += good_count;
                        result[result_index] = ctr.calc(good_count, total_count);
                    else:
                        # Binary target: counts are stored pairwise per bucket.
                        result[result_index] = ctr.calc(ctr_history[bucket * 2 + 1], ctr_history[bucket * 2] + ctr_history[bucket * 2 + 1])
            result_index += 1
# Precomputed CatBoost hashes for every categorical value seen at training
# time (generated table — do not edit by hand).
cat_features_hashes = {
    "Female": -2114564283,
    "Protective-serv": -2075156126,
    "Assoc-voc": -2029370604,
    "Married-civ-spouse": -2019910086,
    "Federal-gov": -1993066135,
    "Transport-moving": -1903253868,
    "Farming-fishing": -1888947309,
    "Prof-school": -1742589394,
    "Self-emp-inc": -1732053524,
    "?": -1576664757,
    "Handlers-cleaners": -1555793520,
    "0": -1438285038,
    "Philippines": -1437257447,
    "Male": -1291328762,
    "11th": -1209300766,
    "Unmarried": -1158645841,
    "Local-gov": -1105932163,
    "Divorced": -993514283,
    "Some-college": -870577664,
    "Asian-Pac-Islander": -787966085,
    "Sales": -760428919,
    "Self-emp-not-inc": -661998850,
    "Widowed": -651660490,
    "Masters": -453513993,
    "State-gov": -447941100,
    "Doctorate": -434936054,
    "White": -218697806,
    "Own-child": -189887997,
    "Amer-Indian-Eskimo": -86031875,
    "Exec-managerial": -26537793,
    "Husband": 60472414,
    "Italy": 117615621,
    "Not-in-family": 143014663,
    "n": 239748506,
    "Married-spouse-absent": 261588508,
    "Prof-specialty": 369959660,
    "Assoc-acdm": 475479755,
    "Adm-clerical": 495735304,
    "Bachelors": 556725573,
    "HS-grad": 580496350,
    "Craft-repair": 709691013,
    "Other-relative": 739168919,
    "Other-service": 786213683,
    "9th": 840896980,
    "Separated": 887350706,
    "10th": 888723975,
    "Mexico": 972041323,
    "Hong": 995245846,
    "1": 1121341681,
    "Tech-support": 1150039955,
    "Black": 1161225950,
    "Canada": 1510821218,
    "Wife": 1708186408,
    "United-States": 1736516096,
    "Never-married": 1959200218,
    "Machine-op-inspct": 2039859473,
    "7th-8th": 2066982375,
    "Private": 2084267031,
}
def hash_uint64(string):
    """Look up the precomputed hash of a categorical value; categories not
    present in the table map to 0x7fFFffFF."""
    key = str(string)
    if key in cat_features_hashes:
        return cat_features_hashes[key]
    return 0x7fFFffFF
### Applicator for the CatBoost model
def apply_catboost_model(float_features, cat_features=[], ntree_start=0, ntree_end=catboost_model.tree_count):
"""
Applies the model built by CatBoost.
Parameters
----------
float_features : list of float features
cat_features : list of categorical features
You need to pass float and categorical features separately in the same order they appeared in train dataset.
For example if you had features f1,f2,f3,f4, where f2 and f4 were considered categorical, you need to pass here float_features=f1,f3, cat_features=f2,f4
Returns
-------
prediction : formula value for the model and the features
"""
if ntree_end == 0:
ntree_end = catboost_model.tree_count
else:
ntree_end = min(ntree_end, catboost_model.tree_count)
model = catboost_model
assert len(float_features) >= model.float_feature_count
assert len(cat_features) >= model.cat_feature_count
# Binarise features
binary_features = [0] * model.binary_feature_count
binary_feature_index = 0
for i in range(len(model.float_feature_borders)):
for border in model.float_feature_borders[i]:
binary_features[binary_feature_index] += 1 if (float_features[model.float_features_index[i]] > border) else 0
binary_feature_index += 1
transposed_hash = [0] * model.cat_feature_count
for i in range(model.cat_feature_count):
transposed_hash[i] = hash_uint64(cat_features[i])
if len(model.one_hot_cat_feature_index) > 0:
cat_feature_packed_indexes = {}
for i in range(model.cat_feature_count):
cat_feature_packed_indexes[model.cat_features_index[i]] = i
for i in range(len(model.one_hot_cat_feature_index)):
cat_idx = cat_feature_packed_indexes[model.one_hot_cat_feature_index[i]]
hash = transposed_hash[cat_idx]
for border_idx in range(len(model.one_hot_hash_values[i])):
binary_features[binary_feature_index] |= (1 if hash == model.one_hot_hash_values[i][border_idx] else 0) * (border_idx + 1)
binary_feature_index += 1
if hasattr(model, 'model_ctrs') and model.model_ctrs.used_model_ctrs_count > 0:
ctrs | |
<reponame>Geode-solutions/OpenGeode
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY:
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM:
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode_py_basic as basic
import opengeode_py_geometry as geom
import opengeode_py_mesh as mesh
def test_create_vertices( polygonal_surface, builder ):
    """Populate the surface with the 7 reference points and check the count."""
    coordinates = [
        [ 0.1, 0.2, 0.3 ],
        [ 2.1, 9.4, 6.7 ],
        [ 7.5, 5.2, 6.3 ],
        [ 8.1, 1.4, 4.7 ],
        [ 4.7, 2.1, 1.3 ],
        [ 9.3, 5.3, 6.7 ],
        [ 7.5, 4.2, 2.8 ],
    ]
    for xyz in coordinates:
        builder.create_point( geom.Point3D( xyz ) )
    if polygonal_surface.nb_vertices() != 7:
        raise ValueError( "[Test] PolygonalSurface should have 7 vertices" )
def test_bounding_box( polygonal_surface ):
    """Verify the min/max corners of the surface bounding box."""
    bbox = polygonal_surface.bounding_box()
    expected_min = geom.Point3D( [ 0.1, 0.2, 0.3 ] )
    expected_max = geom.Point3D( [ 9.3, 9.4, 6.7 ] )
    if bbox.min() != expected_min:
        raise ValueError( "[Test] Wrong computation of bounding box (min)" )
    if bbox.max() != expected_max:
        raise ValueError( "[Test] Wrong computation of bounding box (max)" )
def test_delete_vertex( polygonal_surface, builder ):
    """Delete vertex 0, then check vertex/polygon counts, adjacency and the
    renumbering of edge attribute values."""
    delete_flags = [False] * polygonal_surface.nb_vertices()
    delete_flags[0] = True
    builder.delete_vertices( delete_flags )
    if polygonal_surface.nb_vertices() != 6:
        raise ValueError( "[Test] PolygonalSurface should have 6 vertices" )
    if polygonal_surface.point( 2 ) != geom.Point3D( [ 2.1, 9.4, 6.7 ] ):
        raise ValueError( "[Test] PolygonalSurface vertex coordinates are not correct" )
    if polygonal_surface.nb_polygons() != 2:
        raise ValueError( "[Test] PolygonalSurface should have 2 polygons" )
    if polygonal_surface.polygon_adjacent( mesh.PolygonEdge( 1, 2 ) ):
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    builder.edges_builder().delete_isolated_edges()
    if polygonal_surface.edges().nb_edges() != 7:
        raise ValueError( "[Test] PolygonalSurface should have 7 edges" )
    attribute = polygonal_surface.edges().edge_attribute_manager().find_attribute_uint( "test" )
    # After deletion, edges 4..6 must carry the shifted original values 6..8.
    for edge_id, expected in ( ( 4, 6 ), ( 5, 7 ), ( 6, 8 ) ):
        if attribute.value( edge_id ) != expected:
            raise ValueError( "[Test] Update of edge attributes after "
                "vertex deletion is not correct (value of {})".format( edge_id ) )
def test_create_polygons( polygonal_surface, builder ):
    """Build the three reference polygons and check polygon/edge counts."""
    for vertices in ( [ 0, 1, 2 ], [ 1, 3, 4, 2 ], [ 1, 5, 6, 3 ] ):
        builder.create_polygon( vertices )
    if polygonal_surface.nb_polygons() != 3:
        raise ValueError( "[Test] PolygonalSurface should have 3 polygons" )
    if polygonal_surface.edges().nb_edges() != 9:
        raise ValueError( "[Test] PolygonalSurface should have 9 edges" )
def test_create_edge_attribute( polygonal_surface ):
    """Attach a uint attribute to every edge, valued with the edge index."""
    manager = polygonal_surface.edges().edge_attribute_manager()
    attribute = manager.find_or_create_attribute_variable_uint( "test", basic.NO_ID )
    for edge_id in range( polygonal_surface.edges().nb_edges() ):
        attribute.set_value( edge_id, edge_id )
def test_polygon_adjacencies( polygonal_surface, builder ):
    """Compute adjacencies, then verify adjacency queries edge by edge."""
    builder.compute_polygon_adjacencies()
    adjacent = polygonal_surface.polygon_adjacent
    if adjacent( mesh.PolygonEdge( 0, 0 ) ):
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if adjacent( mesh.PolygonEdge( 0, 1 ) ) != 1:
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if adjacent( mesh.PolygonEdge( 1, 3 ) ) != 0:
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if polygonal_surface.polygon_adjacent_edge( mesh.PolygonEdge( 0, 1 ) ) != mesh.PolygonEdge( 1, 3 ):
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if adjacent( mesh.PolygonEdge( 2, 0 ) ):
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if adjacent( mesh.PolygonEdge( 2, 3 ) ) != 1:
        raise ValueError( "[Test] PolygonalSurface adjacent index is not correct" )
    if len( polygonal_surface.polygons_around_vertex( 1 ) ) != 3:
        raise ValueError( "[Test] PolygonalSurface should have 3 polygons around this vertex" )
def test_polygon_edges_on_borders( polygonal_surface ):
    """Polygon 0 must expose exactly two edges on the border."""
    border_edges = polygonal_surface.polygon_edges_on_border( 0 )
    if len( border_edges ) != 2:
        raise ValueError( "[Test] Number of polygon edges on border index is not correct" )
def test_previous_next_on_border( polygonal_surface ):
    """Walk the border one step backward and forward from edge (0, 0)."""
    start = mesh.PolygonEdge( 0, 0 )
    if polygonal_surface.previous_on_border( start ) != mesh.PolygonEdge( 0, 2 ):
        raise ValueError( "[Test] Previous edge on border index is not correct" )
    if polygonal_surface.next_on_border( start ) != mesh.PolygonEdge( 2, 0 ):
        raise ValueError( "[Test] Next edge on border index is not correct" )
def test_delete_polygon( polygonal_surface, builder ):
    """Delete polygon 0, then check the remaining polygon, its vertices,
    the edge count and the renumbered edge attribute values."""
    delete_flags = [False] * polygonal_surface.nb_polygons()
    delete_flags[0] = True
    builder.delete_polygons( delete_flags )
    if polygonal_surface.nb_polygons() != 1:
        raise ValueError( "[Test] PolygonalSurface should have 1 polygon" )
    # Remaining polygon must reference the renumbered vertices 4, 2, 0.
    for local_vertex, expected in ( ( 0, 4 ), ( 1, 2 ), ( 2, 0 ) ):
        if polygonal_surface.polygon_vertex( mesh.PolygonVertex( 0, local_vertex ) ) != expected:
            raise ValueError( "[Test] PolygonalSurface edge vertex index is not correct" )
    builder.edges_builder().delete_isolated_edges()
    if polygonal_surface.edges().nb_edges() != 3:
        raise ValueError( "[Test] PolygonalSurface should have 3 edges" )
    attribute = polygonal_surface.edges().edge_attribute_manager().find_attribute_uint( "test" )
    for edge_id in range( 3 ):
        if attribute.value( edge_id ) != edge_id:
            raise ValueError( "[Test] Update of edge attributes after "
                "polygon deletion is not correct" )
def test_polygon_barycenter( polygonal_surface ):
    """Check the barycenter of polygon 1 against the reference point."""
    expected = geom.Point3D( [ 5.6, 4.525, 4.75 ] )
    if polygonal_surface.polygon_barycenter( 1 ) != expected:
        raise ValueError( "[Test] PolygonalSurface polygon barycenter is not correct" )
def test_polygon_area():
    """The quad (0,0)-(6,0)-(8,4)-(0,4) must have area 28."""
    polygonal_surface = mesh.PolygonalSurface2D.create()
    builder = mesh.PolygonalSurfaceBuilder2D.create( polygonal_surface )
    a, b, c = 6.0, 8.0, 4.0
    for coords in ( [ 0.0, 0.0 ], [ a, 0.0 ], [ b, c ], [ 0.0, c ] ):
        builder.create_point( geom.Point2D( coords ) )
    builder.create_polygon( [ 0, 1, 2, 3 ] )
    if polygonal_surface.polygon_area( 0 ) != 28:
        raise ValueError( "[Test] PolygonalSurface polygon area is not correct" )
def test_polygon_normal():
    """A quad lying in the z=0 plane must have normal (0, 0, 1)."""
    polygonal_surface = mesh.PolygonalSurface3D.create()
    builder = mesh.PolygonalSurfaceBuilder3D.create( polygonal_surface )
    a, b, c = 6.0, 8.0, 4.0
    for coords in ( [ 0.0, 0.0, 0.0 ], [ a, 0.0, 0.0 ],
                    [ b, c, 0.0 ], [ 0.0, c, 0.0 ] ):
        builder.create_point( geom.Point3D( coords ) )
    builder.create_polygon( [ 0, 1, 2, 3 ] )
    if polygonal_surface.polygon_normal( 0 ) != geom.Vector3D( [ 0., 0., 1. ] ):
        raise ValueError( "[Test] PolygonalSurface polygon normal is not correct" )
def test_polygon_vertex_normal():
    """Average normal at vertex 0, shared by two tilted triangles, is (0, 0, 1)."""
    polygonal_surface = mesh.PolygonalSurface3D.create()
    builder = mesh.PolygonalSurfaceBuilder3D.create( polygonal_surface )
    for coords in ( [ 0.0, 0.0, 0.0 ], [ 1.0, 0.0, 0.0 ],
                    [ 0.0, 1.0, 1.0 ], [ 0.0, -1.0, 1.0 ] ):
        builder.create_point( geom.Point3D( coords ) )
    builder.create_polygon( [ 0, 1, 2 ] )
    builder.create_polygon( [ 0, 3, 1 ] )
    builder.compute_polygon_adjacencies()
    if polygonal_surface.polygon_vertex_normal( 0 ) != geom.Vector3D( [ 0., 0., 1. ] ):
        raise ValueError( "[Test] PolygonalSurface polygon vertex normal is not correct" )
def test_io( polygonal_surface, filename ):
mesh.save_polygonal_surface3D( polygonal_surface, filename )
new_polygonal_surface = mesh.load_polygonal_surface3D( filename )
if new_polygonal_surface.nb_vertices() != 7:
raise ValueError( "[Test] Reloaded PolygonalSurface should have 7 vertices" )
if new_polygonal_surface.edges().nb_edges() != 9:
raise ValueError( "[Test] Reloaded PolygonalSurface should have 9 edges" | |
"21-1950294",
"21-1949457",
"21-1950269",
"21-1950277",
"21-1950304",
"20-1939262",
"21-1950447",
"21-1946987",
"21-1947511",
"19-1892768",
"21-1947479",
"20-1943443",
"21-1950377",
"21-1950378",
"19-1888392",
"21-1947180",
"18-1883358",
"21-1949234",
"21-1950421",
"21-1950418",
"21-1950512",
"21-1950398",
"21-1951388",
"20-1931984",
"19-1886924",
"19-1887251",
"21-1950604",
"21-1950634",
"21-1950631",
"21-1950623",
"21-1950642",
"21-1950742",
"21-1950739",
"21-1949818",
"20-1936009",
"19-1895360",
"19-1895366",
"18-1855291",
"17-0830888",
"19-1896577",
"21-1950687",
"20-1918308",
"19-1888283",
"19-1898156",
"19-1888276",
"21-1950802",
"21-1950682",
"21-1950786",
"21-1950781",
"21-1950767",
"21-1950860",
"18-1857583",
"20-1932244",
"18-1865874",
"21-1950817",
"19-1890110",
"21-1950877",
"21-1950823",
"21-1950886",
"21-1950912",
"19-1917439",
"21-1946522",
"18-1886292",
"21-1951045",
"21-1951041",
"21-1950875",
"21-1951033",
"21-1951005",
"21-1951044",
"20-1923066",
"21-1948458",
"21-1951042",
"21-1950974",
"21-1950948",
"21-1951008",
"21-1950996",
"21-1950961",
"21-1950951",
"21-1950958",
"21-1951019",
"21-1950967",
"21-1950929",
"21-1950992",
"21-1951238",
"21-1951007",
"21-1951204",
"21-1952669",
"21-1952681",
"20-1919616",
"19-1916781",
"21-1951086",
"21-1951183",
"21-1951173",
"21-1951120",
"21-1951054",
"21-1951227",
"21-1947946",
"19-1895896",
"19-1895914",
"19-1895919",
"19-1895911",
"18-1874672",
"21-1947247",
"20-1944508",
"21-1951296",
"21-1951301",
"21-1951411",
"19-1910222",
"19-1900508",
"19-1916946",
"19-1916944",
"19-1916943",
"20-1942120",
"21-1951428",
"21-1951429",
"21-1951430",
"21-1951485",
"19-1908582",
"21-1951482",
"21-1951405",
"21-1951463",
"21-1951409",
"21-1951417",
"21-1951408",
"19-1912061",
"21-1951570",
"21-1951597",
"19-1912055",
"18-1874367",
"20-1921889",
"19-1907971",
"21-1951548",
"21-1949904",
"21-1951599",
"21-1951542",
"21-1951604",
"20-1925064",
"21-1951671",
"21-1949593",
"21-1951649",
"21-1951729",
"20-1928305",
"18-1857821",
"21-1951858",
"18-1881381",
"21-1951764",
"21-1951583",
"20-1920027",
"21-1951805",
"21-1963016",
"21-1951840",
"21-1951901",
"21-1951747",
"21-1951775",
"21-1951796",
"21-1948552",
"20-1937975",
"21-1952029",
"21-1952030",
"21-1952031",
"18-1886608",
"19-1895207",
"18-1882467",
"19-1911961",
"21-1951715",
"21-1951980",
"21-1948575",
"21-1952016",
"21-1951977",
"21-1950765",
"19-1916941",
"20-1928675",
"21-1952042",
"19-1913848",
"21-1952095",
"21-1952154",
"21-1952180",
"21-1952132",
"21-1952049",
"21-1952103",
"21-1952101",
"19-1888632",
"21-1952263",
"20-1920280",
"20-1918865",
"21-1952328",
"21-1952330",
"18-1868390",
"21-1952319",
"19-1891981",
"19-1907904",
"21-1952324",
"21-1952287",
"21-1952230",
"21-1952218",
"21-1952215",
"21-1952234",
"21-1952280",
"18-1878190",
"21-1952405",
"21-1952364",
"21-1952415",
"21-1952367",
"21-1952403",
"21-1952418",
"21-1952410",
"19-1892225",
"20-1942743",
"18-1858565",
"18-1882305",
"21-1952554",
"21-1952746",
"21-1952484",
"21-1952469",
"21-1952472",
"19-1916832",
"21-1953057",
"21-1952743",
"21-1953054",
"21-1952636",
"20-1925865",
"21-1952727",
"21-1952713",
"21-1952694",
"21-1952674",
"21-1952741",
"20-1931539",
"21-1952750",
"21-1955375",
"21-1955380",
"21-1952761",
"20-1932067",
"16-0795539",
"20-1932043",
"20-1932039",
"21-1952767",
"21-1952558",
"21-1952788",
"21-1952771",
"21-1952819",
"21-1952836",
"18-1877875",
"19-1889473",
"18-1884026",
"19-1889475",
"21-1952948",
"19-1899089",
"21-1952925",
"21-1952862",
"21-1952881",
"21-1952915",
"21-1952859",
"21-1953059",
"19-1911836",
"21-1952978",
"21-1950892",
"21-1953056",
"21-1953058",
"21-1953047",
"21-1952950",
"19-1911903",
"21-1952295",
"21-1952964",
"21-1952847",
"21-1953022",
"21-1953002",
"19-1891543",
"21-1953052",
"21-1953018",
"21-1952962",
"21-1953178",
"18-1879483",
"19-1917545",
"21-1953105",
"15-0771204",
"21-1953229",
"21-1953133",
"21-1953233",
"21-1953110",
"21-1953158",
"21-1953303",
"21-1953320",
"21-1953258",
"21-1953354",
"21-1953305",
"21-1953309",
"21-1953262",
"21-1950672",
"21-1952066",
"21-1952293",
"18-1880987",
"21-1950678",
"19-1912417",
"20-1930955",
"20-1930977",
"19-1889880",
"21-1953427",
"21-1953430",
"18-1877710",
"21-1953428",
"18-1863734",
"18-1867772",
"21-1953358",
"21-1953380",
"21-1953429",
"21-1953407",
"21-1953378",
"21-1953346",
"21-1953353",
"18-1884177",
"19-1913259",
"18-1877786",
"19-1888151",
"21-1953530",
"21-1953535",
"19-1898487",
"21-1953649",
"20-1928395",
"19-1898497",
"21-1953938",
"21-1953584",
"21-1953591",
"21-1953620",
"21-1946229",
"18-1864721",
"18-1864718",
"14-0749345",
"21-1946487",
"21-1961113",
"21-1953768",
"21-1953679",
"21-1953803",
"21-1953825",
"21-1953780",
"21-1953834",
"21-1953905",
"21-1953901",
"21-1953849",
"21-1953843",
"19-1911909",
"21-1953927",
"19-1900615",
"21-1953890",
"21-1953997",
"19-1903341",
"19-1903366",
"21-1953991",
"21-1954012",
"21-1953970",
"21-1953956",
"21-1954005",
"19-1896047",
"21-1954107",
"21-1954117",
"21-1954074",
"21-1954091",
"21-1954062",
"21-1954067",
"21-1954061",
"21-1954054",
"19-1892105",
"19-1915453",
"19-1898132",
"21-1954134",
"21-1954224",
"21-1954181",
"21-1954200",
"21-1954148",
"20-1931248",
"21-1954795",
"21-1954809",
"21-1954820",
"21-1950475",
"21-1954504",
"19-1895655",
"19-1895587",
"21-1954210",
"21-1954257",
"21-1954389",
"21-1954337",
"21-1954251",
"21-1954455",
"21-1955161",
"21-1954433",
"21-1954481",
"21-1954713",
"21-1954716",
"21-1954513",
"17-1835521",
"21-1954611",
"21-1954612",
"21-1954566",
"18-1883242",
"21-1954537",
"21-1954522",
"21-1954584",
"21-1954586",
"19-1913450",
"19-1902585",
"21-1954623",
"21-1949565",
"19-1897242",
"19-1910622",
"21-1954674",
"21-1954704",
"21-1954706",
"19-1898642",
"20-1925174",
"19-1910832",
"19-1897243",
"21-1954671",
"21-1954625",
"21-1954661",
"21-1954617",
"21-1954666",
"21-1954694",
"21-1954667",
"21-1954721",
"20-1928674",
"19-1911277",
"21-1954764",
"21-1954722",
"21-1954723",
"21-1954726",
"21-1954818",
"21-1954755",
"21-1954730",
"19-1913238",
"21-1954938",
"21-1954944",
"21-1954946",
"18-1880494",
"19-1913461",
"21-1955067",
"19-1899068",
"19-1903444",
"21-1955068",
"19-1912885",
"21-1954990",
"21-1954981",
"20-1920056",
"19-1913360",
"20-1921513",
"21-1955083",
"21-1955122",
"21-1955073",
"21-1955048",
"21-1954286",
"19-1907518",
"21-1955228",
"21-1955220",
"21-1955225",
"21-1955168",
"21-1955204",
"21-1955196",
"21-1955166",
"21-1955195",
"21-1955297",
"18-1874701",
"21-1955287",
"20-1926794",
"21-1955311",
"21-1955269",
"21-1955301",
"21-1955365",
"21-1955282",
"21-1955306",
"21-1955352",
"20-1935250",
"20-1935262",
"19-1912507",
"20-1921506",
"20-1921509",
"21-1956084",
"20-1939090",
"18-1861407",
"19-1892437",
"21-1955420",
"21-1951168",
"17-1849352",
"21-1955413",
"21-1955428",
"21-1955446",
"19-1895126",
"19-1889600",
"21-1955696",
"21-1955691",
"21-1953960",
"21-1955554",
"21-1955782",
"21-1956396",
"19-1908542",
"18-1884390",
"21-1955751",
"21-1955797",
"21-1955900",
"21-1955901",
"21-1955894",
"21-1955895",
"21-1955896",
"21-1955902",
"21-1955816",
"20-1920096",
"21-1955874",
"21-1955875",
"20-1929966",
"19-1916624",
"21-1955939",
"19-1914994",
"17-1851452",
"20-1919713",
"20-1939020",
"20-1930864",
"20-1920149",
"19-1906852",
"21-1955932",
"21-1955961",
"21-1955973",
"17-0824366",
"18-1886528",
"17-0824372",
"20-1957523",
"21-1956021",
"21-1956065",
"21-1956011",
"21-1950853",
"20-1921113",
"21-1954498",
"21-1956204",
"21-1956155",
"21-1956294",
"21-1956360",
"21-1956341",
"21-1956364",
"21-1956295",
"19-1916818",
"18-1877797",
"20-1935082",
"21-1956433",
"21-1956455",
"21-1956469",
"21-1956525",
"21-1956548",
"21-1956477",
"21-1956466",
"21-1956540",
"21-1956623",
"21-1956640",
"21-1956641",
"21-1956648",
"20-1920494",
"19-1908230",
"21-1956645",
"21-1956653",
"21-1956580",
"21-1956607",
"21-1956574",
"19-1913371",
"18-1859032",
"19-1892815",
"21-1956685",
"21-1956680",
"21-1956692",
"21-1956718",
"21-1956681",
"19-1908500",
"19-1898651",
"21-1956908",
"21-1956905",
"21-1956909",
"21-1956926",
"18-1868639",
"21-1956984",
"21-1957036",
"21-1957072",
"20-1918537",
"20-1925955",
"21-1957106",
"18-1875094",
"21-1957946",
"21-1957126",
"21-1957150",
"21-1957156",
"21-1957121",
"21-1957103",
"21-1957081",
"21-1957076",
"21-1957151",
"21-1957097",
"21-1957172",
"19-1917535",
"18-1881018",
"18-1868782",
"19-1914708",
"20-1929092",
"19-1909966",
"18-1875135",
"21-1957281",
"21-1957282",
"19-1911910",
"21-1957256",
"21-1957225",
"21-1957203",
"21-1957228",
"21-1957219",
"19-1896662",
"19-1913741",
"21-1957374",
"21-1957384",
"20-1931593",
"21-1957303",
"21-1957386",
"21-1957361",
"15-0781442",
"17-1850691",
"19-1913856",
"21-1957509",
"21-1957557",
"21-1957543",
"21-1957542",
"20-1918673",
"19-1898152",
"21-1957630",
"21-1957598",
"21-1957668",
"18-1867716",
"21-1958587",
"21-1957758",
"21-1957725",
"21-1957774",
"21-1958449",
"21-1957832",
"21-1957833",
"18-1873723",
"21-1957835",
"21-1957815",
"21-1957802",
"21-1957949",
"19-1914343",
"21-1957957",
"21-1957881",
"21-1957883",
"21-1958058",
"21-1957986",
"20-1930473",
"19-1888295",
"21-1958158",
"20-1931192",
"19-1893459",
"21-1957952",
"21-1958025",
"21-1958039",
"21-1958038",
"21-1958079",
"19-1901619",
"21-1947819",
"21-1958162",
"21-1958147",
"21-1958289",
"21-1958355",
"21-1958376",
"21-1958375",
"21-1958303",
"21-1958311",
"19-1901596",
"21-1958344",
"21-1958359",
"21-1958353",
"21-1958323",
"21-1958326",
"21-1958305",
"21-1958356",
"21-1958361",
"21-1958354",
"21-1958351",
"21-1958431",
"21-1963031",
"18-1872781",
"21-1958603",
"21-1958422",
"21-1958384",
"21-1958434",
"19-1917497",
"21-1958416",
"21-1958447",
"21-1958457",
"19-1905619",
"21-1958391",
"21-1958430",
"21-1958444",
"21-1958393",
"21-1958427",
"21-1958458",
"21-1958401",
"21-1958408",
"19-1902190",
"19-1892460",
"21-1958577",
"21-1958523",
"21-1958513",
"21-1952009",
"21-1959280",
"21-1959282",
"21-1959304",
"19-1908009",
"21-1958669",
"21-1958681",
"21-1963040",
"21-1958635",
"21-1958602",
"21-1958596",
"21-1958450",
"21-1958719",
"21-1958730",
"19-1903188",
"21-1958829",
"21-1958776",
"21-1954059",
"19-1913964",
"20-1917976",
"21-1958916",
"20-1918408",
"19-1901263",
"19-1901260",
"19-1889033",
"19-1912189",
"21-1958910",
"21-1958885",
"21-1958891",
"21-1958944",
"19-1886813",
"18-1881742",
"18-1882460",
"20-1931175",
"19-1893461",
"19-1893458",
"20-1919127",
"21-1959068",
"21-1959008",
"21-1959053",
"21-1959073",
"19-1917483",
"21-1959314",
"21-1950924",
"21-1945733",
"21-1950926",
"21-1945717",
"21-1959313",
"21-1956454",
"18-1880630",
"20-1931145",
"21-1959114",
"21-1959262",
"21-1959455",
"21-1959397",
"21-1959294",
"21-1959350",
"21-1959340",
"21-1959479",
"21-1959498",
"21-1959297",
"21-1959303",
"21-1959438",
"21-1959587",
"21-1959574",
"21-1959540",
"21-1959503",
"21-1959562",
"21-1959601",
"19-1890469",
"21-1959611",
"21-1959684",
"19-1906494",
"20-1940527",
"20-1927421",
"20-1921346",
"21-1959877",
"20-1942734",
"20-1942741",
"21-1959621",
"21-1959651",
"21-1959634",
"21-1958386",
"19-1899160",
"19-1899169",
"19-1892555",
"19-1917234",
"19-1899173",
"21-1959995",
"21-1959971",
"17-1852062",
"21-1960102",
"19-1898084",
"19-1901960",
"19-1899888",
"20-1934222",
"21-1960188",
"21-1960209",
"18-1882795",
"21-1960316",
"21-1960401",
"21-1960289",
"18-1857721",
"21-1960755",
"21-1960304",
"19-1900584",
"19-1900575",
"19-1900561",
"19-1900555",
"19-1900539",
"19-1900544",
"19-1900535",
"21-1960445",
"18-1877942",
"20-1926882",
"21-1960504",
"21-1960448",
"18-1880066",
"21-1960573",
"21-1960639",
"20-1919566",
"21-1960604",
"21-1960658",
"18-1867206",
"21-1960665",
"21-1960663",
"21-1952551",
"18-1871901",
"18-1880907",
"18-1862913",
"21-1957840",
"17-1848446",
"21-1960874",
"21-1960876",
"21-1960883",
"21-1960884",
"19-1911042",
"21-1960888",
"21-1953226",
"21-1960860",
"21-1960892",
"18-1885514",
"21-1961223",
"21-1961020",
"21-1961003",
"21-1961007",
"21-1961015",
"15-0781443",
"21-1961201",
"21-1961110",
"21-1961209",
"21-1956595",
"21-1961511",
"21-1961468",
"21-1961450",
"19-1912420",
"19-1886911",
"19-1898162",
"21-1961535",
"19-1890422",
"17-1853162",
"19-1894648",
"19-1896430",
"19-1895070",
"19-1895072",
"19-1899252",
"19-1910329",
"18-1874516",
"18-1860481",
"19-1917190",
"18-1867135",
"18-1867139",
"17-1845051",
"20-1936978",
"20-1939010",
"19-1908000",]
# Atlas client names.
# NOTE(review): the "<NAME>" tokens look like anonymization placeholders
# (PII scrubbed from the original source) rather than real values — confirm
# before using this list for matching.
AtlasClientsNames= ["<NAME>, <NAME>",
"<NAME>",
"<NAME>",
"Elazab, <NAME>",
"<NAME>",
"<NAME>, <NAME>",
"<NAME>, Angelica",
"<NAME>, Ismael", ]
# HRA case coding: classify each case into the appropriate HRA reporting basket.
def HRA_Case_Coding(LPC,SLPC,HRA_LoS,Crim):
LPC = str(LPC)
SLPC = str(SLPC)
if HRA_LoS == '***Needs Cleanup***':
return ""
elif HRA_LoS == 'Hold For Review':
return 'Hold For Review'
elif LPC.startswith('2') == True and HRA_LoS == 'Advice':
return 'B -EMP'
elif LPC.startswith('2') == True and HRA_LoS == 'Brief Service':
return 'B -EMP'
elif SLPC == 'G-639' and HRA_LoS == 'Advice':
return 'B -INQ'
elif SLPC == 'G-639' and HRA_LoS == 'Brief Service':
return 'B -INQ'
elif SLPC == 'I-914' and HRA_LoS == 'Advice':
return 'B -CERT'
elif SLPC == 'I-914' and HRA_LoS == 'Brief Service':
return 'B -CERT'
elif SLPC == 'I-918' and HRA_LoS == 'Advice':
return 'B -CERT'
elif SLPC == 'I-918' and HRA_LoS == 'Brief Service':
return 'B -CERT'
elif HRA_LoS == 'Advice' or HRA_LoS == 'Brief Service':
return 'B -ADVI'
elif SLPC == "Emergency Planning":
return 'B -APD'
elif SLPC == "Parental Designation Form":
return 'B -OTH_Parental Designation Form'
elif Crim == "Yes":
return 'T2-OTH_CRM'
elif SLPC == "I-589 Affirmative" or SLPC == "I-730":
return 'T2-AR'
elif SLPC == "I-589 Defensive" or SLPC == "Removal Defense" or SLPC == "EOIR-40" or SLPC == "EOIR-42A"or SLPC == "EOIR-42B" or SLPC == "I-212" or SLPC == "I-485 Defensive":
return 'T2-RD'
elif SLPC == "I-912":
return 'T1-OTH_I-912'
elif SLPC == "I-130 (spouse)":
return 'T2-MAR'
elif SLPC == "I-129F" or SLPC == "I-130" or SLPC == "I-751" or SLPC == "I-864"or SLPC == "I-864EZ" or SLPC == "AOS I-130":
return 'T1-FAM'
elif SLPC == "204(L)":
return 'T2-HO_204(L)'
elif SLPC == "AR-11":
return 'T1-OTH_AR-11'
elif SLPC == "DS-160" or SLPC == "DS-260":
return 'T1-CON'
elif SLPC == "EOIR 27" or SLPC == "Mandamus Action" or SLPC == "EOIR-26":
return 'T2-FED'
elif SLPC == "EOIR-29 BIA Appeal" or SLPC.startswith("I-290B") == True or SLPC == "N-336" or SLPC == "EOIR-29":
return 'T2-APO'
elif SLPC == "G-639":
return 'T1-OTH_G639'
elif SLPC == "I-102":
return 'T1-OTH_I102'
elif SLPC.startswith("I-131") == True:
return 'T1-TRV'
elif SLPC == "I-192":
return 'T2-OTH_I-192'
elif SLPC == "I-360 SIJS" or LPC == "44 Minor Guardianship / Conservatorship" or LPC == "42 Neglected/Abused/Dependent":
return 'T2-SIJS'
elif SLPC == "I-360 VAWA Self-Petition":
return 'T2-VAWA'
elif SLPC == "I-539":
return 'T1-OTH_I539'
elif SLPC == "I-601" or SLPC == "I-601A":
return 'T2-WOI'
elif SLPC == "I-765":
return 'T1-EAD'
elif SLPC == "I-821":
return 'T1-TPS'
elif | |
= f'''{("!" if expr.operator == "!=" else "")}Objects.equals({self.expr(expr.left)}, {self.expr(expr.right)})'''
else:
res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
else:
res = f'''{self.expr(expr.left)} {expr.operator} {self.mutated_expr(expr.right, expr.left if expr.operator == "=" else None)}'''
elif isinstance(expr, exprs.ArrayLiteral):
if len(expr.items) == 0:
res = f'''new {self.type(expr.actual_type, True, True)}()'''
else:
self.imports[f'''java.util.List'''] = None
self.imports[f'''java.util.ArrayList'''] = None
res = f'''new ArrayList<>(List.of({", ".join(list(map(lambda x: self.expr(x), expr.items)))}))'''
elif isinstance(expr, exprs.CastExpression):
res = f'''(({self.type(expr.new_type)}){self.expr(expr.expression)})'''
elif isinstance(expr, exprs.ConditionalExpression):
res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.mutated_expr(expr.when_false, expr.when_true)}'''
elif isinstance(expr, exprs.InstanceOfExpression):
res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
elif isinstance(expr, exprs.ParenthesizedExpression):
res = f'''({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.RegexLiteral):
self.imports[f'''io.onelang.std.core.RegExp'''] = None
res = f'''new RegExp({json.dumps(expr.pattern, separators=(',', ':'))})'''
elif isinstance(expr, types.Lambda):
if len(expr.body.statements) == 1 and isinstance(expr.body.statements[0], stats.ReturnStatement):
body = " " + self.expr((expr.body.statements[0]).expression)
else:
body = self.block(expr.body, False)
params = list(map(lambda x: self.name_(x.name), expr.parameters))
res = f'''{(params[0] if len(params) == 1 else f'({", ".join(params)})')} ->{body}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
res = f'''{expr.operator}{self.expr(expr.operand)}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
res = f'''{self.expr(expr.operand)}{expr.operator}'''
elif isinstance(expr, exprs.MapLiteral):
if len(expr.items) > 10:
raise Error("MapLiteral is only supported with maximum of 10 items")
if len(expr.items) == 0:
res = f'''new {self.type(expr.actual_type, True, True)}()'''
else:
self.imports[f'''java.util.Map'''] = None
self.imports[f'''java.util.LinkedHashMap'''] = None
repr = ", ".join(list(map(lambda item: f'''{json.dumps(item.key, separators=(',', ':'))}, {self.expr(item.value)}''', expr.items)))
res = f'''new LinkedHashMap<>(Map.of({repr}))'''
elif isinstance(expr, exprs.NullLiteral):
res = f'''null'''
elif isinstance(expr, exprs.AwaitExpression):
res = f'''{self.expr(expr.expr)}'''
elif isinstance(expr, refs.ThisReference):
res = f'''this'''
elif isinstance(expr, refs.StaticThisReference):
res = f'''{self.current_class.name}'''
elif isinstance(expr, refs.EnumReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.ClassReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.MethodParameterReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.VariableDeclarationReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.ForVariableReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.ForeachVariableReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.CatchVariableReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.GlobalFunctionReference):
res = f'''{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.SuperReference):
res = f'''super'''
elif isinstance(expr, refs.StaticFieldReference):
res = f'''{self.name_(expr.decl.parent_interface.name)}.{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.StaticPropertyReference):
res = f'''{self.name_(expr.decl.parent_class.name)}.{self.name_(expr.decl.name)}'''
elif isinstance(expr, refs.InstanceFieldReference):
# TODO: unified handling of field -> property conversion?
if self.use_getter_setter(expr):
res = f'''{self.expr(expr.object)}.get{self.uc_first(expr.field.name)}()'''
else:
res = f'''{self.expr(expr.object)}.{self.name_(expr.field.name)}'''
elif isinstance(expr, refs.InstancePropertyReference):
res = f'''{self.expr(expr.object)}.{("set" if self.is_set_expr(expr) else "get")}{self.uc_first(expr.property.name)}()'''
elif isinstance(expr, refs.EnumMemberReference):
res = f'''{self.name_(expr.decl.parent_enum.name)}.{self.name_(expr.decl.name)}'''
elif isinstance(expr, exprs.NullCoalesceExpression):
res = f'''({self.expr(expr.default_expr)} != null ? ({self.expr(expr.default_expr)}) : ({self.mutated_expr(expr.expr_if_null, expr.default_expr)}))'''
else:
pass
return res
def use_getter_setter(self, field_ref):
    """Decide whether a field access must be rendered through get/set accessors.

    Accessors are required when the target object is typed as an interface, or
    when the field itself implements one or more interface declarations.
    """
    if isinstance(field_ref.object.actual_type, astTypes.InterfaceType):
        return True
    intf_decls = field_ref.field.interface_declarations
    return intf_decls != None and len(intf_decls) > 0
def block(self, block, allow_one_liner = True):
    """Render a statement block: `{ }` when empty, an indented one-liner when
    allowed, otherwise a full braced block."""
    stmt_len = len(block.statements)
    if stmt_len == 0:
        return " { }"
    # A single statement may be emitted without braces, unless it is an `if`
    # or a variable declaration (both need their own scope/braces in Java).
    can_inline = (allow_one_liner
                  and stmt_len == 1
                  and not isinstance(block.statements[0], stats.IfStatement)
                  and not isinstance(block.statements[0], stats.VariableDeclaration))
    if can_inline:
        return f'''\n{self.pad(self.raw_block(block))}'''
    return f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
def stmt_default(self, stmt):
    """Render a single AST statement to Java source.

    Dispatches on the statement's concrete type; unhandled kinds fall through
    to the literal marker "UNKNOWN-STATEMENT" so the problem is visible in the
    generated output.
    """
    res = "UNKNOWN-STATEMENT"
    if isinstance(stmt, stats.BreakStatement):
        res = "break;"
    elif isinstance(stmt, stats.ReturnStatement):
        res = "return;" if stmt.expression == None else f'''return {self.mutate_arg(stmt.expression, False)};'''
    elif isinstance(stmt, stats.UnsetStatement):
        # Java has no "unset"; keep it as a comment for traceability.
        res = f'''/* unset {self.expr(stmt.expression)}; */'''
    elif isinstance(stmt, stats.ThrowStatement):
        res = f'''throw {self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.ExpressionStatement):
        res = f'''{self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.VariableDeclaration):
        # `var` cannot infer a type from a bare null literal, so spell the type out.
        if isinstance(stmt.initializer, exprs.NullLiteral):
            res = f'''{self.type(stmt.type, stmt.mutability.mutated)} {self.name_(stmt.name)} = null;'''
        elif stmt.initializer != None:
            res = f'''var {self.name_(stmt.name)} = {self.mutate_arg(stmt.initializer, stmt.mutability.mutated)};'''
        else:
            res = f'''{self.type(stmt.type)} {self.name_(stmt.name)};'''
    elif isinstance(stmt, stats.ForeachStatement):
        res = f'''for (var {self.name_(stmt.item_var.name)} : {self.expr(stmt.items)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.IfStatement):
        # Collapse `else { if ... }` into `else if ...` when the else branch is
        # a single nested if.
        else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
        res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
        res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
    elif isinstance(stmt, stats.WhileStatement):
        res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.ForStatement):
        res = f'''for ({(self.var(stmt.item_var, None) if stmt.item_var != None else "")}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.DoStatement):
        res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)});'''
    elif isinstance(stmt, stats.TryStatement):
        res = "try" + self.block(stmt.try_body, False)
        if stmt.catch_body != None:
            #this.imports.add("System");
            # NOTE(review): the catch type is hard-coded to Exception — the
            # original catch variable's declared type is not preserved.
            res += f''' catch (Exception {self.name_(stmt.catch_var.name)}) {self.block(stmt.catch_body, False)}'''
        if stmt.finally_body != None:
            res += "finally" + self.block(stmt.finally_body)
    elif isinstance(stmt, stats.ContinueStatement):
        res = f'''continue;'''
    else:
        pass
    return res
def stmt(self, stmt):
    """Render one statement, honoring `java` / `java-import` attribute
    overrides and giving plugins first shot before the default renderer."""
    attrs = stmt.attributes
    # A "java-import" attribute registers extra imports for the output file.
    if attrs != None and "java-import" in attrs:
        for imp in re.split("\\n", attrs.get("java-import")):
            self.imports[imp] = None
    rendered = None
    if attrs != None and "java" in attrs:
        # A "java" attribute supplies verbatim replacement source.
        rendered = attrs.get("java")
    else:
        for plugin in self.plugins:
            rendered = plugin.stmt(stmt)
            if rendered != None:
                break
        if rendered == None:
            rendered = self.stmt_default(stmt)
    return self.leading(stmt) + rendered
def stmts(self, stmts):
    """Render a list of statements, one per line."""
    rendered = [self.stmt(s) for s in stmts]
    return "\n".join(rendered)
def raw_block(self, block):
    """Render a block's statements without surrounding braces or padding."""
    statements = block.statements
    return self.stmts(statements)
def method_gen(self, prefix, params, body):
    """Render a method as `<prefix>(<param list>)<body>`."""
    param_reprs = [self.var_wo_init(p, p) for p in params]
    return f'''{prefix}({", ".join(param_reprs)}){body}'''
def method(self, method, is_cls):
    """Render a method declaration: visibility/static/throws modifiers, type
    arguments, return type, name, parameters, and body (or `;` if abstract)."""
    # TODO: final
    parts = []
    if is_cls:
        # Interfaces omit the visibility modifier.
        parts.append(self.vis(method.visibility) + " ")
    parts.append(self.pre_if("static ", method.is_static))
    parts.append(self.pre_if("/* throws */ ", method.throws))
    if len(method.type_arguments) > 0:
        parts.append(f'''<{", ".join(method.type_arguments)}> ''')
    parts.append(f'''{self.type(method.returns, False)} ''')
    parts.append(self.name_(method.name))
    prefix = "".join(parts)
    if method.body == None:
        body = ";"
    else:
        body = f''' {{\n{self.pad(self.stmts(method.body.statements))}\n}}'''
    return self.method_gen(prefix, method.parameters, body)
def class_(self, cls_):
    """Render the body of a Java class: fields, synthesized getter/setter
    properties, static initializer, constructor, and methods."""
    self.current_class = cls_
    res_list = []
    static_constructor_stmts = []
    complex_field_inits = []
    field_reprs = []
    prop_reprs = []
    for field in cls_.fields:
        # "Complex" initializers (anything other than string/bool/number
        # literals) cannot be inlined at the declaration; they are deferred
        # into a constructor or static initializer block.
        is_initializer_complex = field.initializer != None and not (isinstance(field.initializer, exprs.StringLiteral)) and not (isinstance(field.initializer, exprs.BooleanLiteral)) and not (isinstance(field.initializer, exprs.NumericLiteral))
        prefix = f'''{self.vis(field.visibility)} {self.pre_if("static ", field.is_static)}'''
        if len(field.interface_declarations) > 0:
            # The field implements an interface member: emit a backing field
            # plus a getter/setter pair.
            var_type = self.var_type(field, field)
            name = self.name_(field.name)
            pname = self.uc_first(field.name)
            set_to_false = astTypes.TypeHelper.equals(field.type, self.current_class.parent_file.literal_types.boolean)
            prop_reprs.append(f'''{var_type} {name}{(" = false" if set_to_false else f' = {self.expr(field.initializer)}' if field.initializer != None else "")};\n''' + f'''{prefix}{var_type} get{pname}() {{ return this.{name}; }}\n''' + f'''{prefix}void set{pname}({var_type} value) {{ this.{name} = value; }}''')
        elif is_initializer_complex:
            if field.is_static:
                static_constructor_stmts.append(stats.ExpressionStatement(exprs.BinaryExpression(refs.StaticFieldReference(field), "=", field.initializer)))
            else:
                complex_field_inits.append(stats.ExpressionStatement(exprs.BinaryExpression(refs.InstanceFieldReference(refs.ThisReference(cls_), field), "=", field.initializer)))
            field_reprs.append(f'''{prefix}{self.var_wo_init(field, field)};''')
        else:
            field_reprs.append(f'''{prefix}{self.var(field, field)};''')
    res_list.append("\n".join(field_reprs))
    res_list.append("\n\n".join(prop_reprs))
    for prop in cls_.properties:
        prefix = f'''{self.vis(prop.visibility)} {self.pre_if("static ", prop.is_static)}'''
        if prop.getter != None:
            res_list.append(f'''{prefix}{self.type(prop.type)} get{self.uc_first(prop.name)}(){self.block(prop.getter, False)}''')
        if prop.setter != None:
            res_list.append(f'''{prefix}void set{self.uc_first(prop.name)}({self.type(prop.type)} value){self.block(prop.setter, False)}''')
    if len(static_constructor_stmts) > 0:
        # Deferred static field initializers become a `static { ... }` block.
        res_list.append(f'''static {{\n{self.pad(self.stmts(static_constructor_stmts))}\n}}''')
    if cls_.constructor_ != None:
        # Constructor-promoted fields get explicit `this.field = param;`
        # assignments prepended to the constructor body.
        constr_field_inits = []
        for field in list(filter(lambda x: x.constructor_param != None, cls_.fields)):
            field_ref = refs.InstanceFieldReference(refs.ThisReference(cls_), field)
            mp_ref = refs.MethodParameterReference(field.constructor_param)
            # TODO: decide what to do with "after-TypeEngine" transformations
            mp_ref.set_actual_type(field.type, False, False)
            constr_field_inits.append(stats.ExpressionStatement(exprs.BinaryExpression(field_ref, "=", mp_ref)))
        super_call = f'''super({", ".join(list(map(lambda x: self.expr(x), cls_.constructor_.super_call_args)))});\n''' if cls_.constructor_.super_call_args != None else ""
        # @java var stmts = Stream.of(constrFieldInits, complexFieldInits, cls.constructor_.getBody().statements).flatMap(Collection::stream).toArray(Statement[]::new);
        # @java-import java.util.Collection
        # @java-import java.util.stream.Stream
        stmts = constr_field_inits + complex_field_inits + cls_.constructor_.body.statements
        # TODO: super calls
        res_list.append(self.method_gen("public " + self.pre_if("/* throws */ ", cls_.constructor_.throws) + self.name_(cls_.name), cls_.constructor_.parameters, f'''\n{{\n{self.pad(super_call + self.stmts(stmts))}\n}}'''))
    elif len(complex_field_inits) > 0:
        # No explicit constructor: synthesize a default one to run the
        # deferred (complex) instance field initializers.
        res_list.append(f'''public {self.name_(cls_.name)}()\n{{\n{self.pad(self.stmts(complex_field_inits))}\n}}''')
    methods = []
    for method in cls_.methods:
        if method.body == None:
            continue
        # declaration only
        methods.append(self.method(method, True))
    res_list.append("\n\n".join(methods))
    return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def uc_first(self, str):
    """Return *str* with its first character upper-cased; the rest unchanged.

    Fixed: the original indexed str[0] unconditionally and raised IndexError
    on an empty string; empty (or falsy) input is now returned as-is.
    """
    if not str:
        return str
    return str[0].upper() + str[1:]
def interface(self, intf):
    """Render the members of a Java interface: a getter/setter pair for each
    field, followed by the method declarations."""
    self.current_class = intf
    sections = []
    # Interface fields are expressed as abstract accessor pairs.
    for field in intf.fields:
        field_type = self.var_type(field, field)
        prop_name = self.uc_first(field.name)
        sections.append(f'''{field_type} get{prop_name}();\nvoid set{prop_name}({field_type} value);''')
    method_reprs = [self.method(m, False) for m in intf.methods]
    sections.append("\n".join(method_reprs))
    non_empty = [s for s in sections if s != ""]
    return self.pad("\n\n".join(non_empty))
def pad(self, str):
    # Indent every line of *str* by one level; used when nesting rendered
    # blocks inside braces. (Parameter name shadows the builtin `str`.)
    return "\n".join(list(map(lambda x: f''' {x}''', re.split("\\n", str))))
def path_to_ns(self, path):
    """Convert a slash-separated source path into a dotted namespace by
    dropping the final (file name) segment.

    e.g. Generator/ExprLang/ExprLangAst.ts -> Generator.ExprLang
    """
    segments = path.split("/")
    return ".".join(segments[:-1])
def imports_head(self):
    """Flush the collected import set into `import ...;` lines.

    Clears self.imports as a side effect so the next file starts fresh.
    Returns "" when nothing was collected, otherwise the import lines
    followed by a blank line.
    """
    pending = list(self.imports.keys())
    self.imports = dict()
    if len(pending) == 0:
        return ""
    lines = [f'''import {imp};''' for imp in pending]
    return "\n".join(lines) + "\n\n"
def to_import(self, scope):
    """Map an export scope to the Java package name to import from."""
    # TODO: hack
    if scope.scope_name == "index":
        # Library root: derive "io.onelang.std.<name>" from the package name,
        # stripping the "One." prefix and any "-<version>" suffix.
        base = re.split("-", scope.package_name)[0]
        name = re.sub("One\\.", "", base).lower()
        return f'''io.onelang.std.{name}'''
    scope_path = re.sub("/", ".", scope.scope_name)
    return f'''{scope.package_name}.{scope_path}'''
def generate(self, pkg):
result = []
for path in pkg.files.keys():
file = pkg.files.get(path)
package_path = f'''{pkg.name}/{file.source_path.path}'''
dst_dir = f'''src/main/java/{package_path}'''
package_name = re.sub("/", ".", package_path)
imports = dict()
for imp_list in file.imports:
imp_pkg = self.to_import(imp_list.export_scope)
for imp in imp_list.imports:
imports[f'''{imp_pkg}.{imp.name}'''] = None
head_imports = "\n".join(list(map(lambda x: f'''import {x};''', Array.from_(imports.keys()))))
head = f'''package {package_name};\n\n{head_imports}\n\n'''
for enum_ in file.enums:
result.append(genFile.GeneratedFile(f'''{dst_dir}/{enum_.name}.java''', f'''{head}public enum {self.name_(enum_.name)} {{ {", ".join(list(map(lambda x: self.name_(x.name), enum_.values)))} }}'''))
for intf in file.interfaces:
res = f'''public interface {self.name_(intf.name)}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(" extends ", list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.interface(intf)}\n}}'''
result.append(genFile.GeneratedFile(f'''{dst_dir}/{intf.name}.java''', f'''{head}{self.imports_head()}{res}'''))
for cls_ in file.classes:
res = f'''public class {self.name_(cls_.name)}{self.type_args(cls_.type_arguments)}''' + (f''' extends {self.type(cls_.base_class)}''' if cls_.base_class != None else "") + self.pre_arr(" implements ", list(map(lambda x: self.type(x), | |
0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multidimensional(self):
    """Multidimensional class confidences are unsupported: assigning them
    must raise ValueError.

    Fixed: removed the dead 2-D `class_targets1`/`class_targets2` assignments
    that were immediately overwritten by the 3-D targets below.
    """
    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2):
        box_list1 = box_list.BoxList(groundtruth_boxlist1)
        box_list2 = box_list.BoxList(groundtruth_boxlist2)
        gt_box_batch = [box_list1, box_list2]
        gt_class_confidences_batch = [class_targets1, class_targets2]
        anchors_boxlist = box_list.BoxList(anchor_means)
        multiclass_target_assigner = self._get_target_assigner()
        target_dimensions = (2, 3)
        unmatched_class_label = tf.constant(np.zeros(target_dimensions),
                                            tf.float32)
        implicit_class_weight = 0.5
        (cls_targets, cls_weights, reg_targets, reg_weights,
         _) = targetassigner.batch_assign_confidences(
             multiclass_target_assigner,
             anchors_boxlist,
             gt_box_batch,
             gt_class_confidences_batch,
             unmatched_class_label=unmatched_class_label,
             include_background_class=True,
             implicit_class_weight=implicit_class_weight)
        return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    # 3-D (multidimensional) confidences — the unsupported case under test.
    class_targets1 = np.array([[[0, 1, 1],
                                [1, 1, 0]]], dtype=np.float32)
    class_targets2 = np.array([[[0, 1, 1],
                                [1, 1, 0]],
                               [[0, 0, 1],
                                [0, 0, 1]]], dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)
    with self.assertRaises(ValueError):
        _, _, _, _ = self.execute(graph_fn, [
            anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
            class_targets1, class_targets2
        ])
class CreateTargetAssignerTest(test_case.TestCase):
    """Smoke tests for the `create_target_assigner` named constructor."""

    def test_create_target_assigner(self):
        """Tests that named constructor gives working target assigners.

        TODO(rathodv): Make this test more general.
        """
        corners = [[0.0, 0.0, 1.0, 1.0]]
        groundtruth = box_list.BoxList(tf.constant(corners))
        priors = box_list.BoxList(tf.constant(corners))
        # Multibox assigner only exists under TF1.
        if tf_version.is_tf1():
            multibox_ta = (targetassigner
                           .create_target_assigner('Multibox', stage='proposal'))
            multibox_ta.assign(priors, groundtruth)
        # No tests on output, as that may vary arbitrarily as new target assigners
        # are added. As long as it is constructed correctly and runs without errors,
        # tests on the individual assigners cover correctness of the assignments.
        anchors = box_list.BoxList(tf.constant(corners))
        faster_rcnn_proposals_ta = (targetassigner
                                    .create_target_assigner('FasterRCNN',
                                                            stage='proposal'))
        faster_rcnn_proposals_ta.assign(anchors, groundtruth)
        fast_rcnn_ta = (targetassigner
                        .create_target_assigner('FastRCNN'))
        fast_rcnn_ta.assign(anchors, groundtruth)
        faster_rcnn_detection_ta = (targetassigner
                                    .create_target_assigner('FasterRCNN',
                                                            stage='detection'))
        faster_rcnn_detection_ta.assign(anchors, groundtruth)
        # Unknown detector / stage names must be rejected.
        with self.assertRaises(ValueError):
            targetassigner.create_target_assigner('InvalidDetector',
                                                  stage='invalid_stage')
def _array_argmax(array):
return np.unravel_index(np.argmax(array), array.shape)
class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase,
parameterized.TestCase):
def setUp(self):
    """Define fixture boxes shared by the tests.

    Boxes are in normalized [ymin, xmin, ymax, xmax] form, matching how they
    are fed to tf.constant in the individual tests.
    """
    super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp()
    # Full-image box (center at 0.5, 0.5).
    self._box_center = [0.0, 0.0, 1.0, 1.0]
    # Smaller box sharing the same center.
    self._box_center_small = [0.25, 0.25, 0.75, 0.75]
    # Box occupying the lower-left quadrant.
    self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
    # Box whose center is offset from the image center.
    self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
    # Box with coordinates that do not land on pixel boundaries.
    self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_center_location(self):
    """Test that the centers are at the correct location."""
    def graph_fn():
        box_batch = [tf.constant([self._box_center, self._box_lower_left])]
        classes = [
            tf.one_hot([0, 1], depth=4),
        ]
        # Stride of 4 — an 80x80 image yields a 20x20 heatmap.
        assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
        targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
                                                            classes)
        return targets
    targets = self.execute(graph_fn, [])
    # Full-image box: peak at (10, 10) in class channel 0 (its center,
    # (0.5, 0.5) * 80 / 4).
    self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0]))
    self.assertAlmostEqual(1.0, targets[0, 10, 10, 0])
    # Lower-left box: peak at (15, 5) in class channel 1 (center
    # (0.75, 0.25) * 80 / 4).
    self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1]))
    self.assertAlmostEqual(1.0, targets[0, 15, 5, 1])
@parameterized.parameters(
    {'keypoint_weights_for_center': [1.0, 1.0, 1.0, 1.0]},
    {'keypoint_weights_for_center': [0.0, 0.0, 1.0, 1.0]},
)
def test_center_location_by_keypoints(self, keypoint_weights_for_center):
    """Test that the centers are at the correct location.

    Centers are derived from keypoints; the parameterization toggles which
    keypoints contribute to the center (all four vs. only the last two).
    """
    # Three instances, four keypoints each; the third instance is zeroed out
    # and carries weight 0 below, so it must not produce a peak.
    kpts_y = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.0, 0.0, 0.0, 0.0]]
    kpts_x = [[0.5, 0.6, 0.7, 0.8], [0.1, 0.2, 0.3, 0.4], [0.0, 0.0, 0.0, 0.0]]
    gt_keypoints_list = [
        tf.stack([tf.constant(kpts_y), tf.constant(kpts_x)], axis=2)
    ]
    # Per-keypoint visibility/validity weights; instance 2 has keypoints
    # 1 and 3 marked invalid.
    kpts_weight = [[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0],
                   [1.0, 0.0, 1.0, 0.0]]
    gt_keypoints_weights_list = [tf.constant(kpts_weight)]
    gt_classes_list = [
        tf.one_hot([0, 0, 0], depth=1),
    ]
    gt_weights_list = [tf.constant([1.0, 1.0, 0.0])]
    def graph_fn():
        assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
            4,
            keypoint_class_id=0,
            keypoint_indices=[0, 1, 2, 3],
            keypoint_weights_for_center=keypoint_weights_for_center)
        targets = assigner.assign_center_targets_from_keypoints(
            80,
            80,
            gt_classes_list=gt_classes_list,
            gt_keypoints_list=gt_keypoints_list,
            gt_weights_list=gt_weights_list,
            gt_keypoints_weights_list=gt_keypoints_weights_list)
        return targets
    targets = self.execute(graph_fn, [])
    if sum(keypoint_weights_for_center) == 4.0:
        # There should be two peaks at location (5, 13), and (12, 4).
        # (5, 13) = ((0.1 + 0.2 + 0.3 + 0.4) / 4 * 80 / 4,
        #            (0.5 + 0.6 + 0.7 + 0.8) / 4 * 80 / 4)
        # (12, 4) = ((0.5 + 0.7) / 2 * 80 / 4,
        #            (0.1 + 0.3) / 2 * 80 / 4)
        self.assertEqual((5, 13), _array_argmax(targets[0, :, :, 0]))
        self.assertAlmostEqual(1.0, targets[0, 5, 13, 0])
        self.assertEqual((1, 20, 20, 1), targets.shape)
        # Zero out the first peak so argmax exposes the second one.
        targets[0, 5, 13, 0] = 0.0
        self.assertEqual((12, 4), _array_argmax(targets[0, :, :, 0]))
        self.assertAlmostEqual(1.0, targets[0, 12, 4, 0])
    else:
        # There should be two peaks at location (5, 13), and (12, 4).
        # (7, 15) = ((0.3 + 0.4) / 2 * 80 / 4,
        #            (0.7 + 0.8) / 2 * 80 / 4)
        # (14, 6) = (0.7 * 80 / 4, 0.3 * 80 / 4)
        self.assertEqual((7, 15), _array_argmax(targets[0, :, :, 0]))
        self.assertAlmostEqual(1.0, targets[0, 7, 15, 0])
        self.assertEqual((1, 20, 20, 1), targets.shape)
        # Zero out the first peak so argmax exposes the second one.
        targets[0, 7, 15, 0] = 0.0
        self.assertEqual((14, 6), _array_argmax(targets[0, :, :, 0]))
        self.assertAlmostEqual(1.0, targets[0, 14, 6, 0])
def test_center_batch_shape(self):
"""Test that the shape of the target for a batch is correct."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertEqual((3, 20, 20, 4), targets.shape)
def test_center_overlap_maximum(self):
"""Test that when boxes overlap we, are computing the maximum."""
def graph_fn():
box_batch = [
tf.constant([
self._box_center, self._box_center_offset, self._box_center,
self._box_center_offset
])
]
classes = [
tf.one_hot([0, 0, 1, 2], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
class0_targets = targets[0, :, :, 0]
class1_targets = targets[0, :, :, 1]
class2_targets = targets[0, :, :, 2]
np.testing.assert_allclose(class0_targets,
np.maximum(class1_targets, class2_targets))
def test_size_blur(self):
"""Test that the heatmap of a larger box is more blurred."""
def graph_fn():
box_batch = [tf.constant([self._box_center, self._box_center_small])]
classes = [
tf.one_hot([0, 1], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertGreater(
np.count_nonzero(targets[:, :, :, 0]),
np.count_nonzero(targets[:, :, :, 1]))
def test_weights(self):
"""Test that the weights correctly ignore ground truth."""
def graph1_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph1_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 0].max())
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def graph2_fn():
weights = [
tf.constant([0., 1.]),
tf.constant([1.]),
tf.constant([1.]),
]
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes,
weights)
return targets
targets = self.execute(graph2_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def test_low_overlap(self):
def graph1_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets_low_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_low_overlap
targets_low_overlap = self.execute(graph1_fn, [])
self.assertLess(1, np.count_nonzero(targets_low_overlap))
def graph2_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.6)
targets_medium_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_medium_overlap
targets_medium_overlap = self.execute(graph2_fn, [])
self.assertLess(1, np.count_nonzero(targets_medium_overlap))
def graph3_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.99)
targets_high_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_high_overlap
targets_high_overlap = self.execute(graph3_fn, [])
self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap))
self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap))
def test_empty_box_list(self):
"""Test that an empty box list gives an all 0 heatmap."""
def graph_fn():
box_batch = [
tf.zeros((0, 4), dtype=tf.float32),
]
classes = [
tf.zeros((0, 5), dtype=tf.float32),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets
targets = self.execute(graph_fn, [])
np.testing.assert_allclose(targets, 0.)
class CenterNetBoxTargetAssignerTest(test_case.TestCase):
def setUp(self):
super(CenterNetBoxTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = | |
with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.access_secret_version(
service.AccessSecretVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [service.DisableSecretVersionRequest, dict,])
def test_disable_secret_version(request_type, transport: str = "grpc"):
    """Verify disable_secret_version sends the request and maps the response."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.SecretVersion(
            name="name_value",
            state=resources.SecretVersion.State.ENABLED,
            etag="etag_value",
        )
        response = client.disable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DisableSecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
def test_disable_secret_version_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        client.disable_secret_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DisableSecretVersionRequest()
@pytest.mark.asyncio
async def test_disable_secret_version_async(
    transport: str = "grpc_asyncio", request_type=service.DisableSecretVersionRequest
):
    """Async variant: verify request dispatch and response field mapping."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion(
                name="name_value",
                state=resources.SecretVersion.State.ENABLED,
                etag="etag_value",
            )
        )
        response = await client.disable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DisableSecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_disable_secret_version_async_from_dict():
    """Run the async test with a dict request to exercise request coercion."""
    await test_disable_secret_version_async(request_type=dict)
def test_disable_secret_version_field_headers():
    """Verify the routing header x-goog-request-params carries request.name."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DisableSecretVersionRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        call.return_value = resources.SecretVersion()
        client.disable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_disable_secret_version_field_headers_async():
    """Async variant: verify the routing header carries request.name."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DisableSecretVersionRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion()
        )
        await client.disable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_disable_secret_version_flattened():
    """Verify a flattened kwargs call populates the request object."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.SecretVersion()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.disable_secret_version(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_disable_secret_version_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.disable_secret_version(
            service.DisableSecretVersionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_disable_secret_version_flattened_async():
    """Async variant: verify a flattened kwargs call populates the request."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.disable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain `resources.SecretVersion()` assignment that was immediately
        # overwritten has been removed; only the awaitable fake is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.disable_secret_version(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_disable_secret_version_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.disable_secret_version(
            service.DisableSecretVersionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [service.EnableSecretVersionRequest, dict,])
def test_enable_secret_version(request_type, transport: str = "grpc"):
    """Verify enable_secret_version sends the request and maps the response."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.SecretVersion(
            name="name_value",
            state=resources.SecretVersion.State.ENABLED,
            etag="etag_value",
        )
        response = client.enable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.EnableSecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
def test_enable_secret_version_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as call:
        client.enable_secret_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.EnableSecretVersionRequest()
@pytest.mark.asyncio
async def test_enable_secret_version_async(
    transport: str = "grpc_asyncio", request_type=service.EnableSecretVersionRequest
):
    """Async variant: verify request dispatch and response field mapping."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion(
                name="name_value",
                state=resources.SecretVersion.State.ENABLED,
                etag="etag_value",
            )
        )
        response = await client.enable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.EnableSecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_enable_secret_version_async_from_dict():
    """Run the async test with a dict request to exercise request coercion."""
    await test_enable_secret_version_async(request_type=dict)
def test_enable_secret_version_field_headers():
    """Verify the routing header x-goog-request-params carries request.name."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.EnableSecretVersionRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as call:
        call.return_value = resources.SecretVersion()
        client.enable_secret_version(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_enable_secret_version_field_headers_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.EnableSecretVersionRequest()
request.name = "name/value"
| |
# TODO ML add all API functions that call into _command functions in the TincClient
import dis
from tinc import DataPool, ParameterSpace, DiskBuffer
bytecode = dis.Bytecode(f)
for instr in bytecode:
if instr.opname == 'LOAD_METHOD':
if instr.argval == 'get_slice':
print('''WARNING: calling certain Tinc functions inside callbacks can cause deadlocks and Timeout.
If this is happening use asynchronous callbacks by setting synchrouns to False when registering callback''' )
def _find_nearest(self, value):
# TODO assumes values are sorted ascending. Add checks and support for other models.
if len(self._values) > 0:
for i in range(len(self._values) - 1):
if value < (self._values[i] + self._values[i + 1]) /2:
return self._data_type(self._values[i])
return self._data_type(self._values[-1])
else:
return self._data_type(value)
def _configure_widget(self):
    """Sync the slider widget's range, step and readout format with the
    parameter's space (min/max/values). No-op if no widget exists yet."""
    if self._interactive_widget is not None:
        widget = self._interactive_widget.children[0]
        if len(self._values) > 1:
            # Use the smallest gap between space values as the slider step.
            min_step = np.min(np.diff(self._values))
        else:
            # No discrete space: fall back to 1% of the range.
            min_step = (self._maximum - self._minimum) /100
        num_zeros = np.floor(np.abs(np.log10(min_step)))
        # Heuristics to determine display precision: grow the number of
        # decimals until min_step renders as a (nearly) whole number,
        # capped at 7 digits.
        temp_val = min_step * 10**(num_zeros)
        while np.abs(temp_val - int(temp_val)) > 0.000001 and num_zeros < 7:
            num_zeros += 1
            temp_val = min_step * 10**(num_zeros)
        format = f'.{int(num_zeros)}f'
        widget.min = self._minimum
        widget.max = self._maximum
        widget.readout_format = format
        widget.step = min_step
    # if self._control_widget is not None:
    #     label = self._control_widget.children[1]
    #     label.value = str(self.get_value())
def register_callback_async(self, f):
    """Register *f* as an asynchronous value callback (runs in a thread)."""
    self.register_callback(f, False)
def remove_callback(self, f):
    """Unregister *f* from both the sync and async callback registries.

    FIXME remove by name rather than by object.
    """
    if f in self._value_callbacks:
        self._value_callbacks.remove(f)
    if f in self._async_callbacks:
        self._async_callbacks.remove(f)
def clear_callbacks(self):
    """Remove all registered value callbacks.

    Also resets the async-callback registry; previously only the sync list
    was cleared, which left stale entries behind so a callback re-registered
    synchronously would still be dispatched on a thread (inconsistent with
    remove_callback, which maintains both lists).
    """
    self._value_callbacks = []
    self._async_callbacks = []
def _trigger_callbacks(self, value):
    """Invoke every registered callback with *value*.

    Callbacks registered as asynchronous are launched on daemon threads via
    _cb_async_wrapper; synchronous callbacks run inline with exceptions
    caught and reported so one failing callback does not block the rest.
    """
    for cb in self._value_callbacks:
        if self._async_callbacks.count(cb) == 1:
            # NOTE(review): debug print left in; consider logging instead.
            print(f"starting async callback {cb}")
            x = threading.Thread(target=self._cb_async_wrapper, args=(cb, value), daemon=True)
            x.start()
        else:
            try:
                cb(value)
            except Exception as e:
                print("Exception in parameter callback (Continuing):")
                traceback.print_exc()
def _cb_async_wrapper(self, cb, value):
try:
cb(value)
except Exception as e:
print("Exception in *async* parameter callback (Continuing):")
traceback.print_exc()
class ParameterString(Parameter):
    """String-valued TINC parameter (no numeric minimum/maximum)."""

    def __init__(self, tinc_id: str, group: str = "", default_value: str = "", tinc_client= None):
        super().__init__(tinc_id, group, default_value=default_value, tinc_client=tinc_client)

    def __str__(self):
        details = f" ** Parameter {self.id} group: {self.group} ({type(self.value)})\n"
        details += f" Default: {self.default}\n"
        return details

    def _init(self, default_value):
        # Values are plain Python strings; None maps to the empty string.
        self._data_type = str
        self.default = default_value
        if default_value is None:
            self.default = ""
        self._value = self.default

    def set_value(self, value):
        """Set the value locally, propagate it to the TINC server and the
        widget (if any), and fire registered callbacks."""
        self._value = self._data_type(value)
        if self.tinc_client:
            self.tinc_client.send_parameter_value(self)
        if self._interactive_widget:
            self._interactive_widget.children[0].value = self._data_type(value)
        self._trigger_callbacks(self._value)

    def set_value_from_message(self, message):
        """Apply a value update from the network (no echo back to server)."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        if not self._value == value.valueString:
            self._value = self._data_type(value.valueString)
            if self._interactive_widget:
                self._interactive_widget.children[0].value = self._data_type(value.valueString)
            self._trigger_callbacks(self._value)
        return True

    def set_space_from_message(self, message):
        """Apply a parameter-space update from the network.

        Bug fix: the space used to be written element-by-element into an
        uninitialized float array created with np.ndarray((count)), which
        fails for string values; build an object-dtype array instead.
        """
        values = TincProtocol.ParameterSpaceValues()
        message.Unpack(values)
        self.ids = values.ids
        self.values = np.array([v.valueString for v in values.values],
                               dtype=object)
        return True

    def set_min_from_message(self, message):
        # Strings have no minimum; accept and ignore the message.
        return True

    def set_max_from_message(self, message):
        # Strings have no maximum; accept and ignore the message.
        return True

    def set_values(self, values):
        """Set the parameter space locally and propagate it to the server."""
        self._values = values
        self._space_data_type = VariantType.VARIANT_STRING
        # TODO validate that space is string
        if self.tinc_client:
            self.tinc_client.send_parameter_space(self)

    def interactive_widget(self):
        """Return an ipywidgets text area + Apply button bound to this
        parameter. A new widget pair is created on every call."""
        text_field = widgets.Textarea(
            value=self._value,
            description=self.id,
            disabled=False,
            continuous_update=True,
            # orientation='horizontal',
            readout=True,
            # readout_format='.3f',
        )
        button = widgets.Button( description="Apply" )
        def cb(button):
            self.set_from_internal_widget(text_field.value)
        button.on_click(cb)
        self._interactive_widget = HBox((text_field, button))
        return self._interactive_widget
class ParameterInt(Parameter):
    """Integer-valued TINC parameter with an int32 range and value space."""

    def __init__(self, tinc_id: str, group: str = "", minimum: int = 0, maximum: int = 127, default_value: int = 0, tinc_client = None):
        super().__init__(tinc_id, group, minimum = minimum, maximum = maximum, default_value = default_value, tinc_client = tinc_client)

    def _init(self, default_value):
        # Values are plain Python ints; None maps to 0.
        self._data_type = int
        self.default = default_value
        if default_value is None:
            self.default = 0
        self._value = self.default

    def set_value_from_message(self, message):
        """Apply a value update from the network (no echo back to server)."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        if not self._value == value.valueInt32:
            self._value = self._data_type(value.valueInt32)
            if self._interactive_widget:
                self._interactive_widget.children[0].value = self._data_type(value.valueInt32)
            self._trigger_callbacks(self._value)
        return True

    def set_space_from_message(self, message):
        """Apply a parameter-space update from the network.

        Bug fix: the space used to be written into an uninitialized array
        created with np.ndarray((count)) and assigned through
        ``self.values`` instead of ``self._values``; build the float array
        (matching the previous storage dtype) directly.
        """
        values = TincProtocol.ParameterSpaceValues()
        message.Unpack(values)
        self._ids = list(values.ids)
        self._values = np.array([v.valueInt32 for v in values.values],
                                dtype=float)
        return True

    def set_min_from_message(self, message):
        """Apply a minimum update from the network."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        self._minimum = value.valueInt32
        return True

    def set_max_from_message(self, message):
        """Apply a maximum update from the network."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        self._maximum = value.valueInt32
        return True

    def interactive_widget(self):
        """Return (lazily creating) a slider widget bound to this parameter."""
        if self._interactive_widget is None:
            # NOTE(review): a FloatSlider with step=1 is used even though the
            # parameter is integer-valued; an IntSlider may be intended.
            self._interactive_widget = interactive(
                self.set_from_internal_widget,
                value=widgets.FloatSlider(
                    value=self._value,
                    min=self.minimum,
                    max=self.maximum,
                    description=self.id,
                    disabled=False,
                    continuous_update=True,
                    orientation='horizontal',
                    readout=True,
                    step =1
                ))
            self._configure_widget()
        return self._interactive_widget

    def _configure_widget(self):
        """Sync the slider's range, step and readout format with the space."""
        if self._interactive_widget is not None:
            widget = self._interactive_widget.children[0]
            if len(self._values) > 1:
                # Smallest gap between space values becomes the slider step.
                min_step = np.min(np.diff(self._values))
            else:
                min_step = 1
            num_zeros = np.floor(np.abs(np.log10(min_step)))
            # Heuristics to determine display precision
            temp_val = min_step * 10**(num_zeros)
            while np.abs(temp_val - int(temp_val)) > 0.000001 and num_zeros < 7:
                num_zeros += 1
                temp_val = min_step * 10**(num_zeros)
            format = f'.{int(num_zeros)}f'
            widget.min = self._minimum
            widget.max = self._maximum
            widget.readout_format = format
            widget.step = min_step
class ParameterChoice(Parameter):
    """Parameter whose integer value indexes (or bit-masks) a list of
    named elements, displayed as a dropdown widget."""

    def __init__(self, tinc_id: str, group: str = "", minimum: int = 0, maximum: int = 127, default_value: int = 0, tinc_client = None):
        super().__init__(tinc_id, group, minimum = minimum, maximum = maximum, default_value = default_value, tinc_client = tinc_client)

    def _init(self, default_value):
        # Values are plain Python ints; None maps to 0.
        self._data_type = int
        self.default = default_value
        if default_value is None:
            self.default = 0
        self._value = self.default
        self.elements = []

    def set_value(self, value):
        """Clamp *value* to [min, max], snap it to the value space, store it,
        propagate it to the server/widget and fire callbacks."""
        if value < self._minimum:
            value = self._minimum
        if value > self._maximum:
            value = self._maximum
        value = self._find_nearest(value)
        self._value = self._data_type(value)
        if self.tinc_client:
            self.tinc_client.send_parameter_value(self)
        if self._interactive_widget:
            # The dropdown displays the element name, not the index.
            self._interactive_widget.children[0].value = self.elements[value]
        self._trigger_callbacks(self._value)

    def set_value_from_message(self, message):
        """Apply a value update from the network (no echo back to server)."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        if not self._value == value.valueUint64:
            self._value = self._data_type(value.valueUint64)
            if self._interactive_widget:
                self._interactive_widget.children[0].value = self._data_type(value.valueUint64)
            self._trigger_callbacks(self._value)
        return True

    def set_space_from_message(self, message):
        """Apply a parameter-space update from the network.

        Bug fix: the space used to be written element-by-element into an
        uninitialized array created with np.ndarray((count)); build the
        array (float dtype, matching the previous storage) directly.
        """
        values = TincProtocol.ParameterSpaceValues()
        message.Unpack(values)
        self._ids = values.ids
        self._values = np.array([v.valueUint64 for v in values.values],
                                dtype=float)
        return True

    def set_min_from_message(self, message):
        """Apply a minimum update from the network."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        self._minimum = value.valueUint64
        return True

    def set_max_from_message(self, message):
        """Apply a maximum update from the network."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        self._maximum = value.valueUint64
        return True

    def set_elements(self, elements):
        """Set the element names shown by the dropdown widget."""
        self.elements = elements
        if self._interactive_widget is not None:
            self._configure_widget()

    def get_current_elements(self):
        """Return the elements whose bits are set in the current value
        (the value is interpreted as a bitmask over self.elements)."""
        b = self._value
        current = []
        for e in self.elements:
            if b & 1 == 1:
                current.append(e)
            b = b >> 1
        return current

    def interactive_widget(self):
        """Return (lazily creating) a dropdown widget bound to this
        parameter."""
        if self._interactive_widget is None:
            self._interactive_widget = interactive(
                self.set_from_internal_widget,
                widget_value=widgets.Dropdown(
                    options = [],
                    description = self.id,
                    disabled = False
                ))
            self._configure_widget()
        return self._interactive_widget

    def _configure_widget(self):
        """Push the element names into the dropdown's options."""
        if self._interactive_widget is not None:
            widget = self._interactive_widget.children[0]
            widget.options = self.elements

    def set_from_internal_widget(self, widget_value):
        """Widget callback: map the selected element name back to its index.

        Bug fix: a bare ``except:`` used to swallow every exception
        (including KeyboardInterrupt); only ValueError from ``list.index``
        means an unknown element.
        """
        try:
            index = self.elements.index(widget_value)
        except ValueError:
            print(f'Invalid value: {widget_value}')
            return
        self._value = index
        self._interactive_widget.children[0].value = widget_value
        if self.tinc_client:
            self.tinc_client.send_parameter_value(self)
        self._trigger_callbacks(self._value)
class ParameterColor(Parameter):
    """RGBA color parameter; the value is a list of four floats in [0, 1].

    TODO merge color with ParameterVec, make ParameterColor sub class of
    ParameterVec.
    """

    def __init__(self, tinc_id: str, group: str = "", default_value = [0,0,0,0], tinc_client = None):
        super().__init__(tinc_id, group, default_value = default_value, tinc_client = tinc_client)

    def _init(self, default_value):
        # Coerce any sequence of numbers into a list of floats.
        self._data_type = lambda l: [float(f) for f in l]
        # Bug fix: copy the incoming list so self.default never aliases the
        # shared mutable default argument of __init__ (mutating one instance
        # would otherwise corrupt every later instance).
        if default_value is None:
            self.default = [0,0,0,0]
        else:
            self.default = list(default_value)
        self._value = self.default
        # NOTE(review): other parameter classes store these as _minimum /
        # _maximum; confirm whether the missing underscore is intentional.
        self.minimum = [0,0,0,0]
        self.maximum = [1,1,1,1]

    def set_value_from_message(self, message):
        """Apply a value update (list of floats) from the network."""
        value = TincProtocol.ParameterValue()
        message.Unpack(value)
        new_value = [v.valueFloat for v in value.valueList]
        if not self._value == new_value:
            self._value = new_value
            self._trigger_callbacks(new_value)
        return True

    def set_space_from_message(self, message):
        # Colors do not define a parameter space; report and ignore.
        print("No parameter space for ParameterColor")
        return True

    def set_min_from_message(self, message):
        # The minimum is fixed at [0, 0, 0, 0]; report and ignore.
        print("Can't set minimum for ParameterColor")
        return True

    def set_max_from_message(self, message):
        # The maximum is fixed at [1, 1, 1, 1]; report and ignore.
        print("Can't set maximum for ParameterColor")
        return True
class ParameterBool(Parameter):
def __init__(self, tinc_id: str, group: str = "", default_value = False, tinc_client = None):
super().__init__(tinc_id, group, default_value = default_value, tinc_client = tinc_client)
def _init(self, default_value):
self._data_type = bool
self.default = | |
from functools import reduce
from copy import copy
from time import time
import numpy as np
import numpy.random as npr
import numpy.linalg as la
import scipy.linalg as sla
from scipy.linalg import solve_discrete_lyapunov, solve_discrete_are
from utility.matrixmath import vec, mat, mdot, matmul_lr, specrad, dlyap, dare, dare_gain
from quadtools import quadblock, quadstack, unquadblock, unquadstack
class LinearSystem:
    """Container for a discrete-time linear system with noise data.

    Stores the nominal matrices (A, B, C), the noise coefficient arrays
    (a, b, c) with their associated matrix stacks (Aa, Bb, Cc), the cost
    matrix Q and the noise covariance W, and caches the state/input/output
    dimensions (n, m, p).
    """

    def __init__(self, A, B, C, a, Aa, b, Bb, c, Cc, Q, W):
        self.A, self.B, self.C = A, B, C
        self.a, self.b, self.c = a, b, c
        self.Aa, self.Bb, self.Cc = Aa, Bb, Cc
        self.Q = Q
        self.W = W
        # Dimensions: n states, m inputs, p outputs.
        self.n, self.m, self.p = A.shape[0], B.shape[1], C.shape[0]

    @property
    def data(self):
        """All constructor arguments, in constructor order."""
        return (self.A, self.B, self.C, self.a, self.Aa,
                self.b, self.Bb, self.c, self.Cc, self.Q, self.W)

    @property
    def dims(self):
        """Tuple (n, m, p) of state, input and output dimensions."""
        return self.n, self.m, self.p

    @property
    def AB(self):
        """Horizontal concatenation [A B]."""
        return np.hstack([self.A, self.B])

    @property
    def AC(self):
        """Vertical concatenation [A; C]."""
        return np.vstack([self.A, self.C])
class LinearSystemControlled(LinearSystem):
    def __init__(self, system, K, L):
        """Wrap a LinearSystem with a control gain K and estimator gain L.

        Args:
            system: a LinearSystem whose data tuple initializes this object.
            K: control gain (u = K @ x, per policy_improvement's convention).
            L: estimator gain (observer correction, see policy_improvement).
        """
        super().__init__(*system.data)
        self.K = K
        self.L = L
        # Zeros matrices (n x n), reused when building augmented blocks.
        self.Zn = np.zeros([self.n, self.n])
    @property
    def BK(self):
        # Input matrix times control gain: B @ K.
        return self.B @ self.K
    @property
    def LC(self):
        # Estimator gain times output matrix: L @ C.
        return self.L @ self.C
    @property
    def F(self):
        # Compensator matrix: A + B K - L C.
        return self.A + self.BK - self.LC
    @property
    def Phi_aug(self):
        # Augmented closed-loop transition matrix over (state, estimate).
        return np.block([[self.A, self.BK],
                         [self.LC, self.F]])
    @property
    def AK(self):
        # State-feedback closed loop: A + B K.
        return self.A + self.BK
    @property
    def AL(self):
        # Estimator closed loop: A - L C.
        return self.A - self.LC
    @property
    def IK(self):
        # Stacked [I; K], maps x to (x, u).
        return np.block([[np.eye(self.n)], [self.K]])
    @property
    def IL(self):
        # Side-by-side [I, L].
        return np.block([np.eye(self.n), self.L])
    @property
    def QK(self):
        # Cost matrix under the control gain.
        # NOTE(review): assumes matmul_lr(T, M) computes T-congruence of M
        # (T applied on both sides) -- confirm against utility.matrixmath.
        return matmul_lr(self.IK.T, self.Q)
    @property
    def WL(self):
        # Noise covariance under the estimator gain (same matmul_lr caveat).
        return matmul_lr(self.IL, self.W)
    @property
    def IK_aug(self):
        # Block-diagonal diag(I, K) for the augmented system.
        return sla.block_diag(np.eye(self.n), self.K)
    @property
    def IL_aug(self):
        # Block-diagonal diag(I, L) for the augmented system.
        return sla.block_diag(np.eye(self.n), self.L)
    @property
    def QK_aug(self):
        # Augmented cost matrix under diag(I, K).
        return matmul_lr(self.IK_aug.T, self.Q)
    @property
    def WL_aug(self):
        # Augmented noise covariance under diag(I, L).
        return matmul_lr(self.IL_aug, self.W)
    @property
    def linop1(self):
        # Closed-loop quadratic cost transition operator
        # (transpose counterpart of linop2). Acts on vec(P) via
        # kron(Phi', Phi'); each multiplicative-noise channel contributes
        # one additional kron term weighted by its coefficient.
        linop = np.kron(self.Phi_aug.T, self.Phi_aug.T)
        for i in range(self.a.size):
            # State-dependent (A) noise enters only the (1,1) block.
            PhiAa = np.block([[self.Aa[i], self.Zn],
                              [self.Zn, self.Zn]])
            linop += self.a[i]*np.kron(PhiAa.T, PhiAa.T)
        for i in range(self.b.size):
            # Input-dependent (B) noise enters through the control gain K.
            PhiBb = np.block([[self.Zn, np.dot(self.Bb[i], self.K)],
                              [self.Zn, self.Zn]])
            linop += self.b[i]*np.kron(PhiBb.T, PhiBb.T)
        for i in range(self.c.size):
            # Output-dependent (C) noise enters through the estimator gain L.
            PhiCc = np.block([[self.Zn, self.Zn],
                              [np.dot(self.L, self.Cc[i]), self.Zn]])
            linop += self.c[i]*np.kron(PhiCc.T, PhiCc.T)
        return linop
    @property
    def linop2(self):
        # Closed-loop second moment transition operator.
        # Same structure as linop1 but without the transposes.
        linop = np.kron(self.Phi_aug, self.Phi_aug)
        for i in range(self.a.size):
            PhiAa = np.block([[self.Aa[i], self.Zn],
                              [self.Zn, self.Zn]])
            linop += self.a[i]*np.kron(PhiAa, PhiAa)
        for i in range(self.b.size):
            PhiBb = np.block([[self.Zn, np.dot(self.Bb[i], self.K)],
                              [self.Zn, self.Zn]])
            linop += self.b[i]*np.kron(PhiBb, PhiBb)
        for i in range(self.c.size):
            PhiCc = np.block([[self.Zn, self.Zn],
                              [np.dot(self.L, self.Cc[i]), self.Zn]])
            linop += self.c[i]*np.kron(PhiCc, PhiCc)
        return linop
@property
def P_aug(self):
linop = self.linop1
r = specrad(linop)
if r > 1:
return np.full((2*self.n, 2*self.n), np.inf)
else:
I = np.eye((2*self.n)*(2*self.n))
vQK = vec(self.QK_aug)
return mat(la.solve(I - linop, vQK))
@property
def S_aug(self):
linop = self.linop2
r = specrad(linop)
if r > 1:
return np.full((2*self.n, 2*self.n), np.inf)
else:
I = np.eye((2*self.n)*(2*self.n))
vWL = vec(self.WL_aug)
return mat(la.solve(I - linop, vWL))
    @property
    def X(self):
        # Stacked value matrices (P, Phat, S, Shat) for the current gains,
        # obtained by collapsing the augmented 2n-by-2n solutions to n-by-n.
        # NOTE: At the optimum, P_aug_xu + P_aug_uu = 0, but not for suboptimal policies.
        # NOTE: At the optimum, S_aug_xy - S_aug_yy = 0, but not for suboptimal policies.
        P_aug_xx, P_aug_uu, P_aug_xu, P_aug_ux = unquadblock(self.P_aug, self.n)
        S_aug_xx, S_aug_yy, S_aug_xy, S_aug_yx = unquadblock(self.S_aug, self.n)
        P = P_aug_xx + P_aug_xu + P_aug_ux + P_aug_uu
        Phat = P_aug_uu
        S = S_aug_xx - S_aug_xy - S_aug_yx + S_aug_yy
        Shat = S_aug_yy
        return quadstack(P, Phat, S, Shat)
    def qfun(self, X):
        """Compute state-action value (Q-function) matrices G and H from X.

        X stacks (P, Phat, S, Shat). G is the control Q-function and H the
        estimator Q-function, each with multiplicative-noise corrections
        added to the relevant blocks.
        """
        P, Phat, S, Shat = unquadstack(X)
        # Control Q-function (G)
        # Get the noiseless part
        G = self.Q + matmul_lr(self.AB.T, P)
        # Add the noisy part in Guu block:
        # einsum 'x,xji,jk,xkl->il' = sum_x b[x] * Bb[x]' M Bb[x]
        Gxx, Guu, Gxu, Gux = unquadblock(G, self.n)
        Guu += np.einsum('x,xji,jk,xkl->il', self.b, self.Bb, P, self.Bb)
        Guu += np.einsum('x,xji,jk,xkl->il', self.b, self.Bb, Phat, self.Bb)
        # Estimator Q-function (H)
        # Get the noiseless part in Hyy block
        H = self.W + matmul_lr(self.AC, S)
        # Add the noisy part:
        # einsum 'x,xij,jk,xlk->il' = sum_x c[x] * Cc[x] M Cc[x]'
        Hxx, Hyy, Hxy, Hyx = unquadblock(H, self.n)
        Hyy += np.einsum('x,xij,jk,xlk->il', self.c, self.Cc, S, self.Cc)
        Hyy += np.einsum('x,xij,jk,xlk->il', self.c, self.Cc, Shat, self.Cc)
        # Compute gains for use in computing the Gxx, Hxx blocks
        K = -la.solve(Guu, Gux)  # Control gain u = K*x
        L = la.solve(Hyy, Hyx).T  # Estimator gain xhat = A*x + B*u + L*(y - C*xhat)
        # Gain-weighted second-stage value matrices.
        LX2L = np.dot(L.T, np.dot(Phat, L))
        KX4K = np.dot(K, np.dot(Shat, K.T))
        # Noise corrections to the xx blocks (same einsum conventions as above).
        Gxx += np.einsum('x,xji,jk,xkl->il', self.a, self.Aa, P, self.Aa)
        Gxx += np.einsum('x,xji,jk,xkl->il', self.a, self.Aa, Phat, self.Aa)
        Gxx += np.einsum('x,xji,jk,xkl->il', self.c, self.Cc, LX2L, self.Cc)
        Hxx += np.einsum('x,xij,jk,xlk->il', self.a, self.Aa, S, self.Aa)
        Hxx += np.einsum('x,xij,jk,xlk->il', self.a, self.Aa, Shat, self.Aa)
        Hxx += np.einsum('x,xij,jk,xlk->il', self.b, self.Bb, KX4K, self.Bb)
        # Put the blocks together
        G = quadblock(Gxx, Guu, Gxu, Gux)
        H = quadblock(Hxx, Hyy, Hxy, Hyx)
        return G, H
def print_diagnostic(self, X, K, L, X_opt):
P_opt, Phat_opt, S_opt, Shat_opt = unquadstack(X_opt)
P, Phat, S, Shat = unquadstack(X)
print("[" + ' '.join(["%+.6e" % val for val in K[0]]) + "] ", end='')
print("[" + ' '.join(["%+.6e" % val for val in L.T[0]]) + "] ", end='')
print("%.6e " % la.norm(P - P_opt), end='')
print("%.6e " % la.norm(Phat - Phat_opt), end='')
print("%.6e " % la.norm(S - S_opt), end='')
print("%.6e " % la.norm(Shat - Shat_opt), end='')
print('')
    def policy_evaluation(self):
        """Evaluate the current (K, L) policy; returns the stacked X matrices."""
        # Compute value function based on current policy
        # It is trivially (if tediously) verified that
        # P == P_aug_xx + P_aug_xu + P_aug_ux + P_aug_uu
        # S == S_aug_xx - S_aug_xy - S_aug_yx + S_aug_yy
        # by expanding the relevant Lyapunov equations.
        return self.X
def policy_improvement(self, X, return_qfun=False):
# Compute state-action value matrices
G, H = self.qfun(X)
Gxx, Guu, Gxu, Gux = unquadblock(G, self.n)
Hxx, Hyy, Hxy, Hyx = unquadblock(H, self.n)
# Compute gains that improve based on current state-action value functions
K = -la.solve(Guu, Gux) # Control gain, u = K @ x
L = la.solve(Hyy, Hyx).T # Estimator gain, xhat = A @ x + B @ u + L @ (y - C @ xhat)
if return_qfun:
return K, L, G, H
else:
return K, L
    def ricc(self, X):
        """Apply the value-iteration (Riccati) operator once to the stacked X."""
        # Riccati operator for multiplicative noise LQG
        # See <NAME>, TAC 1992 https://ieeexplore.ieee.org/document/135491
        # Get gain and Q function
        K, L, G, H = self.policy_improvement(X, return_qfun=True)
        Gxx, Guu, Gxu, Gux = unquadblock(G, self.n)
        Hxx, Hyy, Hxy, Hyx = unquadblock(H, self.n)
        # Closed-loop system matrices
        ABK = self.A + np.dot(self.B, K)
        ALC = self.A - np.dot(L, self.C)
        # Form the RHS
        # Gain-correction terms from the Q-function blocks.
        Z1 = np.dot(Gxu, la.solve(Guu, Gux))
        Z3 = np.dot(Hxy, la.solve(Hyy, Hyx))
        # X[1] is Phat and X[3] is Shat (quadstack order P, Phat, S, Shat,
        # per unquadstack usage in qfun).
        E = np.dot(ALC.T, np.dot(X[1], ALC))
        F = np.dot(ABK, np.dot(X[3], ABK.T))
        Y1 = Gxx - Z1
        Y2 = E + Z1
        Y3 = Hxx - Z3
        Y4 = F + Z3
        return quadstack(Y1, Y2, Y3, Y4)
def policy_iteration(self, num_iters, convergence_tol=1e-12, show_diagnostic=False, save_hist=False, X_opt=None):
if save_hist:
X_hist = np.zeros([num_iters+1, 4, self.n, self.n])
K_hist = np.zeros([num_iters+1, self.m, self.n])
L_hist = np.zeros([num_iters+1, self.n, self.p])
K_hist[0] = np.copy(self.K)
L_hist[0] = np.copy(self.L)
i = 0
diff_mag = np.inf
X = np.full(shape=(self.n, self.n), fill_value=np.inf)
time_start = time()
while diff_mag > convergence_tol:
X_last = np.copy(X)
if i >= num_iters:
break
X = self.policy_evaluation()
K, L = self.policy_improvement(X)
self.K, self.L = K, L
diff_mag = la.norm(X - X_last)
if save_hist:
X_hist[i] = X
K_hist[i+1] = K
L_hist[i+1] = L
if show_diagnostic:
self.print_diagnostic(X, K, L, X_opt)
i += 1
time_end = time()
time_elapsed = time_end - time_start
# Truncate unused portion
if save_hist:
if i < num_iters:
X_hist = X_hist[0:i]
K_hist = K_hist[0:i]
L_hist = L_hist[0:i]
X = self.policy_evaluation()
if save_hist:
X_hist[-1] = X
if save_hist:
return dict(X=X, K=K, L=L, X_hist=X_hist, K_hist=K_hist, L_hist=L_hist, time_elapsed=time_elapsed)
else:
return dict(X=X, K=K, L=L, time_elapsed=time_elapsed)
def value_iteration(self, num_iters, convergence_tol=1e-12, show_diagnostic=False, save_hist=False, X_opt=None):
X = np.copy(self.X)
if save_hist:
X_hist = np.zeros([num_iters+1, 4, self.n, self.n])
K_hist = np.zeros([num_iters+1, self.m, self.n])
L_hist = np.zeros([num_iters+1, self.n, self.p])
X_hist[0] = X
i = 0
diff_mag = np.inf
time_start = time()
while diff_mag > convergence_tol:
X_last = np.copy(X)
if i >= num_iters:
break
X = self.ricc(X)
diff_mag = la.norm(X - X_last)
if show_diagnostic or save_hist:
K, L = self.policy_improvement(X)
if save_hist:
X_hist[i+1] = X
K_hist[i] = K
L_hist[i] = L
if show_diagnostic:
self.print_diagnostic(X, K, L, X_opt)
i += 1
time_end = time()
time_elapsed = time_end - time_start
# Truncate unused portion
if save_hist:
if i < num_iters:
X_hist = X_hist[0:i]
K_hist = K_hist[0:i]
L_hist = L_hist[0:i]
K, L | |
self.species_dict[label].rotors_dict.values():
if rotor['scan'] == job.scan:
used_trsh_methods = rotor['trsh_methods']
break
# A lower conformation was found
if 'change conformer' in methods:
# We will delete all of the jobs no matter we can successfully change to the conformer.
# If succeed, we have to cancel jobs to avoid conflicts
# If not succeed, we are in a situation that we find a lower conformer, but either
# this is a incorrect conformer or we have applied this troubleshooting before, but it
# didn't yield a good result.
self.delete_all_species_jobs(label)
new_xyz = methods['change conformer']
# Check if the same conformer is used in previous troubleshooting
for used_trsh_method in used_trsh_methods:
if 'change conformer' in used_trsh_method \
and compare_confs(new_xyz, used_trsh_method['change conformer']):
# Find we have used this conformer for troubleshooting. Invalid the troubleshooting.
logger.error(f'The change conformer method for {label} is invalid. '
f'ARC will not change to the same conformer twice.')
break
else:
# If the conformer is not used, check isomorphism
is_isomorphic = self.species_dict[label].check_xyz_isomorphism(
allow_nonisomorphic_2d=self.allow_nonisomorphic_2d,
xyz=new_xyz)
if is_isomorphic:
self.species_dict[label].final_xyz = new_xyz
# Remove all completed rotor calculation information
for rotor in self.species_dict[label].rotors_dict.values():
# don't initialize all parameters, e.g., `times_dihedral_set` needs to remain as is
rotor['scan_path'] = ''
rotor['invalidation_reason'] = ''
rotor['success'] = None
rotor.pop('symmetry', None)
if rotor['scan'] == job.scan:
rotor['times_dihedral_set'] += 1
# We can save the change conformer trsh info, but other trsh methods like
# freezing or increasing scan resolution can be cleaned, otherwise, they may
# not be troubleshot
rotor['trsh_methods'] = [trsh_method for trsh_method in rotor['trsh_methods']
if 'change conformer' in trsh_method]
# re-run opt (or composite) on the new initial_xyz with the desired dihedral
if not self.composite_method:
self.run_opt_job(label)
else:
self.run_composite_job(label)
trsh_success = True
actual_actions = methods
return trsh_success, actual_actions
# The conformer is wrong, or we are in a loop changing to the same conformers again
self.output[label]['errors'] += \
f'A lower conformer was found for {label} via a torsion mode, ' \
f'but it is not isomorphic with the 2D graph representation ' \
f'{self.species_dict[label].mol.copy(deep=True).to_smiles()}. ' \
f'Not calculating this species.'
self.output[label]['conformers'] += 'Unconverged'
self.output[label]['convergence'] = False
else:
# Freezing or increasing scan resolution
scan_list = [rotor_dict['scan'] for rotor_dict in
self.species_dict[label].rotors_dict.values()]
try:
scan_trsh, scan_res = trsh_scan_job(label=label,
scan_res=job.scan_res,
scan=job.scan,
scan_list=scan_list,
methods=methods,
log_file=job.local_path_to_output_file,
)
except TrshError as e:
logger.error(f'Troubleshooting of the rotor scan on {job.scan} for '
f'{label} failed. Got: {e}\nJob info:\n{job}')
except InputError as e:
logger.debug(f'Got invalid input for trsh_scan_job: {e}\nJob info:\n{job}')
else:
if scan_trsh or job.scan_res != scan_res \
and {'scan_trsh': scan_trsh, 'scan_res': scan_res} not in used_trsh_methods:
# Valid troubleshooting method for freezing or increasing resolution
trsh_success = True
actual_actions = {'scan_trsh': scan_trsh, 'scan_res': scan_res}
self.run_job(label=label,
xyz=job.xyz,
level_of_theory=job.level,
job_type='scan',
scan=job.scan,
pivots=job.pivots,
scan_trsh=scan_trsh,
scan_res=scan_res)
return trsh_success, actual_actions
    def troubleshoot_opt_jobs(self, label):
        """
        We're troubleshooting for opt jobs.
        First check for server status and troubleshoot if needed. Then check for ESS status and troubleshoot
        if needed. Finally, check whether or not the last job had fine=True, add if it didn't run with fine.

        Args:
            label (str): The species label.
        """
        previous_job_num, latest_job_num = -1, -1
        job = None
        for job_name in self.job_dict[label]['opt'].keys():  # get latest Job object for the species / TS
            # Job names look like 'opt_a<N>'; the numeric suffix orders them.
            job_name_int = int(job_name[5:])
            if job_name_int > latest_job_num:
                previous_job_num = latest_job_num
                latest_job_num = job_name_int
                job = self.job_dict[label]['opt'][job_name]
        if job.job_status[0] == 'done':
            if job.job_status[1]['status'] == 'done':
                if job.fine:
                    # run_opt_job should not be called if all looks good...
                    logger.error(f'opt job for {label} seems right, yet "run_opt_job" was called.')
                    raise SchedulerError(f'opt job for {label} seems right, yet "run_opt_job" was called.')
                else:
                    # Run opt again using a finer grid.
                    self.parse_opt_geo(label=label, job=job)
                    xyz = self.species_dict[label].final_xyz
                    self.species_dict[label].initial_xyz = xyz  # save for troubleshooting, since trsh goes by initial
                    self.run_job(label=label, xyz=xyz, level_of_theory=self.opt_level, job_type='opt', fine=True)
            else:
                trsh_opt = True
                # job passed on the server, but failed in ESS calculation
                if previous_job_num >= 0 and job.fine:
                    previous_job = self.job_dict[label]['opt']['opt_a' + str(previous_job_num)]
                    if not previous_job.fine and previous_job.job_status[0] == 'done' \
                            and previous_job.job_status[1]['status'] == 'done':
                        # The present job with a fine grid failed in the ESS calculation.
                        # A *previous* job without a fine grid terminated successfully on the server and ESS.
                        # So use the xyz determined w/o the fine grid, and output an error message to alert users.
                        logger.error(f'Optimization job for {label} with a fine grid terminated successfully '
                                     f'on the server, but crashed during calculation. NOT running with fine '
                                     f'grid again.')
                        self.parse_opt_geo(label=label, job=previous_job)
                        trsh_opt = False
                if trsh_opt:
                    self.troubleshoot_ess(label=label,
                                          job=job,
                                          level_of_theory=self.opt_level)
        else:
            # The job itself failed on the server; delegate to server-level troubleshooting.
            job.troubleshoot_server()
    def troubleshoot_ess(self,
                         label: str,
                         job: Job,
                         level_of_theory: Union[Level, dict, str],
                         conformer: int = -1):
        """
        Troubleshoot issues related to the electronic structure software, such as convergence.

        Delegates the choice of remedy to ``trsh_ess_job`` and re-runs the job with
        the suggested adjustments unless instructed not to.

        Args:
            label (str): The species label.
            job (Job): The job object to troubleshoot.
            level_of_theory (Level, dict, str): The level of theory to use.
            conformer (int, optional): The conformer index (-1 means not a conformer job).
        """
        # Normalize to a Level object regardless of input representation.
        level_of_theory = Level(repr=level_of_theory)
        logger.info('\n')
        logger.warning(f'Troubleshooting {label} job {job.job_name} which failed with status: '
                       f'"{job.job_status[1]["status"]},"\n'
                       f'with keywords: {job.job_status[1]["keywords"]}\n'
                       f'in {job.software}.\n'
                       f'The error "{job.job_status[1]["error"]}" was derived from the following line in the log '
                       f'file:\n"{job.job_status[1]["line"]}".')
        # Pick the geometry to restart from: the specific conformer, or the
        # best available species geometry.
        if conformer != -1:
            xyz = self.species_dict[label].conformers[conformer]
        else:
            xyz = self.species_dict[label].final_xyz or self.species_dict[label].initial_xyz
        # An unknown error is first treated as a server glitch: resubmit on a
        # different node (only once, tracked via ess_trsh_methods).
        if 'Unknown' in job.job_status[1]['keywords'] and 'change_node' not in job.ess_trsh_methods:
            job.ess_trsh_methods.append('change_node')
            job.troubleshoot_server()
            if job.job_name not in self.running_jobs[label]:
                self.running_jobs[label].append(job.job_name)  # mark as a running job
        if job.software == 'gaussian':
            # Preserve the checkfile so a restart can reuse converged info.
            if self.species_dict[label].checkfile is None:
                self.species_dict[label].checkfile = job.checkfile
        # determine if the species is a hydrogen (or its isotope) atom
        is_h = self.species_dict[label].number_of_atoms == 1 and \
            self.species_dict[label].mol.atoms[0].element.symbol in ['H', 'D', 'T']
        # Ask the troubleshooting logic what to change for the re-run.
        output_errors, ess_trsh_methods, remove_checkfile, level_of_theory, \
            software, job_type, fine, trsh_keyword, memory, shift, cpu_cores, dont_rerun = \
            trsh_ess_job(label=label,
                         level_of_theory=level_of_theory,
                         server=job.server,
                         job_status=job.job_status[1],
                         is_h=is_h,
                         job_type=job.job_type,
                         num_heavy_atoms=self.species_dict[label].number_of_heavy_atoms,
                         software=job.software,
                         fine=job.fine,
                         memory_gb=job.total_job_memory_gb,
                         cpu_cores=job.cpu_cores,
                         ess_trsh_methods=job.ess_trsh_methods,
                         available_ess=list(self.ess_settings.keys()),
                         )
        for output_error in output_errors:
            self.output[label]['errors'] += output_error
        if remove_checkfile:
            # The checkfile itself may be the problem; discard it.
            self.species_dict[label].checkfile = None
        job.ess_trsh_methods = ess_trsh_methods
        if not dont_rerun:
            # Re-submit the job with the troubleshooting adjustments applied.
            self.run_job(label=label,
                         xyz=xyz,
                         level_of_theory=level_of_theory,
                         software=software,
                         memory=memory,
                         job_type=job_type,
                         fine=fine,
                         ess_trsh_methods=ess_trsh_methods,
                         trsh=trsh_keyword,
                         conformer=conformer,
                         scan=job.scan,
                         pivots=job.pivots,
                         scan_res=job.scan_res,
                         shift=shift,
                         directed_dihedrals=job.directed_dihedrals,
                         directed_scans=job.directed_scans,
                         directed_scan_type=job.directed_scan_type,
                         rotor_index=job.rotor_index,
                         cpu_cores=cpu_cores,
                         )
        # Persist the troubleshooting state so a restart can resume correctly.
        self.save_restart_dict()
    def troubleshoot_conformer_isomorphism(self, label: str):
        """
        Troubleshoot conformer optimization for a species that failed isomorphic test in
        ``determine_most_stable_conformer``.

        Re-runs all conformers at a higher level of theory suggested by
        ``trsh_conformer_isomorphism``, or marks the species as failed when
        no further method is available.

        Args:
            label (str): The species label.
        """
        if self.species_dict[label].is_ts:
            raise SchedulerError('The troubleshoot_conformer_isomorphism() method does not yet deal with TSs.')
        num_of_conformers = len(self.species_dict[label].conformers)
        if not num_of_conformers:
            raise SchedulerError('The troubleshoot_conformer_isomorphism() method got zero conformers.')

        # use the first conformer of a species to determine applicable troubleshooting method
        job = self.job_dict[label]['conformers'][0]

        level_of_theory = trsh_conformer_isomorphism(software=job.software, ess_trsh_methods=job.ess_trsh_methods)

        if level_of_theory is None:
            # All built-in methods are exhausted; give up on this species.
            logger.error(f'ARC has attempted all built-in conformer isomorphism troubleshoot methods for species '
                         f'{label}. No conformer for this species was found to be isomorphic with the 2D graph '
                         f'representation {self.species_dict[label].mol.copy(deep=True).to_smiles()}. '
                         f'NOT optimizing this species.')
            self.output[label]['conformers'] += 'Error: No conformer was found to be isomorphic with the 2D' \
                                                ' graph representation!; '
        else:
            logger.info(f'Troubleshooting conformer job in {job.software} using {level_of_theory} for species {label}')

            # rerun conformer job at higher level for all conformers
            for conformer in range(0, num_of_conformers):
                # initial xyz before troubleshooting
                xyz = self.species_dict[label].conformers_before_opt[conformer]

                job = self.job_dict[label]['conformers'][conformer]
                # Record the method so it is not attempted twice.
                if 'Conformers: ' + level_of_theory not in job.ess_trsh_methods:
                    job.ess_trsh_methods.append('Conformers: ' + level_of_theory)

                self.run_job(label=label, xyz=xyz, level_of_theory=level_of_theory, software=job.software,
                             job_type='conformer', ess_trsh_methods=job.ess_trsh_methods, conformer=conformer)
def delete_all_species_jobs(self, label: str):
"""
Delete all jobs of a species/TS.
Args:
label (str): The species label.
"""
logger.debug(f'Deleting all jobs for species {label}')
for job_dict in self.job_dict[label].values():
for job_name, job in job_dict.items():
if job_name in self.running_jobs[label]:
logger.info(f'Deleted job {job_name}')
job.delete()
self.running_jobs[label] = list()
def restore_running_jobs(self):
"""
Make Job objects for jobs which were running in the previous session.
Important for the restart feature so long jobs won't be ran twice.
"""
jobs = self.restart_dict['running_jobs']
if not jobs or not any([job for job in jobs.values()]):
del self.restart_dict['running_jobs']
self.running_jobs = dict()
logger.debug('It seems that there are no running jobs specified in the ARC restart file. '
'Assuming all jobs have finished.')
else:
for spc_label in jobs.keys():
if spc_label not in self.running_jobs:
self.running_jobs[spc_label] = list()
for job_description in jobs[spc_label]:
if 'conformer' not in job_description or job_description['conformer'] < 0:
self.running_jobs[spc_label].append(job_description['job_name'])
else:
self.running_jobs[spc_label].append(f'conformer{job_description["conformer"]}')
for species in self.species_list:
if species.label == spc_label:
break
else:
raise SchedulerError(f'Could not find | |
the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations, should have shape (N, K) for `K` runs
each consisting of `N` observations if `obs_axis`
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
`yhat` shape must be compatible with `y`.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
`scores` shape must be compatible with `y`.
thresholds : np.ndarray[float32, float64]
the classification thresholds for which the classifier scores is evaluated,
is inclusive.
obs_axis : int, default=0
the axis containing the observations for a single run, e.g. 0 when the
labels and scoress are stored as columns
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return the metrics confusion matrix and metrics as a DataFrame
Returns
-------
conf_mat : np.ndarray, pd.DataFrame
the confusion_matrices where the rows contain the counts for a
run, [i, 0] = TN, [i, 1] = FP, [i, 2] = FN, [i, 3] = TP
metrics : np.ndarray, pd.DataFrame
the computed metrics where the rows contain the metrics for a single
run
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
if not isinstance(obs_axis, int) or (obs_axis != 0 and obs_axis != 1):
raise TypeError("`obs_axis` must be either 0 or 1.")
y = check_array(
y,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_ext_types,
)
if scores is not None:
scores = check_array(
scores,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_float,
)
if not isinstance(threshold, float):
raise TypeError("`threshold` must be a float if scores is not None")
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_score_runs(y, scores, threshold, obs_axis)
elif yhat is not None:
yhat = check_array(
yhat,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_ext_types,
)
if yhat.size != y.size:
raise ValueError('`yhat` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_runs(y, yhat, obs_axis)
else:
raise TypeError("`yhat` must not be None if `scores` is None")
metrics = _core.binary_metrics_2d(conf_mat, fill)
if return_df:
return (
confusion_matrices_to_dataframe(conf_mat),
metrics_to_dataframe(metrics)
)
return conf_mat, metrics
def binary_metrics_runs_thresholds(
        y, scores, thresholds, n_obs=None, fill=1.0, obs_axis=0):
    """Compute binary classification metrics over runs and thresholds.

    `bmetrics_runs_thresh` is an alias for this function.

    Computes the following metrics where [i] indicates the i'th column in the
    array.

        * [0] neg.precision aka Negative Predictive Value (NPV)
        * [1] pos.precision aka Positive Predictive Value (PPV)
        * [2] neg.recall aka True Negative Rate (TNR) aka Specificity
        * [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
        * [4] neg.f1 score
        * [5] pos.f1 score
        * [6] False Positive Rate (FPR)
        * [7] False Negative Rate (FNR)
        * [8] Accuracy
        * [9] MCC

    Parameters
    ----------
    y : np.ndarray[bool, int32, int64, float32, float64]
        the ground truth labels, if different runs have different number of
        observations the n_obs parameter must be set to avoid computing metrics
        of the filled values. If ``y`` is one dimensional and ``scores`` is not
        the ``y`` values are assumed to be the same for each run.
    scores : np.array[float32, float64]
        the classifier scores, if different runs have different number of
        observations the n_obs parameter must be set to avoid computing metrics
        of the filled values.
    thresholds : np.array[float32, float64]
        classification thresholds
    n_obs : np.array[int64], default=None
        the number of observations per run, if None the same number of
        observations are assumed to exist for each run.
    fill : double, default=1.0
        value to fill when a metric is not defined, e.g. divide by zero.
    obs_axis : {0, 1}, default=0
        0 if the observations for a single run is a column (e.g. from
        pd.DataFrame) and 1 otherwise

    Returns
    -------
    conf_mat : np.ndarray[int64]
        3D array where the rows contain the counts for a threshold,
        the columns the confusion matrix entries and the slices the counts for
        a run (2D when a single threshold is passed)
    metrics : np.ndarray[float64]
        3D array where the first axis is the threshold, the second the metrics
        and the third the run (2D when a single threshold is passed)
    """
    thresholds = check_array(
        thresholds,
        max_dim=1,
        dtype_check=_convert_to_float,
    )
    scores = check_array(
        scores,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_float,
    )
    n_runs = scores.shape[1 - obs_axis]
    max_obs = scores.shape[obs_axis]
    # Broadcast a single label vector/column to all runs.
    # NOTE(review): this tiling places runs along axis 1, which assumes
    # obs_axis == 0; confirm behavior for obs_axis == 1 with 1D ``y``.
    if y.ndim == 1:
        y = np.tile(y[:, None], n_runs)
    elif y.shape[1] == 1 and y.shape[0] >= 2:
        y = np.tile(y, n_runs)
    y = check_array(
        y,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_ext_types,
    )
    n_thresholds = thresholds.size
    if n_obs is None:
        # Assume every run has the full number of observations.
        n_obs = np.repeat(max_obs, n_runs)
    cm = _core.confusion_matrix_runs_thresholds(
        y, scores, thresholds, n_obs
    )
    mtr = _core.binary_metrics_2d(cm, fill)
    # cm and mtr are both flat arrays with order conf_mat, thresholds, runs
    # as this is fastest to create. However, how the cubes will be sliced
    # later doesn't align with this. So we incur a copy such that the cubes
    # have the optimal strides for further processing
    if n_thresholds == 1:
        # create cube from flat array
        cm = cm.reshape(n_runs, 4, order='C')
    else:
        # create cube from flat array
        cm = cm.reshape(n_runs, n_thresholds, 4, order='C')
        # reorder such that with F-order we get from smallest to largest
        # strides: conf_mat, runs, thresholds
        cm = np.swapaxes(np.swapaxes(cm, 0, 2), 1, 2)
        # make values over the confusion matrix and runs contiguous
        cm = np.asarray(cm, order='F')
        # change order s.t. we have thresholds, conf_mat, runs
        cm = np.swapaxes(cm.T, 1, 2)
    # create cube from flat array
    # order is runs, thresholds, metrics
    if n_thresholds == 1:
        # make values over the runs contiguous
        mtr = np.asarray(mtr.reshape(n_runs, 10, order='C'), order='F')
    else:
        mtr = mtr.reshape(n_runs, n_thresholds, 10, order='C')
        # make values over the runs contiguous
        mtr = np.asarray(mtr, order='F')
        # change order s.t. we have thresholds, metrics, runs
        mtr = np.swapaxes(mtr.T, 0, 1)
    return cm, mtr
def precision_recall(y, yhat=None, scores=None, threshold=None, fill=1.0, return_df=False):
r"""Compute precision and recall.
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
threshold : float, default=0.5
the classification threshold to which the classifier scores is evaluated,
is inclusive.
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return confusion matrix as pd.DataFrame
Returns
-------
confusion_matrix : np.ndarray, pd.DataFrame
the confusion_matrix with layout
[0, 0] = TN, [0, 1] = FP, [1, 0] = FN, [1, 1] = TP
prec_rec : np.ndarray, pd.DataFrame
precision and recall
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
y = check_array(
y,
max_dim=1,
dtype_check=_convert_to_ext_types
)
if scores is not None:
scores = check_array(
scores,
max_dim=1,
dtype_check=_convert_to_float,
)
if not isinstance(threshold, float):
raise TypeError("`threshold` must be a float if scores is not None")
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_score(y, scores, threshold)
elif yhat is not None:
yhat = check_array(
yhat,
max_dim=1,
dtype_check=_convert_to_ext_types,
)
if yhat.size != y.size:
raise ValueError('`yhat` and `y` must have equal length.')
conf_mat = _core.confusion_matrix(y, yhat)
else:
raise TypeError("`yhat` must not be | |
m.b3009 <= 0)
m.c875 = Constraint(expr= m.x874 - m.b3009 <= 0)
m.c876 = Constraint(expr= m.x875 - m.b3009 <= 0)
m.c877 = Constraint(expr= m.x876 - m.b3009 <= 0)
m.c878 = Constraint(expr= m.x877 - m.b3009 <= 0)
m.c879 = Constraint(expr= m.x878 - m.b3009 <= 0)
m.c880 = Constraint(expr= m.x879 - m.b3009 <= 0)
m.c881 = Constraint(expr= m.x880 - m.b3009 <= 0)
m.c882 = Constraint(expr= m.x881 - m.b3009 <= 0)
m.c883 = Constraint(expr= m.x882 - m.b3009 <= 0)
m.c884 = Constraint(expr= m.x883 - m.b3009 <= 0)
m.c885 = Constraint(expr= m.x884 - m.b3009 <= 0)
m.c886 = Constraint(expr= m.x885 - m.b3009 <= 0)
m.c887 = Constraint(expr= m.x886 - m.b3009 <= 0)
m.c888 = Constraint(expr= m.x887 - m.b3009 <= 0)
m.c889 = Constraint(expr= m.x888 - m.b3009 <= 0)
m.c890 = Constraint(expr= m.x889 - m.b3009 <= 0)
m.c891 = Constraint(expr= m.x890 - m.b3009 <= 0)
m.c892 = Constraint(expr= m.x891 - m.b3009 <= 0)
m.c893 = Constraint(expr= m.x892 - m.b3009 <= 0)
m.c894 = Constraint(expr= m.x893 - m.b3009 <= 0)
m.c895 = Constraint(expr= m.x894 - m.b3009 <= 0)
m.c896 = Constraint(expr= m.x895 - m.b3009 <= 0)
m.c897 = Constraint(expr= m.x896 - m.b3009 <= 0)
m.c898 = Constraint(expr= m.x897 - m.b3009 <= 0)
m.c899 = Constraint(expr= m.x898 - m.b3009 <= 0)
m.c900 = Constraint(expr= m.x899 - m.b3009 <= 0)
m.c901 = Constraint(expr= m.x900 - m.b3009 <= 0)
m.c902 = Constraint(expr= m.x901 - m.b3010 <= 0)
m.c903 = Constraint(expr= m.x902 - m.b3010 <= 0)
m.c904 = Constraint(expr= m.x903 - m.b3010 <= 0)
m.c905 = Constraint(expr= m.x904 - m.b3010 <= 0)
m.c906 = Constraint(expr= m.x905 - m.b3010 <= 0)
m.c907 = Constraint(expr= m.x906 - m.b3010 <= 0)
m.c908 = Constraint(expr= m.x907 - m.b3010 <= 0)
m.c909 = Constraint(expr= m.x908 - m.b3010 <= 0)
m.c910 = Constraint(expr= m.x909 - m.b3010 <= 0)
m.c911 = Constraint(expr= m.x910 - m.b3010 <= 0)
m.c912 = Constraint(expr= m.x911 - m.b3010 <= 0)
m.c913 = Constraint(expr= m.x912 - m.b3010 <= 0)
m.c914 = Constraint(expr= m.x913 - m.b3010 <= 0)
m.c915 = Constraint(expr= m.x914 - m.b3010 <= 0)
m.c916 = Constraint(expr= m.x915 - m.b3010 <= 0)
m.c917 = Constraint(expr= m.x916 - m.b3010 <= 0)
m.c918 = Constraint(expr= m.x917 - m.b3010 <= 0)
m.c919 = Constraint(expr= m.x918 - m.b3010 <= 0)
m.c920 = Constraint(expr= m.x919 - m.b3010 <= 0)
m.c921 = Constraint(expr= m.x920 - m.b3010 <= 0)
m.c922 = Constraint(expr= m.x921 - m.b3010 <= 0)
m.c923 = Constraint(expr= m.x922 - m.b3010 <= 0)
m.c924 = Constraint(expr= m.x923 - m.b3010 <= 0)
m.c925 = Constraint(expr= m.x924 - m.b3010 <= 0)
m.c926 = Constraint(expr= m.x925 - m.b3010 <= 0)
m.c927 = Constraint(expr= m.x926 - m.b3010 <= 0)
m.c928 = Constraint(expr= m.x927 - m.b3010 <= 0)
m.c929 = Constraint(expr= m.x928 - m.b3010 <= 0)
m.c930 = Constraint(expr= m.x929 - m.b3010 <= 0)
m.c931 = Constraint(expr= m.x930 - m.b3010 <= 0)
m.c932 = Constraint(expr= m.x931 - m.b3010 <= 0)
m.c933 = Constraint(expr= m.x932 - m.b3010 <= 0)
m.c934 = Constraint(expr= m.x933 - m.b3010 <= 0)
m.c935 = Constraint(expr= m.x934 - m.b3010 <= 0)
m.c936 = Constraint(expr= m.x935 - m.b3010 <= 0)
m.c937 = Constraint(expr= m.x936 - m.b3010 <= 0)
m.c938 = Constraint(expr= m.x937 - m.b3010 <= 0)
m.c939 = Constraint(expr= m.x938 - m.b3010 <= 0)
m.c940 = Constraint(expr= m.x939 - m.b3010 <= 0)
m.c941 = Constraint(expr= m.x940 - m.b3010 <= 0)
m.c942 = Constraint(expr= m.x941 - m.b3010 <= 0)
m.c943 = Constraint(expr= m.x942 - m.b3010 <= 0)
m.c944 = Constraint(expr= m.x943 - m.b3010 <= 0)
m.c945 = Constraint(expr= m.x944 - m.b3010 <= 0)
m.c946 = Constraint(expr= m.x945 - m.b3010 <= 0)
m.c947 = Constraint(expr= m.x946 - m.b3010 <= 0)
m.c948 = Constraint(expr= m.x947 - m.b3010 <= 0)
m.c949 = Constraint(expr= m.x948 - m.b3010 <= 0)
m.c950 = Constraint(expr= m.x949 - m.b3010 <= 0)
m.c951 = Constraint(expr= m.x950 - m.b3010 <= 0)
m.c952 = Constraint(expr= m.x951 - m.b3010 <= 0)
m.c953 = Constraint(expr= m.x952 - m.b3010 <= 0)
m.c954 = Constraint(expr= m.x953 - m.b3010 <= 0)
m.c955 = Constraint(expr= m.x954 - m.b3010 <= 0)
m.c956 = Constraint(expr= m.x955 - m.b3010 <= 0)
m.c957 = Constraint(expr= m.x956 - m.b3010 <= 0)
m.c958 = Constraint(expr= m.x957 - m.b3010 <= 0)
m.c959 = Constraint(expr= m.x958 - m.b3010 <= 0)
m.c960 = Constraint(expr= m.x959 - m.b3010 <= 0)
m.c961 = Constraint(expr= m.x960 - m.b3010 <= 0)
m.c962 = Constraint(expr= m.x961 - m.b3010 <= 0)
m.c963 = Constraint(expr= m.x962 - m.b3010 <= 0)
m.c964 = Constraint(expr= m.x963 - m.b3010 <= 0)
m.c965 = Constraint(expr= m.x964 - m.b3010 <= 0)
m.c966 = Constraint(expr= m.x965 - m.b3010 <= 0)
m.c967 = Constraint(expr= m.x966 - m.b3010 <= 0)
m.c968 = Constraint(expr= m.x967 - m.b3010 <= 0)
m.c969 = Constraint(expr= m.x968 - m.b3010 <= 0)
m.c970 = Constraint(expr= m.x969 - m.b3010 <= 0)
m.c971 = Constraint(expr= m.x970 - m.b3010 <= 0)
m.c972 = Constraint(expr= m.x971 - m.b3010 <= 0)
m.c973 = Constraint(expr= m.x972 - m.b3010 <= 0)
m.c974 = Constraint(expr= m.x973 - m.b3010 <= 0)
m.c975 = Constraint(expr= m.x974 - m.b3010 <= 0)
m.c976 = Constraint(expr= m.x975 - m.b3010 <= 0)
m.c977 = Constraint(expr= m.x976 - m.b3010 <= 0)
m.c978 = Constraint(expr= m.x977 - m.b3010 <= 0)
m.c979 = Constraint(expr= m.x978 - m.b3010 <= 0)
m.c980 = Constraint(expr= m.x979 - m.b3010 <= 0)
m.c981 = Constraint(expr= m.x980 - m.b3010 <= 0)
m.c982 = Constraint(expr= m.x981 - m.b3010 <= 0)
m.c983 = Constraint(expr= m.x982 - m.b3010 <= 0)
m.c984 = Constraint(expr= m.x983 - m.b3010 <= 0)
m.c985 = Constraint(expr= m.x984 - m.b3010 <= 0)
m.c986 = Constraint(expr= m.x985 - m.b3010 <= 0)
m.c987 = Constraint(expr= m.x986 - m.b3010 <= 0)
m.c988 = Constraint(expr= m.x987 - m.b3010 <= 0)
m.c989 = Constraint(expr= m.x988 - m.b3010 <= 0)
m.c990 = Constraint(expr= m.x989 - m.b3010 <= 0)
m.c991 = Constraint(expr= m.x990 - m.b3010 <= 0)
m.c992 = Constraint(expr= m.x991 - m.b3010 <= 0)
m.c993 = Constraint(expr= m.x992 - m.b3010 <= 0)
m.c994 = Constraint(expr= m.x993 - m.b3010 <= 0)
m.c995 = Constraint(expr= m.x994 - m.b3010 <= 0)
m.c996 = Constraint(expr= m.x995 - m.b3010 <= 0)
m.c997 = Constraint(expr= m.x996 - m.b3010 <= 0)
m.c998 = Constraint(expr= m.x997 - m.b3010 <= 0)
m.c999 = Constraint(expr= m.x998 - m.b3010 <= 0)
m.c1000 = Constraint(expr= m.x999 - m.b3010 <= 0)
m.c1001 = Constraint(expr= m.x1000 - m.b3010 <= 0)
m.c1002 = Constraint(expr= m.x1001 - m.b3011 <= 0)
m.c1003 = Constraint(expr= m.x1002 - m.b3011 <= 0)
m.c1004 = Constraint(expr= m.x1003 - m.b3011 <= 0)
m.c1005 = Constraint(expr= m.x1004 - m.b3011 <= 0)
m.c1006 = Constraint(expr= m.x1005 - m.b3011 <= 0)
m.c1007 = Constraint(expr= m.x1006 - m.b3011 <= 0)
m.c1008 = Constraint(expr= m.x1007 - m.b3011 <= 0)
m.c1009 = Constraint(expr= m.x1008 - m.b3011 <= 0)
m.c1010 = Constraint(expr= m.x1009 - m.b3011 <= 0)
m.c1011 = Constraint(expr= m.x1010 - m.b3011 <= 0)
m.c1012 = Constraint(expr= m.x1011 - m.b3011 <= 0)
m.c1013 = Constraint(expr= m.x1012 - m.b3011 <= 0)
m.c1014 = Constraint(expr= m.x1013 - m.b3011 <= 0)
m.c1015 = Constraint(expr= m.x1014 - m.b3011 <= 0)
m.c1016 = Constraint(expr= m.x1015 - m.b3011 <= 0)
m.c1017 = Constraint(expr= m.x1016 - m.b3011 <= 0)
m.c1018 = Constraint(expr= m.x1017 - m.b3011 <= 0)
m.c1019 = Constraint(expr= m.x1018 - m.b3011 <= 0)
m.c1020 = Constraint(expr= m.x1019 - m.b3011 <= 0)
m.c1021 = Constraint(expr= m.x1020 - m.b3011 <= 0)
m.c1022 = Constraint(expr= m.x1021 - m.b3011 <= 0)
m.c1023 = Constraint(expr= m.x1022 - m.b3011 <= 0)
m.c1024 = Constraint(expr= m.x1023 - m.b3011 <= 0)
m.c1025 = Constraint(expr= m.x1024 - m.b3011 <= 0)
m.c1026 = Constraint(expr= m.x1025 - m.b3011 <= 0)
m.c1027 = Constraint(expr= m.x1026 - m.b3011 <= 0)
m.c1028 = Constraint(expr= m.x1027 - m.b3011 <= 0)
m.c1029 = Constraint(expr= m.x1028 - m.b3011 <= 0)
m.c1030 = Constraint(expr= m.x1029 - m.b3011 <= 0)
m.c1031 = Constraint(expr= m.x1030 - m.b3011 <= 0)
m.c1032 = Constraint(expr= m.x1031 - m.b3011 <= 0)
m.c1033 = Constraint(expr= m.x1032 - m.b3011 <= 0)
m.c1034 = Constraint(expr= m.x1033 - m.b3011 <= 0)
m.c1035 = Constraint(expr= m.x1034 - m.b3011 <= 0)
m.c1036 = Constraint(expr= m.x1035 - m.b3011 <= 0)
m.c1037 = Constraint(expr= m.x1036 - m.b3011 <= 0)
m.c1038 = Constraint(expr= m.x1037 - m.b3011 <= 0)
m.c1039 = Constraint(expr= m.x1038 - m.b3011 <= 0)
m.c1040 = Constraint(expr= m.x1039 - m.b3011 <= 0)
m.c1041 = Constraint(expr= m.x1040 - m.b3011 <= 0)
m.c1042 = Constraint(expr= m.x1041 - m.b3011 <= 0)
m.c1043 = Constraint(expr= m.x1042 - m.b3011 <= 0)
m.c1044 = Constraint(expr= m.x1043 - m.b3011 <= 0)
m.c1045 = Constraint(expr= m.x1044 - m.b3011 <= 0)
m.c1046 = Constraint(expr= m.x1045 - m.b3011 <= 0)
m.c1047 = Constraint(expr= m.x1046 - m.b3011 <= 0)
m.c1048 = Constraint(expr= m.x1047 - m.b3011 <= 0)
m.c1049 = Constraint(expr= m.x1048 - m.b3011 <= 0)
m.c1050 = Constraint(expr= m.x1049 - m.b3011 <= 0)
m.c1051 = Constraint(expr= m.x1050 - m.b3011 <= 0)
m.c1052 = Constraint(expr= m.x1051 - m.b3011 <= 0)
m.c1053 = Constraint(expr= m.x1052 - m.b3011 <= 0)
m.c1054 = Constraint(expr= m.x1053 - m.b3011 <= 0)
m.c1055 = Constraint(expr= m.x1054 - m.b3011 <= 0)
m.c1056 = Constraint(expr= m.x1055 - m.b3011 <= 0)
m.c1057 = Constraint(expr= m.x1056 | |
1.70, 1.75, 2.0, 2.5]
decs = [0.77, 0.76, 0.75, 0.74, 0.73, 0.66, 0.62, 0.61, 0.50, 0.48, 0.42, 0.38, 0.35, 0.28,0.25]
#decrease the size of Gaussian template, similar to the cfar_seg results.
# [cfar shrink the real target, when outside is lower than center]
wr = wr_lis[snr_lis.index(snr)]
hr = hr_lis[snr_lis.index(snr)]
iw, ih = w_t/wr, min(h_t/hr, h_t)
ix, iy, iw, ih = np.int0([tcx-iw/2, tcy-ih/2, iw, ih])
inner_gauss = template[iy:iy+ih, ix:ix+iw]
dec_coef = np.sqrt(np.power(10, (snr / 10)) * erc_local / np.mean(inner_gauss**2))
dec_str[snr_lis.index(snr)] = '%.2f'%dec_coef
dec_coef = decs[snr_lis.index(snr)]
template = template*dec_coef #np.sqrt(1.618) #/2.8 # Make sure that in shrinked (cfar-segmented) target region still holds low snr.
loc_snr = 10 * np.log10(np.sum(template ** 2) / np.sum(bk_roi ** 2))
glob_snr = 10 * np.log10(np.sum(template ** 2) / (erc * template.size))
# print('Swerling Type %d, kcoef_t %.2f (w %d, h %d), extened_egk %.2E' % (swerling_type, kcoef_t, w, h, Egk_numer))
# print('average (target - local clutter) power is (%.2f - %.2f)' % (np.sum(template ** 2) / template.size, erc_local))
# print('Asked snr is %d, simulated local snr is %.2f, simulated global snr is %.2f' % (snr, loc_snr, glob_snr))
#local_snrs.append(loc_snr)
#global_snrs.append(glob_snr)
mask = ([template > bk_roi]) * template
clutter_background[ly:ly + h_t, lx:lx + w_t] = mask + bk_roi
#clutter_background[ly:ly + h_t, lx:lx + w_t] = template + bk_roi
return clutter_background
def add_gaussian_template_on_clutter_v2(cx, cy, w, h, theta, erc, snr, clutter_background, swerling_type=0):
    '''
    Embed a rotated 2-D Gaussian target template into a clutter image so the
    target approximates a requested SNR (dB), with optional Swerling
    amplitude fluctuation.

    Rewrite the swerling type's pdf. kgauss is normalized.

    :param cx: target centre column in ``clutter_background``.
    :param cy: target centre row in ``clutter_background``.
    :param w: nominal target width in pixels (sets sigma_x).
    :param h: nominal target height in pixels (sets sigma_y).
    :param theta: rotation angle passed to ``gaussian_kernel2d``.
    :param erc: average clutter power of the whole background image.
    :param snr: requested SNR in dB; must be one of the integers 12..-2,
        since it indexes the empirical ``snr_lis`` tables below.
    :param clutter_background: 2-D clutter image; modified in place.
    :param swerling_type: 0 (non-fluctuating), 1 (Rayleigh amplitude) or
        3 (chi-square amplitude) Swerling fluctuation model.
    :raises ValueError: if the template would extend beyond the image.
    :return: ``clutter_background`` with the target template blended in.
    '''
    # Erc: average clutter energy.
    # Erc = np.sum(clutter_background ** 2) / clutter_background.size
    sigma_x = (w/2 - 0.5) / 2 # sigma_x is related to the width of the template
    sigma_y = (h/2 - 0.5) / 2
    kgauss = gaussian_kernel2d(sigma_x, sigma_y, theta, bnorm=False) # Get diffusive coefficients for a 2d gaussian
    Egk_numer = np.sum(kgauss.ravel() ** 2) / kgauss.size # 2d gaussian's average power.
    h_t, w_t = kgauss.shape
    # Bounding box of the template, centred on (cx, cy).
    ly = int(cy - (h_t - 1) / 2)
    ry = int(cy + (h_t - 1) / 2)
    lx = int(cx - (w_t - 1) / 2)
    rx = int(cx + (w_t - 1) / 2)
    img_h, img_w = clutter_background.shape
    if ly < 0 or lx < 0 or ry > img_h or rx > img_w:
        raise ValueError('template location is beyond the image boundaries!')
    bk_roi = clutter_background[ly:ly + h_t, lx:lx + w_t]
    # compute the amplitude coefficients according to the SNR Eq.
    kcoef_global = np.sqrt(np.power(10, (snr / 10)) * erc / Egk_numer)
    kcoef_peak = np.sqrt(np.power(10, (snr / 10)) * erc) # point's snr reversion
    # average power of clutter is computed by numerical results in local roi-window.
    erc_local = np.sum(bk_roi ** 2) / bk_roi.size
    kcoef_local = np.sqrt(np.power(10, (snr / 10)) * erc_local / Egk_numer)
    # NOTE(review): only the peak-based coefficient is used below;
    # kcoef_global / kcoef_local are computed but kept only for reference.
    kcoef = kcoef_peak
    if swerling_type == 0: # swerling type 0 target
        # Non-fluctuating target: deterministic amplitude.
        kcoef_t = kcoef
        template = kgauss * kcoef_t
    if swerling_type == 1:
        ray_scale = kcoef/np.sqrt(2)#choosing mode # /np.sqrt(2)
        # central amplitude obeys the rayleigh distribution, which 2*sigma^2 = sigma_t = kcoef**2 (swerling_0's Amplitude)
        # The mean of 1000 Rayleigh draws is used as the amplitude.
        kcoefs = rayleigh.rvs(loc=0, scale=ray_scale, size=1000)
        kcoef_t = np.mean(kcoefs)
        template = kgauss * kcoef_t
    if swerling_type == 3: # central amplitude obeys the chi distribution, which degrees of freedom k=4.
        df = 4
        chi2_scale= kcoef/np.sqrt(df*2+df**2)#np.sqrt(df-2)#
        # The mean of 1000 chi-square draws is used as the amplitude.
        kcoefs = chi2.rvs(df=df, scale=chi2_scale, size=1000)# or kcoef_t = chi2.rvs(df=kcoef, size=1), then template=kgauss*kcoef
        kcoef_t = np.mean(kcoefs)
        template = kgauss * (kcoef_t) #
    # Get decrease_coeffient to make sure the inner gaussian template satisfy the snr requirement.
    tcx, tcy = w_t / 2, h_t / 2
    snr_lis = list(range(12, -3, -1)) # [12, 11, ..., -1, -2]
    # shrink rate, take from cfar results.
    # Empirical per-SNR tables; the explicit list below overwrites the
    # range() above with the same values.
    snr_lis = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2]
    wr_lis = [1.62, 1.67, 1.65, 1.76, 1.80, 2.00, 2.20, 2.30, 3.20, 3.50, 3.70, 3.90, 4.00, 4.2, 4.5]
    hr_lis = [0.88, 0.89, 0.90, 0.92, 1.00, 1.10, 1.20, 1.20, 1.55, 1.55, 1.65, 1.70, 1.75, 2.0, 2.5]
    incs_sw1= np.linspace(1.00, 2.55, 15)#[0.95, 1.00, 0.90, 0.85, 0.80, 1.10, 1.10, 1.10, 1.10, 1.10, 1.10, 2.00, 2.00, 2.20, 2.50]
    #incs_sw1 = np.log2(1+incs_sw1)
    decs = np.linspace(0.78, 0.34, 15)
    #decs_sw1= np.linspace(1.00, 0.45, 15)
    decs_sw3= np.linspace(1.20, 0.30, 15)
    # decrease the size of Gaussian template, similar to the cfar_seg results.
    # [cfar shrink the real target, when outside is lower than center]
    wr = wr_lis[snr_lis.index(snr)]
    hr = hr_lis[snr_lis.index(snr)]
    iw, ih = w_t / wr, min(h_t / hr, h_t)
    # NOTE(review): np.int0 is a deprecated alias (np.intp) in recent NumPy.
    ix, iy, iw, ih = np.int0([tcx - iw / 2, tcy - ih / 2, iw, ih])
    # inner_gauss is currently unused (the dec_coef computation below is
    # disabled); kept for future re-tuning.
    inner_gauss = template[iy:iy + ih, ix:ix + iw]
    #dec_coef = np.sqrt(np.power(10, (snr / 10)) * erc_local / np.mean(inner_gauss ** 2))
    #dec_str[snr_lis.index(snr)] = '%.2f' % dec_coef
    if swerling_type == 0: # decreasing for non-fluctuating target type
        dec_coef = decs[snr_lis.index(snr)]
        # Scaling is currently disabled (multiplier fixed at 1).
        template = template * 1#dec_coef # np.sqrt(1.618) #/2.8 # Make sure that in shrinked (cfar-segmented) target region still holds low snr.
    if swerling_type == 1:
        inc_coef = incs_sw1[snr_lis.index(snr)]
        # Scaling is currently disabled (multiplier fixed at 1).
        template = template * 1 #inc_coef
    if swerling_type == 3:
        dec_coef = decs_sw3[snr_lis.index(snr)]
        # Scaling is currently disabled (multiplier fixed at 1).
        template = template * 1#dec_coef
    # Diagnostic SNRs of the realised template against local/global clutter.
    loc_snr = 10 * np.log10(np.sum(template ** 2) / np.sum(bk_roi ** 2))
    glob_snr = 10 * np.log10(np.sum(template ** 2) / (erc * template.size))
    peak_snr = 10 * np.log10(np.max(template)**2 / erc) #point's snr
    # print('Swerling Type %d, kcoef_t %.2f (w %d, h %d), extened_egk %.2E' % (swerling_type, kcoef_t, w, h, Egk_numer))
    # print('average (target - local clutter) power is (%.2f - %.2f)' % (np.sum(template ** 2) / template.size, erc_local))
    # print('Asked snr is %d, simulated local snr is %.2f, simulated global snr is %.2f' % (snr, loc_snr, glob_snr))
    # NOTE(review): glob_snr is computed but not recorded; peak_snr is
    # appended to the (module-level) global_snrs list instead.
    local_snrs.append(loc_snr)
    global_snrs.append(peak_snr)
    # NOTE(review): the list wrapper makes this a (1, h_t, w_t) array that
    # keeps template values only where they exceed the local clutter; the
    # leading singleton axis is broadcast away by the slice assignment below.
    mask = ([template > bk_roi]) * template
    clutter_background[ly:ly + h_t, lx:lx + w_t] = mask + bk_roi
    #clutter_background[ly:ly + h_t, lx:lx + w_t] = template + bk_roi
    #Real_SNR is normally higher than peak_snr
    # NOTE(review): real_snr is computed but never used or returned.
    real_snr = 10 * np.log10(max(np.max(template + bk_roi)-np.sqrt(2), np.spacing(1)) / 2)
    return clutter_background
def add_uniform_template_on_clutter(cx, cy, w, h, theta, erc, snr, clutter_background, swerling_type=0):
    """Blend a flat (uniform) target template into a clutter background.

    An ``h`` x ``w`` constant patch (normalised to sum to 1) is scaled so the
    target reaches the requested SNR (dB) against the global average clutter
    power ``erc``.  For Swerling types 1 and 3 the amplitude is drawn at
    random (Rayleigh / chi-square respectively).  ``theta`` is accepted for
    interface parity with the Gaussian variants but has no effect on a flat
    template.  ``clutter_background`` is modified in place and also returned.

    :raises ValueError: if the template would extend beyond the image.
    """
    kernel = np.ones((int(h), int(w))) / (h * w)  # flat template, sums to 1
    kernel_power = np.sum(kernel.ravel() ** 2) / kernel.size  # template average power
    tmpl_h, tmpl_w = kernel.shape
    # Bounding box of the template, centred on (cx, cy).
    top = int(cy - (tmpl_h - 1) / 2)
    bottom = int(cy + (tmpl_h - 1) / 2)
    left = int(cx - (tmpl_w - 1) / 2)
    right = int(cx + (tmpl_w - 1) / 2)
    img_h, img_w = clutter_background.shape
    if top < 0 or left < 0 or bottom > img_h or right > img_w:
        raise ValueError('template location is beyond the image boundaries!')
    roi = clutter_background[top:top + tmpl_h, left:left + tmpl_w]
    snr_ratio = 10.0 ** (snr / 10)
    # Amplitude required to reach the SNR against the global clutter power...
    amp_global = np.sqrt(snr_ratio * erc / kernel_power)
    # ...and against the locally measured clutter power (kept for reference).
    roi_power = np.sum(roi ** 2) / roi.size
    amp_local = np.sqrt(snr_ratio * roi_power / kernel_power)
    amp = amp_global
    if swerling_type == 0:
        # Swerling 0: non-fluctuating, deterministic amplitude.
        template = kernel * amp
    if swerling_type == 1:
        # Swerling 1: Rayleigh-distributed amplitude (scale = amp).
        template = kernel * rayleigh.rvs(loc=0, scale=amp, size=1)
    if swerling_type == 3:
        # Swerling 3: chi-square amplitude with df = amp (mean of chi2 is df).
        template = kernel * (chi2.rvs(df=amp, size=1))
    # Record the realised local and global SNRs for later inspection.
    loc_snr = 10 * np.log10(np.sum(template ** 2) / np.sum(roi ** 2))
    glob_snr = 10 * np.log10(np.sum(template ** 2) / (erc * template.size))
    local_snrs.append(loc_snr)
    global_snrs.append(glob_snr)
    # Unlike the Gaussian variants, the template is added everywhere (no mask).
    clutter_background[top:top + tmpl_h, left:left + tmpl_w] = template + roi
    return clutter_background
def get_frame(img_w, img_h, frame_no, snr, gt_dict, swerling_type=0):
'''
Get one frame combine targets and clutter together.
#add swerling type on Mar 2, 2021.
:param frame_no:
:return:
'''
| |
<reponame>BubuLK/sfepy
import sympy as sm
x, y, z = sm.symbols('x y z')
all_bfs = {
2 : {
1 : [
[
x*(y - 1.0) - y + 1.0 ,
x*(1.0 - y) ,
x*y ,
-x*y + y ,
],
[
[y - 1.0, x - 1.0] ,
[1.0 - y, -x] ,
[y, x] ,
[-y, 1.0 - x] ,
],
],
2 : [
[
x*(x*(2.0 - 2.0*y) + y*(5.0 - 2.0*y) - 3.0) + y*(2.0*y - 3.0) + 1.0 ,
x*(x*(2.0 - 2.0*y) + y*(2.0*y - 1.0) - 1.0) ,
x*(2.0*x*y + y*(2.0*y - 3.0)) ,
x*(2.0*x*y + y*(-2.0*y - 1.0)) + y*(2.0*y - 1.0) ,
x*(x*(4*y - 4) - 4*y + 4) ,
x*y*(4.0 - 4.0*y) ,
x*(-4.0*x*y + 4.0*y) ,
x*y*(4.0*y - 4.0) + y*(4.0 - 4.0*y) ,
],
[
[x*(4.0 - 4.0*y) + y*(5.0 - 2.0*y) - 3.0, x*(-2.0*x - 4.0*y + 5.0) + 4.0*y - 3.0] ,
[x*(4.0 - 4.0*y) + y*(2.0*y - 1.0) - 1.0, x*(-2.0*x + 4.0*y - 1.0)] ,
[4.0*x*y + y*(2.0*y - 3.0), x*(2.0*x + 4.0*y - 3.0)] ,
[4.0*x*y + y*(-2.0*y - 1.0), x*(2.0*x - 4.0*y - 1.0) + 4.0*y - 1.0] ,
[x*(8*y - 8) - 4*y + 4, x*(4*x - 4)] ,
[y*(4.0 - 4.0*y), x*(4.0 - 8.0*y)] ,
[-8.0*x*y + 4.0*y, x*(4.0 - 4.0*x)] ,
[y*(4.0*y - 4.0), x*(8.0*y - 4.0) - 8.0*y + 4.0] ,
],
],
3 : [
[
x*(x*(x*(4.5*y - 4.5) - 9.0*y + 9.0) + y*(y*(4.5*y - 9.0) + 10.0) - 5.5) + y*(y*(9.0 - 4.5*y) - 5.5) + 1.0 ,
x*(x*(x*(4.5 - 4.5*y) + 4.5*y - 4.5) + y*(y*(9.0 - 4.5*y) - 5.5) + 1.0) ,
x*(x*(4.5*x*y - 4.5*y) + y*(y*(4.5*y - 4.5) + 1.0)) ,
x*(x*(-4.5*x*y + 9.0*y) + y*(y*(4.5 - 4.5*y) - 5.5)) + y*(y*(4.5*y - 4.5) + 1.0) ,
x*(x*(x*(13.5 - 13.5*y) + 22.5*y - 22.5) - 9.0*y + 9.0) ,
x*(x*(x*(13.5*y - 13.5) - 18.0*y + 18.0) + 4.5*y - 4.5) ,
x*y*(y*(13.5*y - 22.5) + 9.0) ,
x*y*(y*(18.0 - 13.5*y) - 4.5) ,
x*(x*(-13.5*x*y + 18.0*y) - 4.5*y) ,
x*(x*(13.5*x*y - 22.5*y) + 9.0*y) ,
x*y*(y*(13.5*y - 18.0) + 4.5) + y*(y*(18.0 - 13.5*y) - 4.5) ,
x*y*(y*(22.5 - 13.5*y) - 9.0) + y*(y*(13.5*y - 22.5) + 9.0) ,
],
[
[x*(x*(13.5*y - 13.5) - 18.0*y + 18.0) + y*(y*(4.5*y - 9.0) + 10.0) - 5.5, x*(x*(4.5*x - 9.0) + y*(13.5*y - 18.0) + 10.0) + y*(18.0 - 13.5*y) - 5.5] ,
[x*(x*(13.5 - 13.5*y) + 9.0*y - 9.0) + y*(y*(9.0 - 4.5*y) - 5.5) + 1.0, x*(x*(4.5 - 4.5*x) + y*(18.0 - 13.5*y) - 5.5)] ,
[x*(13.5*x*y - 9.0*y) + y*(y*(4.5*y - 4.5) + 1.0), x*(x*(4.5*x - 4.5) + y*(13.5*y - 9.0) + 1.0)] ,
[x*(-13.5*x*y + 18.0*y) + y*(y*(4.5 - 4.5*y) - 5.5), x*(x*(9.0 - 4.5*x) + y*(9.0 - 13.5*y) - 5.5) + y*(13.5*y - 9.0) + 1.0] ,
[x*(x*(40.5 - 40.5*y) + 45.0*y - 45.0) - 9.0*y + 9.0, x*(x*(22.5 - 13.5*x) - 9.0)] ,
[x*(x*(40.5*y - 40.5) - 36.0*y + 36.0) + 4.5*y - 4.5, x*(x*(13.5*x - 18.0) + 4.5)] ,
[y*(y*(13.5*y - 22.5) + 9.0), x*(y*(40.5*y - 45.0) + 9.0)] ,
[y*(y*(18.0 - 13.5*y) - 4.5), x*(y*(36.0 - 40.5*y) - 4.5)] ,
[x*(-40.5*x*y + 36.0*y) - 4.5*y, x*(x*(18.0 - 13.5*x) - 4.5)] ,
[x*(40.5*x*y - 45.0*y) + 9.0*y, x*(x*(13.5*x - 22.5) + 9.0)] ,
[y*(y*(13.5*y - 18.0) + 4.5), x*(y*(40.5*y - 36.0) + 4.5) + y*(36.0 - 40.5*y) - 4.5] ,
[y*(y*(22.5 - 13.5*y) - 9.0), x*(y*(45.0 - 40.5*y) - 9.0) + y*(40.5*y - 45.0) + 9.0] ,
],
],
},
3 : {
1 : [
[
x*(y*(1.0 - z) + z - 1.0) + y*(z - 1.0) - z + 1.0 ,
x*(y*(z - 1.0) - z + 1.0) ,
x*y*(1.0 - z) ,
x*y*(z - 1.0) + y*(1.0 - z) ,
x*(y*z - z) - y*z + z ,
x*(-y*z + z) ,
x*y*z ,
-x*y*z + y*z ,
],
[
[y*(1.0 - z) + z - 1.0, x*(1.0 - z) + z - 1.0, x*(1.0 - y) + y - 1.0] ,
[y*(z - 1.0) - z + 1.0, x*(z - 1.0), x*(y - 1.0)] ,
[y*(1.0 - z), x*(1.0 - z), -x*y] ,
[y*(z - 1.0), x*(z - 1.0) - z + 1.0, x*y - y] ,
[y*z - z, x*z - z, x*(y - 1.0) - y + 1.0] ,
[-y*z + z, -x*z, x*(1.0 - y)] ,
[y*z, x*z, x*y] ,
[-y*z, -x*z + z, -x*y + y] ,
],
],
2 : [
[
x*(x*(y*(2.0*z - 2.0) - 2.0*z + 2.0) + y*(y*(2.0*z - 2.0) + z*(2.0*z - 7.0) + 5.0) + z*(5.0 - 2.0*z) - 3.0) + y*(y*(2.0 - 2.0*z) + z*(5.0 - 2.0*z) - 3.0) + z*(2.0*z - 3.0) + 1.0 ,
x*(x*(y*(2.0*z - 2.0) - 2.0*z + 2.0) + y*(y*(2.0 - 2.0*z) + z*(3.0 - 2.0*z) - 1.0) + z*(2.0*z - 1.0) - 1.0) ,
x*(x*y*(2.0 - 2.0*z) + y*(y*(2.0 - 2.0*z) + z*(2.0*z + 1.0) - 3.0)) ,
x*(x*y*(2.0 - 2.0*z) + y*(y*(2.0*z - 2.0) + z*(3.0 - 2.0*z) - 1.0)) + y*(y*(2.0 - 2.0*z) + z*(2.0*z - 1.0) - 1.0) ,
x*(x*(-2.0*y*z + 2.0*z) + y*(-2.0*y*z + z*(2.0*z + 3.0)) + z*(-2.0*z - 1.0)) + y*(2.0*y*z + z*(-2.0*z - 1.0)) + z*(2.0*z - 1.0) ,
x*(x*(-2.0*y*z + 2.0*z) + y*(2.0*y*z + z*(1.0 - 2.0*z)) + z*(2.0*z - 3.0)) ,
x*(2.0*x*y*z + y*(2.0*y*z + z*(2.0*z - 5.0))) ,
x*(2.0*x*y*z + y*(-2.0*y*z + z*(1.0 - 2.0*z))) + y*(2.0*y*z + z*(2.0*z - 3.0)) ,
x*(x*(y*(4.0 - 4.0*z) + 4.0*z - 4.0) + y*(4.0*z - 4.0) - 4.0*z + 4.0) ,
x*y*(y*(4.0*z - 4.0) - 4.0*z + 4.0) ,
x*(x*y*(4.0*z - 4.0) + y*(4.0 - 4.0*z)) ,
x*y*(y*(4.0 - 4.0*z) + 4.0*z - 4.0) + y*(y*(4.0*z - 4.0) - 4.0*z + 4.0) ,
x*(x*(4.0*y*z - 4.0*z) - 4.0*y*z + 4.0*z) ,
x*y*(-4.0*y*z + 4.0*z) ,
x*(-4.0*x*y*z + 4.0*y*z) ,
x*y*(4.0*y*z - 4.0*z) + y*(-4.0*y*z + 4.0*z) ,
x*(y*z*(4.0 - 4.0*z) + z*(4.0*z - 4.0)) + y*z*(4.0*z - 4.0) + z*(4.0 - 4.0*z) ,
x*(y*z*(4.0*z - 4.0) + z*(4.0 - 4.0*z)) ,
x*y*z*(4.0 - 4.0*z) ,
x*y*z*(4.0*z - 4.0) + y*z*(4.0 - 4.0*z) ,
],
[
[x*(y*(4.0*z - 4.0) - 4.0*z + 4.0) + y*(y*(2.0*z - 2.0) + z*(2.0*z - 7.0) + 5.0) + z*(5.0 - 2.0*z) - 3.0, x*(x*(2.0*z - 2.0) + y*(4.0*z - 4.0) + z*(2.0*z - 7.0) + 5.0) + y*(4.0 - 4.0*z) + z*(5.0 - 2.0*z) - 3.0, x*(x*(2.0*y - 2.0) + y*(2.0*y + 4.0*z - 7.0) - 4.0*z + 5.0) + y*(-2.0*y - 4.0*z + 5.0) + 4.0*z - 3.0] ,
[x*(y*(4.0*z - 4.0) - 4.0*z + 4.0) + y*(y*(2.0 - 2.0*z) + z*(3.0 - 2.0*z) - 1.0) + z*(2.0*z - 1.0) - 1.0, x*(x*(2.0*z - 2.0) + y*(4.0 - 4.0*z) + z*(3.0 - 2.0*z) - 1.0), x*(x*(2.0*y - 2.0) + y*(-2.0*y - 4.0*z + 3.0) + 4.0*z - 1.0)] ,
[x*y*(4.0 - 4.0*z) + y*(y*(2.0 - 2.0*z) + z*(2.0*z + 1.0) - 3.0), x*(x*(2.0 - 2.0*z) + y*(4.0 - 4.0*z) + z*(2.0*z + 1.0) - 3.0), x*(-2.0*x*y + y*(-2.0*y + 4.0*z + 1.0))] ,
[x*y*(4.0 - 4.0*z) + y*(y*(2.0*z - 2.0) + z*(3.0 - 2.0*z) - 1.0), x*(x*(2.0 - 2.0*z) + y*(4.0*z - 4.0) + z*(3.0 - 2.0*z) - 1.0) + y*(4.0 - 4.0*z) + z*(2.0*z - 1.0) - 1.0, x*(-2.0*x*y + y*(2.0*y - 4.0*z + 3.0)) + y*(-2.0*y + 4.0*z - 1.0)] ,
[x*(-4.0*y*z + 4.0*z) + y*(-2.0*y*z + z*(2.0*z + 3.0)) + z*(-2.0*z - 1.0), x*(-2.0*x*z - 4.0*y*z + z*(2.0*z + 3.0)) + 4.0*y*z + z*(-2.0*z - 1.0), x*(x*(2.0 - 2.0*y) + y*(-2.0*y + 4.0*z + 3.0) - 4.0*z - 1.0) + y*(2.0*y - 4.0*z - 1.0) + 4.0*z - 1.0] ,
[x*(-4.0*y*z + 4.0*z) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.